Dataset schema (column | dtype | range or values; ⌀ marks nullable columns):

| Column | Dtype | Range / Values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 6 to 782k |
| ext | string | 7 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 4 to 237 |
| max_stars_repo_name | string | length 6 to 72 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | |
| max_stars_count | int64 | 1 to 53k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 4 to 184 |
| max_issues_repo_name | string | length 6 to 72 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | |
| max_issues_count | int64 | 1 to 27.1k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 4 to 184 |
| max_forks_repo_name | string | length 6 to 72 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | |
| max_forks_count | int64 | 1 to 12.2k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 6 to 782k |
| avg_line_length | float64 | 2.75 to 664k |
| max_line_length | int64 | 5 to 782k |
| alphanum_fraction | float64 | 0 to 1 |
hexsha: 738da3aafd326eac7396f2d8597a9ed6a5308ef2 | size: 6,677 | ext: py | lang: Python
max_stars: 75 (2018-04-06T09:13:34.000Z to 2020-05-18T18:59:47.000Z) | path: tests/rbac/common/assertions/key.py | repo: fthornton67/sawtooth-next-directory | head: 79479afb8d234911c56379bb1d8abf11f28ef86d | licenses: ["Apache-2.0"]
max_issues: 989 (2018-04-18T21:01:56.000Z to 2019-10-23T15:37:09.000Z) | path: tests/rbac/common/assertions/key.py | repo: fthornton67/sawtooth-next-directory | head: 79479afb8d234911c56379bb1d8abf11f28ef86d | licenses: ["Apache-2.0"]
max_forks: 72 (2018-04-13T18:29:12.000Z to 2020-05-29T06:00:33.000Z) | path: tests/rbac/common/assertions/key.py | repo: fthornton67/sawtooth-next-directory | head: 79479afb8d234911c56379bb1d8abf11f28ef86d | licenses: ["Apache-2.0"]
# Copyright 2019 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
"""Key assertion helpers"""
# pylint: disable=invalid-name,no-member
import binascii
import sawtooth_signing
from sawtooth_signing.secp256k1 import Secp256k1PublicKey
from sawtooth_signing.secp256k1 import Secp256k1PrivateKey
from sawtooth_signing.core import ParseError
from rbac.common.crypto.keys import ELLIPTIC_CURVE_ALGORITHM
from rbac.common.crypto.keys import PRIVATE_KEY_LENGTH
from rbac.common.crypto.keys import PUBLIC_KEY_LENGTH
from rbac.common.crypto.keys import PRIVATE_KEY_PATTERN
from rbac.common.crypto.keys import PUBLIC_KEY_PATTERN
from rbac.common.logs import get_default_logger
from tests.rbac.common.assertions.common import CommonAssertions
LOGGER = get_default_logger(__name__)
class KeyAssertions(CommonAssertions):
"""Key assertion helpers"""
def assertIsPrivateKey(self, key):
"""Sanity checks a private key
key: hex string, bytes, or Secp256k1PrivateKey
returns: Secp256k1PrivateKey"""
self.assertIsNotNone(key)
if isinstance(key, Secp256k1PrivateKey):
return self.assertIsPrivateKeySecp256k1(key)
if isinstance(key, str):
return self.assertIsPrivateKeyHex(key)
if isinstance(key, bytes):
return self.assertIsPrivateKeyBytes(key)
raise ParseError("Unable to parse private key: {}".format(type(key)))
def assertIsPublicKey(self, key):
"""Sanity checks a public key
key -- hex string, bytes, or Secp256k1PublicKey
returns -- Secp256k1PublicKey"""
self.assertIsNotNone(key)
if isinstance(key, Secp256k1PublicKey):
return self.assertIsPublicKeySecp256k1(key)
if isinstance(key, str):
return self.assertIsPublicKeyHex(key)
if isinstance(key, bytes):
return self.assertIsPublicKeyBytes(key)
raise ParseError("Unable to parse public key: {}".format(type(key)))
def assertIsKeyPair(self, public_key, private_key):
"""Sanity checks public & private key and
verifies they are a matching key pair
public_key -- hex string, bytes, or Secp256k1PublicKey
private_key -- hex string, bytes, or Secp256k1PrivateKey
returns -- Secp256k1PublicKey, Secp256k1PrivateKey"""
public_key = self.assertIsPublicKey(public_key)
private_key = self.assertIsPrivateKey(private_key)
self.assertIsKeyPairSecp256k1(public_key, private_key)
return public_key, private_key
def assertIsPrivateKeySecp256k1(self, key):
"""Sanity checks a Secp256k1PrivateKey private key"""
self.assertIsInstance(key, Secp256k1PrivateKey)
self.assertIsInstance(key.as_hex(), str)
self.assertEqual(len(key.as_hex()), PRIVATE_KEY_LENGTH * 2)
self.assertTrue(PRIVATE_KEY_PATTERN.match(key.as_hex()))
self.assertIsInstance(key.as_bytes(), bytes)
self.assertEqual(len(key.as_bytes()), PRIVATE_KEY_LENGTH)
self.assertEqual(key.as_hex(), str(binascii.hexlify(key.as_bytes()), "ascii"))
self.assertEqual(binascii.unhexlify(key.as_hex()), key.as_bytes())
return key
def assertIsPublicKeySecp256k1(self, key):
"""Sanity checks a Secp256k1PublicKey public key"""
self.assertIsInstance(key, Secp256k1PublicKey)
self.assertIsInstance(key.as_hex(), str)
self.assertEqual(len(key.as_hex()), PUBLIC_KEY_LENGTH * 2)
self.assertTrue(PUBLIC_KEY_PATTERN.match(key.as_hex()))
self.assertIsInstance(key.as_bytes(), bytes)
self.assertEqual(len(key.as_bytes()), PUBLIC_KEY_LENGTH)
self.assertEqual(key.as_hex(), str(binascii.hexlify(key.as_bytes()), "ascii"))
self.assertEqual(binascii.unhexlify(key.as_hex()), key.as_bytes())
return key
def assertIsPrivateKeyHex(self, key):
"""Sanity checks a hexidecimal string private key"""
self.assertIsInstance(key, str)
self.assertTrue(PRIVATE_KEY_PATTERN.match(key))
key = Secp256k1PrivateKey.from_hex(key)
self.assertIsPrivateKeySecp256k1(key)
return key
def assertIsPublicKeyHex(self, key):
"""Sanity checks a hexidecimal string public key"""
self.assertIsInstance(key, str)
self.assertTrue(PUBLIC_KEY_PATTERN.match(key))
key = Secp256k1PublicKey.from_hex(key)
self.assertIsPublicKeySecp256k1(key)
return key
def assertIsPrivateKeyBytes(self, key):
"""Sanity checks a private key in bytes"""
self.assertIsInstance(key, bytes)
self.assertEqual(len(key), PRIVATE_KEY_LENGTH)
key = Secp256k1PrivateKey.from_hex(str(binascii.hexlify(key), "ascii"))
self.assertIsPrivateKeySecp256k1(key)
return key
def assertIsPublicKeyBytes(self, key):
"""Sanity checks a public key in bytes"""
self.assertIsInstance(key, bytes)
self.assertEqual(len(key), PUBLIC_KEY_LENGTH)
key = Secp256k1PublicKey.from_hex(str(binascii.hexlify(key), "ascii"))
self.assertIsPublicKeySecp256k1(key)
return key
def assertIsKeyPairSecp256k1(self, public_key, private_key):
"""Test that a given public_key and a given private_key are
a matched keypair"""
self.assertIsPublicKeySecp256k1(public_key)
self.assertIsPrivateKeySecp256k1(private_key)
context = sawtooth_signing.create_context(ELLIPTIC_CURVE_ALGORITHM)
self.assertEqual(
public_key.as_bytes(), context.get_public_key(private_key).as_bytes()
)
def assertIsKeyPairHex(self, public_key, private_key):
"""Test that a given public_key and a given private_key are
a matched keypair"""
self.assertIsInstance(public_key, Secp256k1PublicKey)
self.assertIsInstance(private_key, Secp256k1PrivateKey)
context = sawtooth_signing.create_context(ELLIPTIC_CURVE_ALGORITHM)
self.assertEqual(
public_key.as_bytes(), context.get_public_key(private_key).as_bytes()
)
avg_line_length: 42.801282 | max_line_length: 86 | alphanum_fraction: 0.702711
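As a usage illustration, here is a minimal sketch of a unittest-style test built on these helpers. It assumes the `rbac` package and `sawtooth_signing` are importable; the `Key` import and the test values are assumptions layered on top of the file above, not part of it.

```python
# Hypothetical usage sketch: exercising KeyAssertions from a unittest test.
# Assumption: Key is the secp256k1 key-pair generator in rbac.common.crypto.keys.
import unittest

from rbac.common.crypto.keys import Key
from tests.rbac.common.assertions.key import KeyAssertions


class TestKeyAssertions(KeyAssertions):
    # Assumption: CommonAssertions ultimately derives from unittest.TestCase,
    # so this class is collectible by the unittest runner.
    def test_generated_pair_round_trips(self):
        txn_key = Key()  # fresh random key pair
        # Hex strings, bytes, and Secp256k1 objects are all accepted
        # and normalized by the helpers above.
        public_key = self.assertIsPublicKey(txn_key.public_key)
        private_key = self.assertIsPrivateKey(txn_key.private_key)
        self.assertIsKeyPair(public_key, private_key)


if __name__ == "__main__":
    unittest.main()
```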
hexsha: 8384c7dc83ddb4e7c8ed76df8619f0b612ea928a | size: 370 | ext: py | lang: Python
max_stars: 2,085 (2019-04-17T13:10:40.000Z to 2022-03-30T21:51:46.000Z) | path: exercises/pt/solution_02_07.py | repo: Jette16/spacy-course | head: 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | licenses: ["MIT"]
max_issues: 79 (2019-04-18T14:42:55.000Z to 2022-03-07T08:15:43.000Z) | path: exercises/pt/solution_02_07.py | repo: Jette16/spacy-course | head: 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | licenses: ["MIT"]
max_forks: 361 (2019-04-17T13:34:32.000Z to 2022-03-28T04:42:45.000Z) | path: exercises/pt/solution_02_07.py | repo: Jette16/spacy-course | head: 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | licenses: ["MIT"]
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp("Berlin is a nice city")
# Iterate over the tokens
for token in doc:
    # Check whether the current token is a proper noun.
    if token.pos_ == "PROPN":
        # Check whether the next token is a verb; guard against the final token
        if token.i + 1 < len(doc) and doc[token.i + 1].pos_ == "VERB":
            print("Found proper noun before a verb:", token.text)
avg_line_length: 28.461538 | max_line_length: 65 | alphanum_fraction: 0.640541
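A variant of the same check written over consecutive token pairs, which sidesteps indexing past the end of the doc entirely. This is a sketch, not part of the course solution; note that on recent `en_core_web_sm` models the copula "is" may be tagged `AUX` rather than `VERB`, so this sentence may print nothing.

```python
# Sketch: the same proper-noun-before-verb check over consecutive token pairs.
import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("Berlin is a nice city")

tokens = list(doc)
for current, nxt in zip(tokens, tokens[1:]):
    if current.pos_ == "PROPN" and nxt.pos_ == "VERB":
        print("Found proper noun before a verb:", current.text)
```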
hexsha: 83c6133d5ab28e0457c672ccdadcee4994046af1 | size: 682 | ext: py | lang: Python
max_stars: null | path: linear_data_structure/best_time_to_buy_and_sell_stock.py | repo: daesookimds/Algorithm | head: 76f4cbfe9000e8c1736f470138499e7c735fecaa | licenses: ["MIT"]
max_issues: null | path: linear_data_structure/best_time_to_buy_and_sell_stock.py | repo: daesookimds/Algorithm | head: 76f4cbfe9000e8c1736f470138499e7c735fecaa | licenses: ["MIT"]
max_forks: null | path: linear_data_structure/best_time_to_buy_and_sell_stock.py | repo: daesookimds/Algorithm | head: 76f4cbfe9000e8c1736f470138499e7c735fecaa | licenses: ["MIT"]
import sys
from typing import List
def maxProfit_brute_force(prices: List[int]) -> int:
max_price = 0
for i, price in enumerate(prices):
for j in range(i, len(prices)):
max_price = max(prices[j] - price, max_price)
return max_price
def maxProfit_Kadanes(prices: List[int]) -> int:
max_profit = 0
min_price = sys.maxsize
for price in prices:
min_price = min(min_price, price)
max_profit = max(max_profit, price - min_price)
return max_profit
def test_case():
case1 = [7, 1, 5, 3, 6, 4]
result1 = maxProfit_brute_force(case1)
print(result1)
result2 = maxProfit_Kadanes(case1)
    print(result2)


if __name__ == "__main__":
    test_case()
avg_line_length: 22 | max_line_length: 57 | alphanum_fraction: 0.651026
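A few extra checks (values chosen here, not from the source) comparing the O(n²) brute force against the O(n) Kadane-style scan; paste at the bottom of the module so both functions are in scope.

```python
# Assumed extra cases; both implementations should agree on each.
cases = [
    ([7, 1, 5, 3, 6, 4], 5),  # buy at 1, sell at 6
    ([7, 6, 4, 3, 1], 0),     # prices only fall: never trade
    ([2], 0),                 # a single price allows no trade
]
for prices, expected in cases:
    assert maxProfit_brute_force(prices) == expected
    assert maxProfit_Kadanes(prices) == expected
print("all cases passed")
```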
hexsha: 79134acaa833a965bf2edf94b06847d2f288049a | size: 2,923 | ext: py | lang: Python
max_stars: 1 (2021-12-02T03:07:35.000Z to 2021-12-02T03:07:35.000Z) | path: test/test_npu/test_network_ops/test_tril.py | repo: Ascend/pytorch | head: 39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc | licenses: ["BSD-3-Clause"]
max_issues: 1 (2021-11-12T07:23:03.000Z to 2021-11-12T08:28:13.000Z) | path: test/test_npu/test_network_ops/test_tril.py | repo: Ascend/pytorch | head: 39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc | licenses: ["BSD-3-Clause"]
max_forks: null | path: test/test_npu/test_network_ops/test_tril.py | repo: Ascend/pytorch | head: 39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc | licenses: ["BSD-3-Clause"]
# Copyright (c) 2020 Huawei Technologies Co., Ltd
# Copyright (c) 2019, Facebook CORPORATION.
# All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestTril(TestCase):
def test_tril(self, device):
dtype_list = [np.float32, np.float16]
format_list = [0, 3, 4]
        shape_list = [[5, 5], [4, 5, 6]]
diagonal_list = [-1, 0, 1]
shape_format = [
[i, j, k, l] for i in dtype_list for j in format_list for k in shape_list for l in diagonal_list
]
for item in shape_format:
cpu_input, npu_input = create_common_tensor(item[:-1], 0, 100)
cpu_output = self.cpu_op_exec(cpu_input, item[-1])
npu_output = self.npu_op_exec(npu_input, item[-1])
self.assertRtolEqual(cpu_output, npu_output)
def test_tril_inplace(self, device):
dtype_list = [np.float32, np.float16]
format_list = [0, 3, 4]
shape_list = [[5, 5], [4, 5, 6]]
diagonal_list = [-1, 0, 1]
shape_format = [
[i, j, k, l] for i in dtype_list for j in format_list for k in shape_list for l in diagonal_list
]
for item in shape_format:
cpu_input, npu_input = create_common_tensor(item[:-1], 0, 100)
cpu_output = self.cpu_op_inplace_exec(cpu_input, item[-1])
npu_output = self.npu_op_inplace_exec(npu_input, item[-1])
self.assertRtolEqual(cpu_output, npu_output)
def cpu_op_exec(self, input, diagonal=0):
output = torch.tril(input, diagonal)
output = output.numpy()
return output
def npu_op_exec(self, input, diagonal=0):
output = torch.tril(input, diagonal)
output = output.to("cpu")
output = output.numpy()
return output
def cpu_op_inplace_exec(self, input, diagonal=0):
output = input.tril_(diagonal)
output = output.numpy()
return output
def npu_op_inplace_exec(self, input, diagonal=0):
output = input.tril_(diagonal)
output = output.to("cpu")
output = output.numpy()
return output
instantiate_device_type_tests(TestTril, globals(), except_for="cpu")
if __name__ == "__main__":
run_tests()
avg_line_length: 37.474359 | max_line_length: 108 | alphanum_fraction: 0.660965
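For reference, a standalone illustration of the `torch.tril` semantics the test exercises, runnable on plain CPU tensors with stock PyTorch (no NPU or the `common_utils` harness needed):

```python
import torch

x = torch.arange(1.0, 10.0).reshape(3, 3)
print(torch.tril(x))               # keep the main diagonal and everything below
print(torch.tril(x, diagonal=-1))  # strictly below the main diagonal
print(torch.tril(x, diagonal=1))   # also keep one diagonal above
```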
hexsha: 5837df7ae8b3b0619f927476c91a7d0f28561ac5 | size: 3,397 | ext: py | lang: Python
max_stars: null | path: snake/hadOG.py | repo: Bertik23/spg | head: f6449f1ca8f3a869f0f493f3988b3d84901c1be0 | licenses: ["MIT"]
max_issues: null | path: snake/hadOG.py | repo: Bertik23/spg | head: f6449f1ca8f3a869f0f493f3988b3d84901c1be0 | licenses: ["MIT"]
max_forks: null | path: snake/hadOG.py | repo: Bertik23/spg | head: f6449f1ca8f3a869f0f493f3988b3d84901c1be0 | licenses: ["MIT"]
from tkinter import Tk, Canvas
from random import randint
class Bunka:
def __init__(self, x,y):
self.vzhled = platno.create_rectangle(x*VEL,y*VEL+50, (x+1)*VEL, (y+1)*VEL + 50, fill="white",width=0)
self.cislo = 0
#self.text = platno.create_text(x*VEL+VEL/2, y*VEL+VEL/2 + 50, text = "0", anchor = "c", fill="#888")
class Had:
def __init__(self,x,y,delka,smer,barva,jmeno = "Anonym",hrac = False,nahoru = "",doprava="",dolu="",doleva=""):
self.x = x
self.y = y
self.delka = delka
self.smer = smer
self.barva = barva
self.jmeno = jmeno
self.hrac = hrac
self.nahoru = nahoru
self.dolu = dolu
self.doprava = doprava
self.doleva = doleva
def logika(self):
        # Your code goes here...
        # self.smer = new direction
self.smer = randint(1,4)
def pohyb():
    for had in list(hadi):  # iterate over a copy; snakes may be removed mid-loop
        # Invoke the computer-controlled snakes
        if not had.hrac:
            had.logika()
if had.smer == 1: had.y -= 1
if had.smer == 2: had.x += 1
if had.smer == 3: had.y += 1
if had.smer == 4: had.x -= 1
if had.x<0: had.x = POCET-1
if had.y<0: had.y = POCET-1
if had.x==POCET: had.x = 0
if had.y==POCET: had.y = 0
if sit[had.x][had.y].cislo > 0:
hadi.remove(had)
continue
if sit[had.x][had.y].cislo == -1:
had.delka += 1
drobek()
sit[had.x][had.y].cislo = had.delka + 1
platno.itemconfig(sit[had.x][had.y].vzhled, fill = had.barva)
for i in range(POCET):
for j in range(POCET):
if sit[i][j].cislo>0:
sit[i][j].cislo -= 1
#platno.itemconfig(sit[i][j].text, text = sit[i][j].cislo)
if sit[i][j].cislo == 0:
platno.itemconfig(sit[i][j].vzhled, fill = "white")
okno.after(1000//FPS, pohyb)
def drobek():
x = randint(0,POCET-1)
y = randint(0,POCET-1)
while sit[x][y].cislo!=0:
x = randint(0,POCET-1)
y = randint(0,POCET-1)
sit[x][y].cislo = -1
platno.itemconfig(sit[x][y].vzhled, fill = "orange")
def stisk(e):
k = e.keysym
for had in hadi:
if had.hrac:
if k == had.nahoru and had.smer!=3:
had.smer = 1
if k == had.doprava and had.smer!=4:
had.smer = 2
if k == had.dolu and had.smer!=1:
had.smer = 3
if k == had.doleva and had.smer!=2:
had.smer = 4
POCET = 40
VEL = 500/POCET
FPS = 20
DROBKY = 10
okno = Tk()
platno = Canvas(okno, height = 550, width=500)
platno.pack()
sit = []
for i in range(POCET):
sit.append([])
for j in range(POCET):
sit[i].append(Bunka(i,j))
hadi = [
Had(x = 10, y = 10, delka = 1, smer = 1, barva = "red", jmeno = "Jarda", hrac = True,
nahoru = "Up", doprava = "Right", dolu = "Down", doleva = "Left"),
Had(x = 5, y = 5, delka = 20, smer = 3, barva = "blue", jmeno = "Pepa", hrac = True,
nahoru = "w", doprava = "d", dolu = "s", doleva = "a"),
Had(x = 20, y = 20, delka = 1, smer = 1, barva = "black", jmeno = "Komp", hrac = False)
]
for i in range(DROBKY):
drobek()
okno.bind("<Key>",stisk)
pohyb()
okno.mainloop()
avg_line_length: 30.061947 | max_line_length: 115 | alphanum_fraction: 0.499558
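The `logika` method above is the hook the comments mark as "your code goes here". As one possibility, a greedy controller could scan the shared `sit` grid for crumbs (cells with `cislo == -1`) and steer toward the nearest one; the sketch below is an assumption layered on the game's globals (`sit`, `POCET`) and the `Had` fields, not part of the original script.

```python
def logika_greedy(self):
    # Hypothetical greedy AI for Had: head toward the nearest crumb.
    crumbs = [(i, j) for i in range(POCET) for j in range(POCET)
              if sit[i][j].cislo == -1]
    if not crumbs:
        return
    # Nearest crumb by Manhattan distance (the grid is indexed [x][y]).
    tx, ty = min(crumbs, key=lambda c: abs(c[0] - self.x) + abs(c[1] - self.y))
    if ty < self.y:
        self.smer = 1  # up
    elif tx > self.x:
        self.smer = 2  # right
    elif ty > self.y:
        self.smer = 3  # down
    elif tx < self.x:
        self.smer = 4  # left
```

Swapping it in would be a one-line change, e.g. `Had.logika = logika_greedy` before `pohyb()` starts.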
hexsha: 54bf5f5c9e4281becf7b4687643faf8d21849474 | size: 1,198 | ext: py | lang: Python
max_stars: 16 (2018-11-26T08:39:42.000Z to 2019-05-08T10:09:52.000Z) | path: python/coursera_python/WESLEYAN/week3/COURSERA/week_3/count_words.py | repo: SayanGhoshBDA/code-backup | head: 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | licenses: ["MIT"]
max_issues: 8 (2020-05-04T06:29:26.000Z to 2022-02-12T05:33:16.000Z) | path: python/coursera_python/WESLEYAN/week3/COURSERA/week_3/count_words.py | repo: SayanGhoshBDA/code-backup | head: 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | licenses: ["MIT"]
max_forks: 5 (2020-02-11T16:02:21.000Z to 2021-02-05T07:48:30.000Z) | path: python/coursera_python/WESLEYAN/week3/COURSERA/week_3/count_words.py | repo: SayanGhoshBDA/code-backup | head: 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | licenses: ["MIT"]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 29 19:46:42 2017
@author: root
"""
# -count_words.py *- coding: utf-8 -*-
import sys
filename = sys.argv[1]
# print("\n",filename,"\n") # You can check that the filename is correct
text_file = open(filename) # open the file for reading
# Set up an empty dictionary to start a standard design pattern loop
words_dic = {}
# This loop adds each word to the dictionary and updates its count. Change
# all words to lower case so Horse and horse are seen as the same word.
for line in text_file: # step through each line in the text file
for word in line.lower().split(): # split into a list of words
word = word.strip("'?,.;!-/\"") # strip out the stuff we ignore
if word not in words_dic:
words_dic[word] = 0 # add word to words with 0 count
words_dic[word] = words_dic[word] + 1 # add 1 to the count
text_file.close()
# Sorts the dictionary words into a list and then print them out
print("List of words in the file with number of times each appears.")
word_list = sorted(words_dic)
for word in word_list:
print(words_dic[word], word)
avg_line_length: 33.277778 | max_line_length: 75 | alphanum_fraction: 0.661102
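An equivalent, more idiomatic version of the counting loop using `collections.Counter`; the strip characters and lower-casing mirror the script above, so the output should match.

```python
import sys
from collections import Counter

counts = Counter()
with open(sys.argv[1]) as text_file:
    for line in text_file:
        counts.update(word.strip("'?,.;!-/\"") for word in line.lower().split())

print("List of words in the file with number of times each appears.")
for word in sorted(counts):
    print(counts[word], word)
```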
hexsha: 3fb97c881c85959537b8a2aaf4c90f6a3db9f93e | size: 36 | ext: py | lang: Python
max_stars: 1 (2020-08-16T04:04:23.000Z to 2020-08-16T04:04:23.000Z) | path: lib/python3.5/tempfile.py | repo: hwroitzsch/BikersLifeSaver | head: 469c738fdd6352c44a3f20689b17fa8ac04ad8a2 | licenses: ["MIT"]
max_issues: 5 (2020-06-05T18:53:24.000Z to 2021-12-13T19:49:15.000Z) | path: lib/python3.5/tempfile.py | repo: hwroitzsch/BikersLifeSaver | head: 469c738fdd6352c44a3f20689b17fa8ac04ad8a2 | licenses: ["MIT"]
max_forks: null | path: lib/python3.5/tempfile.py | repo: hwroitzsch/BikersLifeSaver | head: 469c738fdd6352c44a3f20689b17fa8ac04ad8a2 | licenses: ["MIT"]
/usr/local/lib/python3.5/tempfile.py
avg_line_length: 36 | max_line_length: 36 | alphanum_fraction: 0.805556
hexsha: 3fc2ecdc29177b616df5342cd72fee12b0128df5 | size: 157 | ext: py | lang: Python
max_stars: 16 (2018-11-26T08:39:42.000Z to 2019-05-08T10:09:52.000Z) | path: python/deep_learning/FUNCTIONAL/1.py | repo: SayanGhoshBDA/code-backup | head: 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | licenses: ["MIT"]
max_issues: 8 (2020-05-04T06:29:26.000Z to 2022-02-12T05:33:16.000Z) | path: python/deep_learning/FUNCTIONAL/1.py | repo: SayanGhoshBDA/code-backup | head: 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | licenses: ["MIT"]
max_forks: 5 (2020-02-11T16:02:21.000Z to 2021-02-05T07:48:30.000Z) | path: python/deep_learning/FUNCTIONAL/1.py | repo: SayanGhoshBDA/code-backup | head: 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | licenses: ["MIT"]
def running_sum(numbers, start=0):
    if len(numbers) == 0:
        print()
        return
    total = numbers[0] + start
    print(total, end=" ")
    running_sum(numbers[1:], total)
avg_line_length: 19.625 | max_line_length: 34 | alphanum_fraction: 0.675159
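Example call, assuming `running_sum` from above is in scope; with the separator fix it prints the running sums on one line. The `itertools.accumulate` line shows the same sums computed without recursion.

```python
running_sum([1, 2, 3])  # prints: 1 3 6

from itertools import accumulate
print(*accumulate([1, 2, 3]))  # prints: 1 3 6
```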
hexsha: b77e564f01912cedaa7b35edeafdfb0ff59fb34c | size: 28,052 | ext: py | lang: Python
max_stars: null | path: python/oneflow/utils/vision/transforms/functional.py | repo: wangyuyue/oneflow | head: 0a71c22fe8355392acc8dc0e301589faee4c4832 | licenses: ["Apache-2.0"]
max_issues: null | path: python/oneflow/utils/vision/transforms/functional.py | repo: wangyuyue/oneflow | head: 0a71c22fe8355392acc8dc0e301589faee4c4832 | licenses: ["Apache-2.0"]
max_forks: null | path: python/oneflow/utils/vision/transforms/functional.py | repo: wangyuyue/oneflow | head: 0a71c22fe8355392acc8dc0e301589faee4c4832 | licenses: ["Apache-2.0"]
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import warnings
import numbers
from enum import Enum
from typing import List, Any, Tuple, Optional
import numpy as np
from PIL import Image
import math
try:
import accimage
except ImportError:
accimage = None
import oneflow as flow
from oneflow.framework.tensor import Tensor
from . import functional_pil as F_pil
from . import functional_tensor as F_t
class InterpolationMode(Enum):
r"""Interpolation modes
"""
NEAREST = "nearest"
BILINEAR = "bilinear"
BICUBIC = "bicubic"
# For PIL compatibility
BOX = "box"
HAMMING = "hamming"
LANCZOS = "lanczos"
def _interpolation_modes_from_int(i: int) -> InterpolationMode:
inverse_modes_mapping = {
0: InterpolationMode.NEAREST,
2: InterpolationMode.BILINEAR,
3: InterpolationMode.BICUBIC,
4: InterpolationMode.BOX,
5: InterpolationMode.HAMMING,
1: InterpolationMode.LANCZOS,
}
return inverse_modes_mapping[i]
pil_modes_mapping = {
InterpolationMode.NEAREST: 0,
InterpolationMode.BILINEAR: 2,
InterpolationMode.BICUBIC: 3,
InterpolationMode.BOX: 4,
InterpolationMode.HAMMING: 5,
InterpolationMode.LANCZOS: 1,
}
def _get_image_size(img: Tensor) -> List[int]:
"""Returns image size as [w, h]
"""
if isinstance(img, flow.Tensor):
return F_t._get_image_size(img)
return F_pil._get_image_size(img)
def _get_image_num_channels(img: Tensor) -> int:
"""Returns number of image channels
"""
if isinstance(img, flow.Tensor):
return F_t._get_image_num_channels(img)
return F_pil._get_image_num_channels(img)
def _is_pil_image(img: Any) -> bool:
if accimage is not None:
return isinstance(img, (Image.Image, accimage.Image))
else:
return isinstance(img, Image.Image)
def _is_numpy(img: Any) -> bool:
return isinstance(img, np.ndarray)
def _is_numpy_image(img: Any) -> bool:
return img.ndim in {2, 3}
def to_tensor(pic):
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
See :class:`~transforms.ToTensor` for more details.
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
if not (_is_pil_image(pic) or _is_numpy(pic)):
raise TypeError("pic should be PIL Image or ndarray. Got {}".format(type(pic)))
if _is_numpy(pic) and not _is_numpy_image(pic):
raise ValueError(
"pic should be 2/3 dimensional. Got {} dimensions.".format(pic.ndim)
)
# default_float_dtype = flow.get_default_dtype()
default_float_dtype = flow.float32
if isinstance(pic, np.ndarray):
# handle numpy array
if pic.ndim == 2:
pic = pic[:, :, None]
img = flow.Tensor(pic.transpose((2, 0, 1)))
# backward compatibility
if img.dtype == flow.int:
return img.to(dtype=default_float_dtype).div(255)
else:
return img
if accimage is not None and isinstance(pic, accimage.Image):
nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
pic.copyto(nppic)
return flow.Tensor(nppic).to(dtype=default_float_dtype)
# handle PIL Image
mode_to_nptype = {"I": np.int32, "I;16": np.int16, "F": np.float32}
if mode_to_nptype.get(pic.mode, np.uint8) == np.uint8:
dtype = flow.int32
else:
dtype = flow.float32
img = flow.Tensor(
np.array(pic, mode_to_nptype.get(pic.mode, np.uint8), copy=True), dtype=dtype,
)
if pic.mode == "1":
img = 255 * img
img = flow.reshape(img, shape=(pic.size[1], pic.size[0], len(pic.getbands())))
# put it from HWC to CHW format
res = img.permute(2, 0, 1)
if img.dtype == flow.int:
res = res.to(dtype=default_float_dtype).div(255)
return res
def pil_to_tensor(pic):
"""Convert a ``PIL Image`` to a tensor of the same type.
See :class:`~vision.transforms.PILToTensor` for more details.
Args:
pic (PIL Image): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
if not F_pil._is_pil_image(pic):
raise TypeError("pic should be PIL Image. Got {}".format(type(pic)))
if accimage is not None and isinstance(pic, accimage.Image):
# accimage format is always uint8 internally, so always return uint8 here
nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.uint8)
pic.copyto(nppic)
return flow.tensor(nppic)
# handle PIL Image
img = flow.tensor(np.asarray(pic))
img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
# put it from HWC to CHW format
img = img.permute((2, 0, 1))
return img
def convert_image_dtype(
image: flow.Tensor, dtype: flow.dtype = flow.float
) -> flow.Tensor:
"""Convert a tensor image to the given ``dtype`` and scale the values accordingly
This function does not support PIL Image.
Args:
image (flow.Tensor): Image to be converted
dtype (flow.dtype): Desired data type of the output
Returns:
Tensor: Converted image
.. note::
When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
If converted back and forth, this mismatch has no effect.
Raises:
RuntimeError: When trying to cast :class:`flow.float32` to :class:`flow.int32` or :class:`flow.int64` as
well as for trying to cast :class:`flow.float64` to :class:`flow.int64`. These conversions might lead to
overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
of the integer ``dtype``.
"""
if not isinstance(image, flow.Tensor):
raise TypeError("Input img should be Tensor Image")
return F_t.convert_image_dtype(image, dtype)
def normalize(
tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False
) -> Tensor:
"""Normalize a float tensor image with mean and standard deviation.
This transform does not support PIL Image.
.. note::
    This transform acts out of place by default, i.e., it does not mutate the input tensor.
See :class:`~transforms.Normalize` for more details.
Args:
tensor (Tensor): Float tensor image of size (C, H, W) or (B, C, H, W) to be normalized.
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
inplace(bool,optional): Bool to make this operation inplace.
Returns:
Tensor: Normalized Tensor image.
"""
if not isinstance(tensor, flow.Tensor):
raise TypeError(
"Input tensor should be a oneflow tensor. Got {}.".format(type(tensor))
)
if not tensor.dtype == flow.float:
raise TypeError(
"Input tensor should be a float tensor. Got {}.".format(tensor.dtype)
)
if tensor.ndim < 3:
raise ValueError(
"Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = "
"{}.".format(tensor.size())
)
if not inplace:
tensor = tensor.clone()
dtype = tensor.dtype
mean = flow.tensor(mean, dtype=dtype, device=tensor.device)
std = flow.tensor(std, dtype=dtype, device=tensor.device)
# TODO: use tensor.any()
# if (std == 0).any():
if std.eq(0).sum().numpy() > 0:
raise ValueError(
"std evaluated to zero after conversion to {}, leading to division by zero.".format(
dtype
)
)
if mean.ndim == 1:
mean = mean.reshape(-1, 1, 1)
if std.ndim == 1:
std = std.reshape(-1, 1, 1)
tensor = tensor.sub(mean).div(std)
# tensor.sub_(mean).div_(std)
return tensor
def resize(
img: Tensor,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
) -> Tensor:
r"""Resize the input image to the given size.
If the image is oneflow Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
Args:
img (PIL Image or Tensor): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
the smaller edge of the image will be matched to this number maintaining
the aspect ratio. i.e, if height > width, then image will be rescaled to
:math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`flow.utils.vision.transforms.InterpolationMode`.
Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
Returns:
PIL Image or Tensor: Resized image.
"""
# Backward compatibility with integer value
if isinstance(interpolation, int):
warnings.warn(
"Argument interpolation should be of type InterpolationMode instead of int. "
"Please, use InterpolationMode enum."
)
interpolation = _interpolation_modes_from_int(interpolation)
if not isinstance(interpolation, InterpolationMode):
raise TypeError("Argument interpolation should be a InterpolationMode")
if not isinstance(img, (flow.Tensor, flow._oneflow_internal.Tensor)):
pil_interpolation = pil_modes_mapping[interpolation]
return F_pil.resize(img, size=size, interpolation=pil_interpolation)
return F_t.resize(img, size=size, interpolation=interpolation.value)
def scale(*args, **kwargs):
warnings.warn(
"The use of the transforms.Scale transform is deprecated, "
+ "please use transforms.Resize instead."
)
return resize(*args, **kwargs)
def pad(
img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant"
) -> Tensor:
r"""Pad the given image on all sides with the given "pad" value.
If the image is oneflow Tensor, it is expected
to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric,
at most 3 leading dimensions for mode edge,
and an arbitrary number of leading dimensions for mode constant
Args:
img (PIL Image or Tensor): Image to be padded.
padding (int or sequence): Padding on each border. If a single int is provided this
is used to pad all borders. If sequence of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a sequence of length 4 is provided
this is the padding for the left, top, right and bottom borders respectively.
fill (number or str or tuple): Pixel fill value for constant fill. Default is 0.
If a tuple of length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant.
Only number is supported for oneflow Tensor.
Only int or str or tuple value is supported for PIL Image.
padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value at the edge of the image.
If input a 5D oneflow Tensor, the last 3 dimensions will be padded instead of the last 2
- reflect: pads with reflection of image without repeating the last value on the edge.
For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image repeating the last value on the edge.
For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
Returns:
PIL Image or Tensor: Padded image.
"""
if not isinstance(img, flow.Tensor):
return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
return F_t.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
"""Crop the given image at specified location and output size.
If the image is oneflow Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
If image size is smaller than output size along any edge, image is padded with 0 and then cropped.
Args:
img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
top (int): Vertical component of the top left corner of the crop box.
left (int): Horizontal component of the top left corner of the crop box.
height (int): Height of the crop box.
width (int): Width of the crop box.
Returns:
PIL Image or Tensor: Cropped image.
"""
if not isinstance(img, flow.Tensor):
return F_pil.crop(img, top, left, height, width)
return F_t.crop(img, top, left, height, width)
def center_crop(img: Tensor, output_size: List[int]) -> Tensor:
"""Crops the given image at the center.
If the image is oneflow Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.
Args:
img (PIL Image or Tensor): Image to be cropped.
output_size (sequence or int): (height, width) of the crop box. If int or sequence with single int,
it is used for both directions.
Returns:
PIL Image or Tensor: Cropped image.
"""
if isinstance(output_size, numbers.Number):
output_size = (int(output_size), int(output_size))
elif isinstance(output_size, (tuple, list)) and len(output_size) == 1:
output_size = (output_size[0], output_size[0])
image_width, image_height = _get_image_size(img)
crop_height, crop_width = output_size
if crop_width > image_width or crop_height > image_height:
padding_ltrb = [
(crop_width - image_width) // 2 if crop_width > image_width else 0,
(crop_height - image_height) // 2 if crop_height > image_height else 0,
(crop_width - image_width + 1) // 2 if crop_width > image_width else 0,
(crop_height - image_height + 1) // 2 if crop_height > image_height else 0,
]
img = pad(img, padding_ltrb, fill=0) # PIL uses fill value 0
image_width, image_height = _get_image_size(img)
if crop_width == image_width and crop_height == image_height:
return img
crop_top = int(round((image_height - crop_height) / 2.0))
crop_left = int(round((image_width - crop_width) / 2.0))
return crop(img, crop_top, crop_left, crop_height, crop_width)
def resized_crop(
img: Tensor,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
) -> Tensor:
"""Crop the given image and resize it to desired size.
If the image is oneflow Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
Notably used in :class:`~vision.transforms.RandomResizedCrop`.
Args:
img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
top (int): Vertical component of the top left corner of the crop box.
left (int): Horizontal component of the top left corner of the crop box.
height (int): Height of the crop box.
width (int): Width of the crop box.
size (sequence or int): Desired output size. Same semantics as ``resize``.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`vision.transforms.InterpolationMode`.
Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
Returns:
PIL Image or Tensor: Cropped image.
"""
img = crop(img, top, left, height, width)
img = resize(img, size, interpolation)
return img
def hflip(img: Tensor) -> Tensor:
"""Horizontally flip the given image.
Args:
img (PIL Image or Tensor): Image to be flipped. If img
is a Tensor, it is expected to be in [..., H, W] format,
where ... means it can have an arbitrary number of leading
dimensions.
Returns:
PIL Image or Tensor: Horizontally flipped image.
"""
if not isinstance(img, flow.Tensor):
return F_pil.hflip(img)
return F_t.hflip(img)
def vflip(img: Tensor) -> Tensor:
"""Vertically flip the given image.
Args:
img (PIL Image or Tensor): Image to be flipped. If img
is a Tensor, it is expected to be in [..., H, W] format,
where ... means it can have an arbitrary number of leading
dimensions.
Returns:
PIL Image or Tensor: Vertically flipped image.
"""
if not isinstance(img, flow.Tensor):
return F_pil.vflip(img)
return F_t.vflip(img)
def five_crop(
img: Tensor, size: List[int]
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
"""Crop the given image into four corners and the central crop.
If the image is oneflow Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
.. Note::
This transform returns a tuple of images and there may be a
mismatch in the number of inputs and targets your ``Dataset`` returns.
Args:
img (PIL Image or Tensor): Image to be cropped.
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
Returns:
tuple: tuple (tl, tr, bl, br, center)
Corresponding top left, top right, bottom left, bottom right and center crop.
"""
if isinstance(size, numbers.Number):
size = (int(size), int(size))
elif isinstance(size, (tuple, list)) and len(size) == 1:
size = (size[0], size[0])
if len(size) != 2:
raise ValueError("Please provide only two dimensions (h, w) for size.")
image_width, image_height = _get_image_size(img)
crop_height, crop_width = size
if crop_width > image_width or crop_height > image_height:
msg = "Requested crop size {} is bigger than input size {}"
raise ValueError(msg.format(size, (image_height, image_width)))
tl = crop(img, 0, 0, crop_height, crop_width)
tr = crop(img, 0, image_width - crop_width, crop_height, crop_width)
bl = crop(img, image_height - crop_height, 0, crop_height, crop_width)
br = crop(
img,
image_height - crop_height,
image_width - crop_width,
crop_height,
crop_width,
)
center = center_crop(img, [crop_height, crop_width])
return tl, tr, bl, br, center
def ten_crop(img: Tensor, size: List[int], vertical_flip: bool = False) -> List[Tensor]:
"""Generate ten cropped images from the given image.
Crop the given image into four corners and the central crop plus the
flipped version of these (horizontal flipping is used by default).
If the image is oneflow Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
.. Note::
This transform returns a tuple of images and there may be a
mismatch in the number of inputs and targets your ``Dataset`` returns.
Args:
img (PIL Image or Tensor): Image to be cropped.
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
vertical_flip (bool): Use vertical flipping instead of horizontal
Returns:
tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
Corresponding top left, top right, bottom left, bottom right and
center crop and same for the flipped image.
"""
if isinstance(size, numbers.Number):
size = (int(size), int(size))
elif isinstance(size, (tuple, list)) and len(size) == 1:
size = (size[0], size[0])
if len(size) != 2:
raise ValueError("Please provide only two dimensions (h, w) for size.")
first_five = five_crop(img, size)
if vertical_flip:
img = vflip(img)
else:
img = hflip(img)
second_five = five_crop(img, size)
return first_five + second_five
def _get_inverse_affine_matrix(
center: List[float],
angle: float,
translate: List[float],
scale: float,
shear: List[float],
) -> List[float]:
# Helper method to compute inverse matrix for affine transformation
# As it is explained in PIL.Image.rotate
    # We need to compute the INVERSE of the affine transformation matrix: M = T * C * RSS * C^-1
# where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
# C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
# RSS is rotation with scale and shear matrix
# RSS(a, s, (sx, sy)) =
# = R(a) * S(s) * SHy(sy) * SHx(sx)
# = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(x)/cos(y) - sin(a)), 0 ]
# [ s*sin(a + sy)/cos(sy), s*(-sin(a - sy)*tan(x)/cos(y) + cos(a)), 0 ]
# [ 0 , 0 , 1 ]
#
# where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears:
# SHx(s) = [1, -tan(s)] and SHy(s) = [1 , 0]
# [0, 1 ] [-tan(s), 1]
#
# Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1
rot = math.radians(angle)
sx, sy = [math.radians(s) for s in shear]
cx, cy = center
tx, ty = translate
# RSS without scaling
a = math.cos(rot - sy) / math.cos(sy)
b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot)
c = math.sin(rot - sy) / math.cos(sy)
d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot)
# Inverted rotation matrix with scale and shear
# det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
matrix = [d, -b, 0.0, -c, a, 0.0]
matrix = [x / scale for x in matrix]
# Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty)
matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty)
# Apply center translation: C * RSS^-1 * C^-1 * T^-1
matrix[2] += cx
matrix[5] += cy
return matrix
def rotate(
img: Tensor,
angle: float,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[int]] = None,
fill: Optional[List[float]] = None,
resample: Optional[int] = None,
) -> Tensor:
"""Rotate the image by angle.
If the image is oneflow Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
img (PIL Image or Tensor): image to be rotated.
angle (number): rotation angle value in degrees, counter-clockwise.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`flow.utils.vision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
expand (bool, optional): Optional expansion flag.
If true, expands the output image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (sequence, optional): Optional center of rotation. Origin is the upper left corner.
Default is the center of the image.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
Returns:
PIL Image or Tensor: Rotated image.
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
"""
if resample is not None:
warnings.warn(
"Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead"
)
interpolation = _interpolation_modes_from_int(resample)
# Backward compatibility with integer value
if isinstance(interpolation, int):
warnings.warn(
"Argument interpolation should be of type InterpolationMode instead of int. "
"Please, use InterpolationMode enum."
)
interpolation = _interpolation_modes_from_int(interpolation)
if not isinstance(angle, (int, float)):
raise TypeError("Argument angle should be int or float")
if center is not None and not isinstance(center, (list, tuple)):
raise TypeError("Argument center should be a sequence")
if not isinstance(interpolation, InterpolationMode):
raise TypeError("Argument interpolation should be a InterpolationMode")
if not isinstance(img, flow.Tensor):
pil_interpolation = pil_modes_mapping[interpolation]
return F_pil.rotate(
img,
angle=angle,
interpolation=pil_interpolation,
expand=expand,
center=center,
fill=fill,
)
center_f = [0.0, 0.0]
if center is not None:
img_size = _get_image_size(img)
# Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, img_size)]
# due to current incoherence of rotation angle direction between affine and rotate implementations
# we need to set -angle.
matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0, [0.0, 0.0])
raise NotImplementedError("Tensor rotate is not implemented yet!")
return F_t.rotate(
img, matrix=matrix, interpolation=interpolation.value, expand=expand, fill=fill
)
avg_line_length: 38.165986 | max_line_length: 117 | alphanum_fraction: 0.645409
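As a usage sketch, here is a typical preprocessing chain built from the functions above (`resize`, `to_tensor`, `normalize`), assuming oneflow and PIL are installed and importing the module by its repository path; the image filename and the mean/std constants are illustrative assumptions, not mandated by the module.

```python
from PIL import Image
from oneflow.utils.vision.transforms import functional as F

img = Image.open("example.jpg")  # assumption: any RGB image on disk
img = F.resize(img, [224, 224])  # PIL path of resize()
x = F.to_tensor(img)             # HWC uint8 -> CHW float in [0, 1]
x = F.normalize(x, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
print(x.shape)                   # channels-first: (3, 224, 224)
```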
hexsha: b7a915028c6ddb65e436ca5cdd25b368728be6d1 | size: 405 | ext: py | lang: Python
max_stars: null | path: ebenezer/atencion/migrations/0010_level_orden.py | repo: davrv93/ebenezer-backend | head: d3db4dafd9a8c35bea9f32afe2be1dd451f64298 | licenses: ["Apache-2.0"]
max_issues: 3 (2020-02-11T23:15:00.000Z to 2021-06-10T20:52:17.000Z) | path: ebenezer/atencion/migrations/0010_level_orden.py | repo: davrv93/ebenezer-backend | head: d3db4dafd9a8c35bea9f32afe2be1dd451f64298 | licenses: ["Apache-2.0"]
max_forks: null | path: ebenezer/atencion/migrations/0010_level_orden.py | repo: davrv93/ebenezer-backend | head: d3db4dafd9a8c35bea9f32afe2be1dd451f64298 | licenses: ["Apache-2.0"]
# Generated by Django 2.1.2 on 2018-10-05 17:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('atencion', '0009_level'),
]
operations = [
migrations.AddField(
model_name='level',
name='orden',
field=models.IntegerField(default=1),
preserve_default=False,
),
]
avg_line_length: 20.25 | max_line_length: 49 | alphanum_fraction: 0.580247
hexsha: 4d2a3b1d6f0ad35aaed7ffb5e1ea9fabff02990f | size: 4,115 | ext: py | lang: Python
max_stars: 37 (2018-06-06T05:58:54.000Z to 2022-02-21T04:49:54.000Z) | path: canopy.py | repo: lizhanlian/CanopyByPython | head: 89f94f7b2713c536afbcbded43534b28977cfb24 | licenses: ["MIT"]
max_issues: null | path: Canopy/canopy.py | repo: YYangjlu/MachineLearningNote | head: d34a9b57af3b2c6f276d14c2a7a3dccadb585421 | licenses: ["MIT"]
max_forks: 35 (2018-10-01T16:12:34.000Z to 2022-02-21T03:35:33.000Z) | path: Canopy/canopy.py | repo: YYangjlu/MachineLearningNote | head: d34a9b57af3b2c6f276d14c2a7a3dccadb585421 | licenses: ["MIT"]
# -*- coding: utf-8 -*-
# @Author: Alan Lau
# @Date: 2017-09-05 22:56:16
# @Last Modified by: Alan Lau
# @Last Modified time: 2017-09-05 22:56:16
import math
import random
import numpy as np
from datetime import datetime
from pprint import pprint as p
import matplotlib.pyplot as plt
# Randomly generate 500 two-dimensional points in the [0, 1) plane
dataset = np.random.rand(500, 2)
class Canopy:
def __init__(self, dataset):
self.dataset = dataset
self.t1 = 0
self.t2 = 0
    # Set the initial thresholds
def setThreshold(self, t1, t2):
if t1 > t2:
self.t1 = t1
self.t2 = t2
else:
print('t1 needs to be larger than t2!')
    # Use Euclidean distance for the distance computation
def euclideanDistance(self, vec1, vec2):
return math.sqrt(((vec1 - vec2)**2).sum())
    # Pick a random index based on the current dataset length
def getRandIndex(self):
return random.randint(0, len(self.dataset) - 1)
    def clustering(self):
        if self.t1 == 0:
            print('Please set the threshold.')
        else:
            canopies = []  # holds the final clustering result
            # while len(self.dataset) != 0:
            # modified 2018-03-24
            while len(self.dataset) > 1:
                rand_index = self.getRandIndex()
                current_center = self.dataset[rand_index]  # randomly pick a center point P
                current_center_list = []  # initialize the canopy container for P
                delete_list = []  # initialize the deletion container for P
                self.dataset = np.delete(self.dataset, rand_index,
                                         0)  # remove the randomly chosen center P
                for datum_j in range(len(self.dataset)):
                    datum = self.dataset[datum_j]
                    distance = self.euclideanDistance(
                        current_center, datum)  # distance from center P to each point
                    if distance < self.t1:
                        # if the distance is below t1, add the point to P's canopy
                        current_center_list.append(datum)
                        if distance < self.t2:
                            delete_list.append(datum_j)  # if below t2, mark for deletion
                # remove the marked elements from the dataset by index
                self.dataset = np.delete(self.dataset, delete_list, 0)
                canopies.append((current_center, current_center_list))
            return canopies
def showCanopy(canopies, dataset, t1, t2):
fig = plt.figure()
sc = fig.add_subplot(111)
colors = [
'brown', 'green', 'blue', 'y', 'r', 'tan', 'dodgerblue', 'deeppink',
'orangered', 'peru', 'blue', 'y', 'r', 'gold', 'dimgray', 'darkorange',
'peru', 'blue', 'y', 'r', 'cyan', 'tan', 'orchid', 'peru', 'blue', 'y',
'r', 'sienna'
]
markers = [
'*', 'h', 'H', '+', 'o', '1', '2', '3', ',', 'v', 'H', '+', '1', '2',
'^', '<', '>', '.', '4', 'H', '+', '1', '2', 's', 'p', 'x', 'D', 'd',
'|', '_'
]
for i in range(len(canopies)):
canopy = canopies[i]
center = canopy[0]
components = canopy[1]
sc.plot(
center[0],
center[1],
marker=markers[i],
color=colors[i],
markersize=10)
t1_circle = plt.Circle(
xy=(center[0], center[1]),
radius=t1,
color='dodgerblue',
fill=False)
t2_circle = plt.Circle(
xy=(center[0], center[1]), radius=t2, color='skyblue', alpha=0.2)
sc.add_artist(t1_circle)
sc.add_artist(t2_circle)
for component in components:
sc.plot(
component[0],
component[1],
marker=markers[i],
color=colors[i],
markersize=1.5)
maxvalue = np.amax(dataset)
minvalue = np.amin(dataset)
plt.xlim(minvalue - t1, maxvalue + t1)
plt.ylim(minvalue - t1, maxvalue + t1)
plt.show()
def main():
t1 = 0.6
t2 = 0.4
gc = Canopy(dataset)
gc.setThreshold(t1, t2)
canopies = gc.clustering()
print('Get %s initial centers.' % len(canopies))
showCanopy(canopies, dataset, t1, t2)
if __name__ == '__main__':
t_s = datetime.now()
main()
t_e = datetime.now()
usedtime = t_e - t_s
print('[%s]' % usedtime)
avg_line_length: 31.174242 | max_line_length: 79 | alphanum_fraction: 0.513487
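Canopy output is sensitive to the choice of t1 and t2. One common heuristic (an assumption added here, not part of the original script) derives them from sampled pairwise distances:

```python
import numpy as np

def suggest_thresholds(data, sample_size=100, ratio=2.0, seed=0):
    # Hypothetical helper: base t2 on the mean pairwise distance of a
    # random sample, and set t1 to a fixed multiple of t2 (so t1 > t2).
    rng = np.random.default_rng(seed)
    idx = rng.choice(len(data), size=min(sample_size, len(data)), replace=False)
    sample = data[idx]
    diffs = sample[:, None, :] - sample[None, :, :]
    dists = np.sqrt((diffs ** 2).sum(axis=-1))
    t2 = dists[np.triu_indices(len(sample), k=1)].mean() / 2
    return ratio * t2, t2

t1, t2 = suggest_thresholds(dataset)  # uses the module-level dataset above
```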
hexsha: 4d58cac56450dac5968ed2736596681fdac1a542 | size: 2,389 | ext: py | lang: Python
max_stars: 226 (2018-12-29T01:13:49.000Z to 2022-03-30T19:16:31.000Z) | path: src/doc/tutorials/tutorial_examples/scripting/listing_8_extracting_aggregate_values.py | repo: visit-dav/vis | head: c08bc6e538ecd7d30ddc6399ec3022b9e062127e | licenses: ["BSD-3-Clause"]
max_issues: 5,100 (2019-01-14T18:19:25.000Z to 2022-03-31T23:08:36.000Z) | path: src/doc/tutorials/tutorial_examples/scripting/listing_8_extracting_aggregate_values.py | repo: visit-dav/vis | head: c08bc6e538ecd7d30ddc6399ec3022b9e062127e | licenses: ["BSD-3-Clause"]
max_forks: 84 (2019-01-24T17:41:50.000Z to 2022-03-10T10:01:46.000Z) | path: src/doc/tutorials/tutorial_examples/scripting/listing_8_extracting_aggregate_values.py | repo: visit-dav/vis | head: c08bc6e538ecd7d30ddc6399ec3022b9e062127e | licenses: ["BSD-3-Clause"]
###########################################
# file: VisIt Scripting Tutorial Listing 8
###########################################
#
# 1) Run visit from the CLI as described below.
#
####################
# Example that demonstrates looping over a dataset
# to extract an aggregate value at each timestep.
#
# visit -nowin -cli -s listing_8_extracting_aggregate_values.py wave.visit pressure wave_pressure_out
####################
import sys
from visit_utils import *
def setup_plot(dbname,varname, materials = None):
"""
Create a plot to query.
"""
OpenDatabase(dbname)
AddPlot("Pseudocolor",varname)
    if materials is not None:
        TurnMaterialsOff()
        # select the materials (by id)
        # example
        #   materials = [ "1", "2", "4"]
        for material in materials:
            TurnMaterialsOn(material)
DrawPlots()
def extract_curve(varname,obase,stride=1):
"""
Loop over all time steps and extract a value at each one.
"""
f = open(obase + ".ult","w")
f.write("# %s vs time\n" % varname)
nts = TimeSliderGetNStates()
for ts in range(0,nts,stride):
print("processing timestate: %d" % ts)
TimeSliderSetState(ts)
tval = query("Time")
# sums plotted variable scaled by
# area (2D mesh),
# revolved_volume (2D RZ mesh, or
# volume (3D mesh)
rval = query("Weighted Variable Sum")
# or you can use other queries, such as max:
# mval = query("Maximum")
res = "%s %s\n" % (str(tval),str(rval))
print(res)
f.write(res)
f.close()
def open_engine():
    # to open a parallel engine
# inside of an mxterm or batch script use:
engine.open(method="slurm")
# outside of an mxterm or batch script
# engine.open(nprocs=21)
def main():
nargs = len(sys.argv)
if nargs < 4:
usage_msg = "usage: visit -nowin -cli -s visit_extract_curve.py "
usage_msg += "{databsase_name} {variable_name} {output_file_base}"
print(usage_msg)
sys.exit(-1)
# get our args
dbname = sys.argv[1]
varname = sys.argv[2]
obase = sys.argv[3]
# if you need a parallel engine:
# open_engine()
setup_plot(dbname,varname)
extract_curve(varname,obase)
if __visit_script_file__ == __visit_source_file__:
main()
sys.exit(0)
avg_line_length: 27.77907 | max_line_length: 102 | alphanum_fraction: 0.583089
hexsha: 4db6fb4ee5c0300f7aef13f6f4f783c3cb480226 | size: 254 | ext: py | lang: Python
max_stars: null | path: Curso_Python/Secao4-Python-introducao-a-programacao-orientada-a-objetos-POO/092_classes-POO/treinando/main.py | repo: pedrohd21/Cursos-Feitos | head: b223aad83867bfa45ad161d133e33c2c200d42bd | licenses: ["MIT"]
max_issues: null | path: Curso_Python/Secao4-Python-introducao-a-programacao-orientada-a-objetos-POO/092_classes-POO/treinando/main.py | repo: pedrohd21/Cursos-Feitos | head: b223aad83867bfa45ad161d133e33c2c200d42bd | licenses: ["MIT"]
max_forks: null | path: Curso_Python/Secao4-Python-introducao-a-programacao-orientada-a-objetos-POO/092_classes-POO/treinando/main.py | repo: pedrohd21/Cursos-Feitos | head: b223aad83867bfa45ad161d133e33c2c200d42bd | licenses: ["MIT"]
from treinando import IndoShow
p1 = IndoShow()
# Tickets sold out
p1.fila()
p1.fila()
p1.acabou()
p1.comprando_ingresso()
print('#' * 30)
# Has the ticket
p1.fila()
p1.fila()
p1.comprando_ingresso()
p1.transporte('719')
p1.durante_show('foda'.upper())
avg_line_length: 14.111111 | max_line_length: 31 | alphanum_fraction: 0.708661
hexsha: 422213a77508b25409e7cf9fd8e17698faf731df | size: 377 | ext: py | lang: Python
max_stars: 144 (2018-07-13T07:47:50.000Z to 2022-03-31T06:29:50.000Z) | path: DeepRTS/python/test/test_deeprts.py | repo: Yigit-Arisoy/deep-rts | head: a5ed2c29b76789830df9f7075480c7229ccf0f4d | licenses: ["MIT"]
max_issues: 18 (2019-03-29T10:37:01.000Z to 2022-03-02T12:47:34.000Z) | path: DeepRTS/python/test/test_deeprts.py | repo: Yigit-Arisoy/deep-rts | head: a5ed2c29b76789830df9f7075480c7229ccf0f4d | licenses: ["MIT"]
max_forks: 23 (2018-11-02T18:12:51.000Z to 2022-02-15T20:32:18.000Z) | path: DeepRTS/python/test/test_deeprts.py | repo: Yigit-Arisoy/deep-rts | head: a5ed2c29b76789830df9f7075480c7229ccf0f4d | licenses: ["MIT"]
import unittest
from DeepRTS import python
from DeepRTS import Engine
class TestDeepRTSGame(unittest.TestCase):
def setUp(self) -> None:
self.game = python.Game(
python.Config.Map.FIFTEEN,
n_players=1,
engine_config=None,
gui_config=None,
)
def test_1(self):
self.assertTrue(self.game.players)
| 22.176471 | 42 | 0.623342 |
35f64e5ea885dff7a24fde0bb4e276d16b71487a
| 373 |
py
|
Python
|
leetcode/141-Linked-List-Cycle/LinkedListCycle_002.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
leetcode/141-Linked-List-Cycle/LinkedListCycle_002.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
leetcode/141-Linked-List-Cycle/LinkedListCycle_002.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
class Solution:
# @param head, a ListNode
# @return a boolean
def hasCycle(self, head):
        if head is None or head.next is None:
return False
slow = fast = head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
if slow == fast:
return True
return False
avg_line_length: 26.642857 | max_line_length: 45 | alphanum_fraction: 0.509383
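A self-contained check of `hasCycle`, with a minimal `ListNode` definition (on LeetCode the judge supplies this class):

```python
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None

a, b, c = ListNode(1), ListNode(2), ListNode(3)
a.next, b.next, c.next = b, c, a  # 1 -> 2 -> 3 -> back to 1

print(Solution().hasCycle(a))  # True
c.next = None                  # break the cycle: 1 -> 2 -> 3 -> None
print(Solution().hasCycle(a))  # False
```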
hexsha: 6792ab9177e5f849432e7e1487362888318a09f4 | size: 92 | ext: py | lang: Python
max_stars: 14 (2015-05-08T13:41:51.000Z to 2021-02-24T12:34:55.000Z) | path: 2014/11/student-debt/graphic_config.py | repo: nprapps/graphics-archive | head: 97b0ef326b46a959df930f5522d325e537f7a655 | licenses: ["FSFAP"]
max_issues: null | path: 2014/11/student-debt/graphic_config.py | repo: nprapps/graphics-archive | head: 97b0ef326b46a959df930f5522d325e537f7a655 | licenses: ["FSFAP"]
max_forks: 7 (2015-04-04T04:45:54.000Z to 2021-02-18T11:12:48.000Z) | path: 2014/11/student-debt/graphic_config.py | repo: nprapps/graphics-archive | head: 97b0ef326b46a959df930f5522d325e537f7a655 | licenses: ["FSFAP"]
#!/usr/bin/env python
COPY_GOOGLE_DOC_KEY = '14muD-D_AKwFCTeXRSoWx8pmbFXwQRdQjUFC6S0Uu_FA'
avg_line_length: 23 | max_line_length: 68 | alphanum_fraction: 0.836957
hexsha: 67a17be4cb65404aae1822aa913066e8852c12de | size: 14,425 | ext: py | lang: Python
max_stars: null | path: Solver.py | repo: timwuu/SokobanSolver | head: ae6d73516efa70fbf56ed4ca920b5ddc427d095d | licenses: ["MIT"]
max_issues: null | path: Solver.py | repo: timwuu/SokobanSolver | head: ae6d73516efa70fbf56ed4ca920b5ddc427d095d | licenses: ["MIT"]
max_forks: null | path: Solver.py | repo: timwuu/SokobanSolver | head: ae6d73516efa70fbf56ed4ca920b5ddc427d095d | licenses: ["MIT"]
import numpy as np
import hashlib
import copy
import datetime
import threading
# Notation: [ROW][COL]
# Note: Add Forbidden Cells to improve the efficiency
# Check duplicate state in the search tree, keep DEPTH info
# Add Progress Monitoring
# ?Store Search Nodes for next batch
# ?Add Heuristic Move
# ?Multithreading
# ?Save Node while max steps/depths exceeded
# DEFINE:
# map[][]:
# -1: WALL
# -2: BOX
# -3: PLAYER
# -9: BLANK
MAP_BLANK = -9
MAX_STEP_COUNT_LST_SIZE = 256
MAX_STEPS = 28
MAX_DEPTH = 6
MAP_ROW = 8
MAP_COL = 8
FORBIDDEN = [[1,4],[1,5],[2,1],[3,1],[4,6],[5,6],[7,2],[7,3]]
g_para_total_state_searched = 0
g_para_max_exceeded = 0
g_para_duplicate_state_count = 0
g_para_duplicate_state_count2 = 0
g_progress = 0.0
g_progress_prv_time = datetime.datetime.now()
g_tm_CountSteps2 = datetime.timedelta(0)
g_lst_1 = np.empty((MAX_STEP_COUNT_LST_SIZE, 2),dtype='u2')
g_lst_2 = np.empty((MAX_STEP_COUNT_LST_SIZE, 2),dtype='u2')
g_para_tm_start = datetime.datetime.now()
g_para_tm_diff = datetime.timedelta(0)
def isNotForbidden( pos):
return ( pos not in FORBIDDEN )
def g_tm_start():
global g_para_tm_start
g_para_tm_start = datetime.datetime.now()
def g_tm_add():
global g_para_tm_start
global g_para_tm_diff
g_para_tm_diff += datetime.datetime.now() - g_para_tm_start
def g_tm_print( func_name):
global g_para_tm_diff
print( "Time Diff ({}): {}".format(func_name, g_para_tm_diff))
class STATE:
def __init__( self):
pass
def setup( self, mapstr):
i=-1
j=0
box=[]
wall=[]
for c in mapstr:
if( c=='#'):
i=i+1
j=0
continue
if( c=='P'):
player = [i,j]
j=j+1
continue
if( c=='W'):
wall.append([i,j])
j=j+1
continue
if( c=='B'):
box.append([i,j])
j=j+1
continue
j=j+1
self.set_wall( wall)
self.set_box( box)
self.set_player( player)
#print( self._player)
#print( self._wall)
#print( self._box)
def set_goal( self, lst):
self._goal = np.array( lst, dtype='b')
def set_box( self, lst):
self._box = np.array( lst, dtype='b')
def set_player( self, pos):
self._player = np.array( pos, dtype='b')
def set_wall( self, lst):
self._wall = np.array( lst, dtype='b')
def get_hexdigest( self):
m = hashlib.sha256()
m.update( self._player.tobytes())
#TODO: possible different orders for same positions of boxes
m.update( self._box.tobytes())
return m.hexdigest()
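    # Sketch addressing the TODO above (not part of the original solver): hash
    # the boxes in a canonical order so that states differing only in the
    # internal ordering of identical box coordinates collapse to one key.
    # The method name is illustrative only.
    def get_canonical_hexdigest(self):
        m = hashlib.sha256()
        m.update(self._player.tobytes())
        order = np.lexsort((self._box[:, 1], self._box[:, 0]))  # sort rows by (row, col)
        m.update(self._box[order].tobytes())
        return m.hexdigest()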
# print( "Move Box:", box_no, "Steps:", steps, "Dir:", mov_dir)
def moveBox( self, box_no, mov_dir):
self._player[0] = self._box[box_no][0]
self._player[1] = self._box[box_no][1]
self._box[box_no,0] += mov_dir[0]
self._box[box_no,1] += mov_dir[1]
def matchGoal( self, goal):
for elem in self._box:
if( [elem[0],elem[1]] not in goal):
return False
return True
def CountSteps2( map, state):
global g_tm_CountSteps2
tm_tmp = datetime.datetime.now()
i=1
lst = [[state._player[0],state._player[1]]]
while( len(lst) ):
next_lst = []
#print("step:", i)
for x,y in lst:
if( map[x-1,y]== MAP_BLANK): #UP
map[x-1,y] = i
next_lst.append([x-1,y])
if( map[x+1,y]== MAP_BLANK): #DOWN
map[x+1,y] = i
next_lst.append([x+1,y])
if( map[x,y-1]== MAP_BLANK): #LEFT
map[x,y-1] = i
next_lst.append([x,y-1])
if( map[x,y+1]== MAP_BLANK): #RIGHT
map[x,y+1] = i
next_lst.append([x,y+1])
lst = next_lst
#print( lst)
i=i+1
pass
map[state._player[0],state._player[1]] = 0
g_tm_CountSteps2 += datetime.datetime.now() - tm_tmp
pass
def CountSteps( map, state):
#print( state.get_hexdigest())
#map2 = map.copy()
# Add BOX, PLAYER to map
for val in state._box:
map[val[0],val[1]]= -2
map[state._player[0],state._player[1]] = -3
#print( map)
CountSteps2( map, state)
pass
def SearchEligibleMoves( map, state, moves, log):
i= -1
# Try to move the same box first
if(len(log)):
i = log[-1][0] # last moved box_no
#lst_mov_dir = log[-1][2]
x, y = state._box[i]
_U = map[x-1,y]
_D = map[x+1,y]
_L = map[x,y-1]
_R = map[x,y+1]
if( _U>=0 and _D>=0 ): #UP/DOWN
if( isNotForbidden([x-1,y])):
moves.append([[x+1,y],[-1,0],_D, i])
if( isNotForbidden([x+1,y])):
moves.append([[x-1,y],[1,0],_U, i])
else:
if( _U== MAP_BLANK and _D>=0 ): #UP
if( isNotForbidden([x-1,y])):
moves.append([[x+1,y],[-1,0],_D, i])
if( _U>=0 and _D== MAP_BLANK ): #DOWN
if( isNotForbidden([x+1,y])):
moves.append([[x-1,y],[1,0],_U, i])
if( _L>=0 and _R>=0): #LEFT/RIGHT
if( isNotForbidden([x,y-1])):
moves.append([[x,y+1],[0,-1],_R, i])
if( isNotForbidden([x,y+1])):
moves.append([[x,y-1],[0,1],_L, i])
else:
if( _L== MAP_BLANK and _R>=0): #LEFT
if( isNotForbidden([x,y-1])):
moves.append([[x,y+1],[0,-1],_R, i])
if( _L>=0 and _R== MAP_BLANK): #RIGHT
if( isNotForbidden([x,y+1])):
moves.append([[x,y-1],[0,1],_L, i])
j=i
for i, elem in enumerate( state._box):
if(j==i):
continue
x, y = elem
_U = map[x-1,y]
_D = map[x+1,y]
_L = map[x,y-1]
_R = map[x,y+1]
if( _U>=0 and _D>=0 ): #UP/DOWN
if( isNotForbidden([x-1,y])):
moves.append([[x+1,y],[-1,0],_D, i])
if( isNotForbidden([x+1,y])):
moves.append([[x-1,y],[1,0],_U, i])
else:
if( _U== MAP_BLANK and _D>=0 ): #UP
if( isNotForbidden([x-1,y])):
moves.append([[x+1,y],[-1,0],_D, i])
if( _U>=0 and _D== MAP_BLANK ): #DOWN
if( isNotForbidden([x+1,y])):
moves.append([[x-1,y],[1,0],_U, i])
if( _L>=0 and _R>=0): #LEFT/RIGHT
if( isNotForbidden([x,y-1])):
moves.append([[x,y+1],[0,-1],_R, i])
if( isNotForbidden([x,y+1])):
moves.append([[x,y-1],[0,1],_L, i])
else:
if( _L== MAP_BLANK and _R>=0): #LEFT
if( isNotForbidden([x,y-1])):
moves.append([[x,y+1],[0,-1],_R, i])
if( _L>=0 and _R== MAP_BLANK): #RIGHT
if( isNotForbidden([x,y+1])):
moves.append([[x,y-1],[0,1],_L, i])
pass
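# Illustrative refactor (not part of the original): the two box-scanning blocks
# in SearchEligibleMoves above are identical; a helper like this would let the
# "same box first" pass and the main loop share one implementation.
def _append_moves_for_box(map2, x, y, i, moves):
    _U, _D = map2[x-1, y], map2[x+1, y]
    _L, _R = map2[x, y-1], map2[x, y+1]
    if _U >= 0 and _D >= 0:   # player reachable on both sides: push UP or DOWN
        if isNotForbidden([x-1, y]):
            moves.append([[x+1, y], [-1, 0], _D, i])
        if isNotForbidden([x+1, y]):
            moves.append([[x-1, y], [1, 0], _U, i])
    else:
        if _U == MAP_BLANK and _D >= 0 and isNotForbidden([x-1, y]):   # UP
            moves.append([[x+1, y], [-1, 0], _D, i])
        if _U >= 0 and _D == MAP_BLANK and isNotForbidden([x+1, y]):   # DOWN
            moves.append([[x-1, y], [1, 0], _U, i])
    if _L >= 0 and _R >= 0:   # push LEFT or RIGHT
        if isNotForbidden([x, y-1]):
            moves.append([[x, y+1], [0, -1], _R, i])
        if isNotForbidden([x, y+1]):
            moves.append([[x, y-1], [0, 1], _L, i])
    else:
        if _L == MAP_BLANK and _R >= 0 and isNotForbidden([x, y-1]):   # LEFT
            moves.append([[x, y+1], [0, -1], _R, i])
        if _L >= 0 and _R == MAP_BLANK and isNotForbidden([x, y+1]):   # RIGHT
            moves.append([[x, y-1], [0, 1], _L, i])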
def Solve( state, goal):
# map : WALLS ONLY
# map = np.zeros((MAP_ROW, MAP_COL),dtype='b')
map = np.full((MAP_ROW, MAP_COL), fill_value=MAP_BLANK, dtype='b')
for val in state._wall:
map[val[0],val[1]]= -1
trace = {}
log = []
if( not Solve2( map, state, goal, 0, 0, trace, log, 100.0)):
print( "Cannot Solve!")
global g_para_total_state_searched
g_para_total_state_searched = len(trace)
def Solve2( map, state, goal, depth, total_steps, trace, log, progress_slot):
if( total_steps> MAX_STEPS or depth> MAX_DEPTH):
global g_para_max_exceeded
g_para_max_exceeded += 1
output_progress( progress_slot) # END_NODE
return False
# map2 : WALLS plus STEP COUNT
map2 = map.copy()
#Count steps to reachable blank squares
CountSteps( map2, state)
#print( map2)
#Remove illegible moves for the BOX
moves=[] # list of [ targetPlayerPosition, moveDirection, steps, box no]
SearchEligibleMoves( map2, state, moves, log)
#print(moves)
if( len(moves)):
mv_progress_slot = progress_slot/len(moves)
else:
output_progress( progress_slot) # END_NODE
#Try each possible move
for i_mov, mov in enumerate(moves):
#if( depth<2): print( depth, mov, mv_progress_slot)
steps = mov[2]
box_no = mov[3]
mov_dir = mov[1]
g_tm_start()
new_state = copy.deepcopy(state)
g_tm_add()
new_state.moveBox( box_no, mov_dir)
#print( box_no, mov_dir)
#check if meet goal
if( new_state.matchGoal(goal)):
print( "Reach Goals!")
print( "Depth:", depth+1)
print( "Total Steps:", total_steps+steps+1)
log.append([box_no, steps, mov_dir, i_mov])
for l in log:
print( " Move Box: {:d} Steps: {:d} Dir: {} i: {}".format(l[0],l[1],l[2],l[3]))
return True
#check if new_state is duplicate
key = new_state.get_hexdigest()
if( key in trace):
#print( "duplicate state!")
global g_para_duplicate_state_count
global g_para_duplicate_state_count2
g_para_duplicate_state_count += 1
if( trace[key] < depth+1):
g_para_duplicate_state_count2 += 1
output_progress( mv_progress_slot) # END_NODE
continue
log.append([box_no, steps, mov_dir, i_mov])
trace[key] = depth+1
#print( new_state.get_hexdigest())
#start a new node for search
if( Solve2( map, new_state, goal, depth+1, total_steps+steps+1, trace, log, mv_progress_slot)):
return True
#log.pop()
#continue #Find next alternative solution
else:
log.pop()
#output_progress( mv_progress_slot)
#trace.pop(key)
continue
return False
def output_progress( progress):
global g_progress
global g_progress_prv_time
g_progress += progress
tmp = datetime.datetime.now()
if( tmp - g_progress_prv_time > datetime.timedelta(seconds=2.0)):
print( "progress: {:.4f}%".format(g_progress))
g_progress_prv_time = tmp
s = STATE()
mapstr = "#---WWWW-"+"#WWWW PW-"+"#W B W-"+"#W B WW"+"#WWB B W"+"#-W B W"+"#-W WWWW"+"#-WWWW---"
goal = [[3,3],[3,4],[3,5],[4,4],[4,5]]
goal = [[3,4],[3,2],[4,2],[4,4],[5,4]] # one step
MAX_STEPS = 21
MAX_DEPTH = 5
goal = [[2,5],[3,2],[4,2],[4,4],[5,4]] # one step
# goal = [[3,5],[3,2],[4,2],[4,4],[5,4]] # two steps
# goal = [[3,5],[3,2],[4,2],[4,4],[5,3]] # two steps
# MAX_STEPS = 21
# MAX_DEPTH = 4
# goal = [[3,5],[3,3],[4,2],[4,4],[5,3]]
# goal = [[3,4],[3,3],[4,2],[4,4],[5,5]] # two steps
# goal = [[3,4],[3,3],[4,2],[4,3],[5,5]] # two steps
# goal = [[3,4],[3,3],[3,2],[4,3],[5,5]] # two steps
# Time Used:0:00:01.915810
# MAX_STEPS = 28
# MAX_DEPTH = 6
# goal = [[3,4],[3,3],[2,2],[4,3],[5,5]]
# Time Used: 0:00:17.317066
# Time Used (g_tm_CountSteps2): 0:00:05.415582
# Total State Searched: 18628
# Total Max Exceeded: 111053
# Duplicate Key Count : 156954
# Duplicate Key Count2: 26714
MAX_STEPS = 31
MAX_DEPTH = 8
goal = [[3,4],[3,3],[2,4],[4,3],[5,5]]
# Time Used: 0:00:46.802952
# Time Used (g_tm_CountSteps2): 0:00:15.552429
# Total State Searched: 33324
# Total Max Exceeded: 276172
# Duplicate Key Count : 426214
# Duplicate Key Count2: 79402
# MAX_STEPS = 32
# MAX_DEPTH = 9
# goal = [[3,4],[3,3],[2,5],[4,3],[5,5]]
# Time Used: 0:01:46.428706
# Time Used (g_tm_CountSteps2): 0:00:34.125447
# Total State Searched: 53777
# Total Max Exceeded: 553840
# Duplicate Key Count : 941157
# Duplicate Key Count2: 202331
# Time Diff (STATE Copy): 0:00:15.649496
MAX_STEPS = 33
MAX_DEPTH = 10
goal = [[4,4],[3,3],[2,5],[4,3],[5,5]]
# Time Used: 0:21:59.884430
# Time Used (g_tm_CountSteps2): 0:07:51.087651
# Total State Searched: 184658
# Total Max Exceeded: 4987044
# Duplicate Key Count : 11314176
# Duplicate Key Count2: 3603415
# MAX_STEPS = 40
# MAX_DEPTH = 13
# goal = [[4,4],[3,3],[2,5],[4,3],[5,2]]
# # Time Used:
# MAX_STEPS = 45
# MAX_DEPTH = 16
# goal = [[4,4],[3,3],[2,5],[4,3],[2,2]]
# # Time Used:
# MAX_STEPS = 46
# MAX_DEPTH = 17
# goal = [[4,4],[3,4],[2,5],[4,3],[2,2]]
# # Time Used:
# MAX_STEPS = 52
# MAX_DEPTH = 19
# goal = [[4,4],[3,4],[4,5],[4,3],[2,2]]
# # Time Used:
# MAX_STEPS = 61
# MAX_DEPTH = 20
# goal = [[4,4],[3,4],[4,5],[3,3],[2,2]]
# # Time Used:
# MAX_STEPS = 71
# MAX_DEPTH = 24
# goal = [[4,4],[3,4],[4,5],[3,3],[3,5]]
s.setup( mapstr)
g_progress_prv_time = datetime.datetime.now()
start_time = datetime.datetime.now()
if True:
Solve( s, goal)
else:
x = threading.Thread( target=Solve, args=(s,goal))
x.start()
x.join()
diff_time = datetime.datetime.now() - start_time
print( "Time Used: {}".format(diff_time))
print( "Time Used (g_tm_CountSteps2): {}".format(g_tm_CountSteps2))
print( "Total State Searched: {}".format(g_para_total_state_searched))
print( "Total Max Exceeded: {}".format(g_para_max_exceeded))
print( "Duplicate Key Count : {}".format(g_para_duplicate_state_count))
print( "Duplicate Key Count2: {}".format(g_para_duplicate_state_count2))
g_tm_print("STATE Copy")
# Setup Map and State:{ Goal, Box, Player, Wall }
# Logs:
# Time Used:0:29:37.108837
# Total State Searched: 184,658
# Duplicate Key Count : 11,319,687
# Duplicate Key Count2: 3,602,166
# MAX_STEPS = 40
# MAX_DEPTH = 13
# goal = [[4,4],[3,3],[2,5],[4,3],[5,2]]
# Depth: 13
# Total Steps: 40
# Move Box: 0 Steps: 1 Dir: [1, 0] i: 0
# Move Box: 4 Steps: 4 Dir: [0, 1] i: 6
# Move Box: 1 Steps: 7 Dir: [0, 1] i: 3
# Move Box: 3 Steps: 6 Dir: [0, -1] i: 2
# Move Box: 2 Steps: 3 Dir: [-1, 0] i: 2
# Move Box: 2 Steps: 0 Dir: [-1, 0] i: 0
# Move Box: 2 Steps: 2 Dir: [0, 1] i: 0
# Move Box: 2 Steps: 0 Dir: [0, 1] i: 1
# Move Box: 2 Steps: 0 Dir: [0, 1] i: 1
# Move Box: 0 Steps: 0 Dir: [1, 0] i: 2
# Move Box: 4 Steps: 4 Dir: [0, -1] i: 5
# Move Box: 4 Steps: 0 Dir: [0, -1] i: 0
# Move Box: 4 Steps: 0 Dir: [0, -1] i: 0
| 25.530973 | 104 | 0.538094 |
fbf2982b96cd59bdb1694db2faea05735a1f8913
| 227 |
py
|
Python
|
Contests/CCC/CCC '18 J1 - Telemarketer or not.py
|
MastaCoder/Projects
|
ebb0a3134522b12f052fec8d753005f384adf1b1
|
[
"MIT"
] | 5 |
2018-10-11T01:55:40.000Z
|
2021-12-25T23:38:22.000Z
|
Contests/CCC/CCC '18 J1 - Telemarketer or not.py
|
MastaCoder/mini_projects
|
ebb0a3134522b12f052fec8d753005f384adf1b1
|
[
"MIT"
] | null | null | null |
Contests/CCC/CCC '18 J1 - Telemarketer or not.py
|
MastaCoder/mini_projects
|
ebb0a3134522b12f052fec8d753005f384adf1b1
|
[
"MIT"
] | 1 |
2019-02-22T14:42:50.000Z
|
2019-02-22T14:42:50.000Z
|
num = [0 for n in range(4)]
for i in range(4):
num[i] = int(input(""))
if (num[0] == 8 or num[0] == 9) and (num[3] == 8 or num[3] == 9) and (num[1] == num[2]):
print("ignore")
else:
print("answer")
| 20.636364 | 88 | 0.515419 |
2225ca05574efa4c5b8f771ee48a67c6618094e5
| 846 |
py
|
Python
|
python/pyqt/LearnPyQt/q_list_box.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/pyqt/LearnPyQt/q_list_box.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/pyqt/LearnPyQt/q_list_box.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QMainWindow, QListWidget
class MainWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.setWindowTitle("Jayone's Awesome App")
widget = QListWidget()
widget.addItems(['One', 'Two', 'Three'])
        # QListWidget emits two separate signals for the current selection:
        # one carries the QListWidgetItem, the other its text as a str.
widget.currentItemChanged.connect(self.index_changed)
widget.currentTextChanged.connect(self.text_changed)
self.setCentralWidget(widget)
    def index_changed(self, i):  # despite the name, i is a QListWidgetItem, not an index
print(i.text())
def text_changed(self, s):
print(s)
app = QApplication(sys.argv)
window = MainWindow()
window.show()
app.exec_()
| 24.882353 | 69 | 0.670213 |
97eae9962de314f43d1518f7a3879454790ab9ac
| 1,221 |
py
|
Python
|
tests/test_rec.py
|
supcik/pymdown-include
|
fe426425e646d9111d230885b96d605fa78a62c0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_rec.py
|
supcik/pymdown-include
|
fe426425e646d9111d230885b96d605fa78a62c0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_rec.py
|
supcik/pymdown-include
|
fe426425e646d9111d230885b96d605fa78a62c0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Jacques Supcik
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
from pathlib import Path
import markdown
from pymdown_include import PymdownInclude
class RecursiveTests(unittest.TestCase):
def setUp(self):
self.md = markdown.Markdown(
extensions=[PymdownInclude(SEARCH_PATH=[Path(__file__).parent])]
)
def test_rec(self):
MD = """
A {!rec.md!} B
"""
HTML = """
<p>A X line 1
line 2
line 3 Y B</p>
"""
self.assertEqual(self.md.convert(
inspect.cleandoc(MD)), inspect.cleandoc(HTML))
if __name__ == '__main__':
unittest.main()
| 25.4375 | 76 | 0.661753 |
3f26aa0f6c35a62ad963ac41005256780bd39a53
| 612 |
py
|
Python
|
TEDx/Titles_starting_0_to_9/parse.py
|
gt-big-data/TEDVis
|
328a4c62e3a05c943b2a303817601aebf198c1aa
|
[
"MIT"
] | null | null | null |
TEDx/Titles_starting_0_to_9/parse.py
|
gt-big-data/TEDVis
|
328a4c62e3a05c943b2a303817601aebf198c1aa
|
[
"MIT"
] | null | null | null |
TEDx/Titles_starting_0_to_9/parse.py
|
gt-big-data/TEDVis
|
328a4c62e3a05c943b2a303817601aebf198c1aa
|
[
"MIT"
] | 2 |
2018-02-06T00:00:44.000Z
|
2019-06-04T12:43:41.000Z
|
from os import listdir
import os
from os.path import isfile, join
from operator import itemgetter, attrgetter
def getWords():
    onlyfiles = [f for f in listdir(".") if isfile(join(".", f))]
    #print(onlyfiles)
    wordDict = {}
for file in onlyfiles:
with open(file) as f:
lines = f.readlines()
for line in lines:
line = line.split()
for word in line:
if word not in wordDict:
wordDict[word] = 0
wordDict[word] += 1
wordList = []
for key in wordDict:
wordList.append((wordDict[key], key))
wordList = sorted(wordList, key=itemgetter(0), reverse = True)
    print(wordList)
getWords()
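# Hedged alternative (not in the original): collections.Counter expresses the
# same word count with less bookkeeping, e.g.
#   from collections import Counter
#   counts = Counter(word for line in lines for word in line.split())
#   print(counts.most_common())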
| 19.125 | 63 | 0.661765 |
e1468d99ac818a6e67f218ca7a52aea9dae60a7e
| 3,040 |
py
|
Python
|
source/pkgsrc/lang/python27/patches/patch-Lib_ctypes_test_test__parameters.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-11-20T22:46:39.000Z
|
2021-11-20T22:46:39.000Z
|
source/pkgsrc/lang/python27/patches/patch-Lib_ctypes_test_test__parameters.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
source/pkgsrc/lang/python27/patches/patch-Lib_ctypes_test_test__parameters.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
$NetBSD: patch-Lib_ctypes_test_test__parameters.py,v 1.1.2.2 2021/10/13 21:04:01 tm Exp $
Fix CVE-2021-3177: Replace snprintf with Python unicode formatting in ctypes param reprs
Via Fedora:
https://src.fedoraproject.org/rpms/python2.7/blob/rawhide/f/00357-CVE-2021-3177.patch
--- Lib/ctypes/test/test_parameters.py.orig 2020-04-19 21:13:39.000000000 +0000
+++ Lib/ctypes/test/test_parameters.py
@@ -206,6 +206,49 @@ class SimpleTypesTestCase(unittest.TestC
with self.assertRaises(ZeroDivisionError):
WorseStruct().__setstate__({}, b'foo')
+ def test_parameter_repr(self):
+ from ctypes import (
+ c_bool,
+ c_char,
+ c_wchar,
+ c_byte,
+ c_ubyte,
+ c_short,
+ c_ushort,
+ c_int,
+ c_uint,
+ c_long,
+ c_ulong,
+ c_longlong,
+ c_ulonglong,
+ c_float,
+ c_double,
+ c_longdouble,
+ c_char_p,
+ c_wchar_p,
+ c_void_p,
+ )
+ self.assertRegexpMatches(repr(c_bool.from_param(True)), r"^<cparam '\?' at 0x[A-Fa-f0-9]+>$")
+ self.assertEqual(repr(c_char.from_param('a')), "<cparam 'c' ('a')>")
+ self.assertRegexpMatches(repr(c_wchar.from_param('a')), r"^<cparam 'u' at 0x[A-Fa-f0-9]+>$")
+ self.assertEqual(repr(c_byte.from_param(98)), "<cparam 'b' (98)>")
+ self.assertEqual(repr(c_ubyte.from_param(98)), "<cparam 'B' (98)>")
+ self.assertEqual(repr(c_short.from_param(511)), "<cparam 'h' (511)>")
+ self.assertEqual(repr(c_ushort.from_param(511)), "<cparam 'H' (511)>")
+ self.assertRegexpMatches(repr(c_int.from_param(20000)), r"^<cparam '[li]' \(20000\)>$")
+ self.assertRegexpMatches(repr(c_uint.from_param(20000)), r"^<cparam '[LI]' \(20000\)>$")
+ self.assertRegexpMatches(repr(c_long.from_param(20000)), r"^<cparam '[li]' \(20000\)>$")
+ self.assertRegexpMatches(repr(c_ulong.from_param(20000)), r"^<cparam '[LI]' \(20000\)>$")
+ self.assertRegexpMatches(repr(c_longlong.from_param(20000)), r"^<cparam '[liq]' \(20000\)>$")
+ self.assertRegexpMatches(repr(c_ulonglong.from_param(20000)), r"^<cparam '[LIQ]' \(20000\)>$")
+ self.assertEqual(repr(c_float.from_param(1.5)), "<cparam 'f' (1.5)>")
+ self.assertEqual(repr(c_double.from_param(1.5)), "<cparam 'd' (1.5)>")
+ self.assertEqual(repr(c_double.from_param(1e300)), "<cparam 'd' (1e+300)>")
+ self.assertRegexpMatches(repr(c_longdouble.from_param(1.5)), r"^<cparam ('d' \(1.5\)|'g' at 0x[A-Fa-f0-9]+)>$")
+ self.assertRegexpMatches(repr(c_char_p.from_param(b'hihi')), "^<cparam 'z' \(0x[A-Fa-f0-9]+\)>$")
+ self.assertRegexpMatches(repr(c_wchar_p.from_param('hihi')), "^<cparam 'Z' \(0x[A-Fa-f0-9]+\)>$")
+ self.assertRegexpMatches(repr(c_void_p.from_param(0x12)), r"^<cparam 'P' \(0x0*12\)>$")
+
################################################################
if __name__ == '__main__':
| 51.525424 | 120 | 0.587171 |
e1c1bbfefae2fb9a99ece53845e322214f25de58
| 963 |
py
|
Python
|
src/onegov/wtfs/forms/daily_list.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/wtfs/forms/daily_list.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/wtfs/forms/daily_list.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from datetime import date
from onegov.form import Form
from onegov.wtfs import _
from onegov.wtfs.models import DailyListBoxes
from onegov.wtfs.models import DailyListBoxesAndForms
from wtforms import RadioField
from wtforms.fields.html5 import DateField
from wtforms.validators import InputRequired
class DailyListSelectionForm(Form):
date = DateField(
label=_("Date"),
validators=[InputRequired()],
default=date.today
)
type = RadioField(
label=_("Daily list"),
choices=[
('boxes', _("Boxes")),
('boxes_and_forms', _("Boxes and forms")),
],
validators=[InputRequired()],
default='boxes'
)
def get_model(self):
if self.type.data == 'boxes':
return DailyListBoxes(self.request.session, self.date.data)
if self.type.data == 'boxes_and_forms':
return DailyListBoxesAndForms(self.request.session, self.date.data)
| 28.323529 | 79 | 0.659398 |
3d5174f2a4cfda47eefc6a675bad3294c304b4a9
| 4,130 |
py
|
Python
|
Session01_MLOPs_Introduction_and_Version_Control/train.py
|
garima-mahato/TSAI_EMLO1.0
|
f1478572a20988296831e70d6cf1dac9b36e7573
|
[
"Apache-2.0"
] | null | null | null |
Session01_MLOPs_Introduction_and_Version_Control/train.py
|
garima-mahato/TSAI_EMLO1.0
|
f1478572a20988296831e70d6cf1dac9b36e7573
|
[
"Apache-2.0"
] | null | null | null |
Session01_MLOPs_Introduction_and_Version_Control/train.py
|
garima-mahato/TSAI_EMLO1.0
|
f1478572a20988296831e70d6cf1dac9b36e7573
|
[
"Apache-2.0"
] | null | null | null |
'''This script goes along with the blog post
"Building powerful image classification models using very little data"
from blog.keras.io.
In our example we will be using data that can be downloaded at:
https://www.kaggle.com/tongpython/cat-and-dog
In our setup, it expects:
- a data/ folder
- train/ and validation/ subfolders inside data/
- cats/ and dogs/ subfolders inside train/ and validation/
- put the cat pictures index 0-X in data/train/cats
- put the cat pictures index 1000-1400 in data/validation/cats
- put the dogs pictures index 0-X in data/train/dogs
- put the dog pictures index 1000-1400 in data/validation/dogs
We have X training examples for each class, and 400 validation examples
for each class. In summary, this is our directory structure:
```
data/
train/
dogs/
dog001.jpg
dog002.jpg
...
cats/
cat001.jpg
cat002.jpg
...
validation/
dogs/
dog001.jpg
dog002.jpg
...
cats/
cat001.jpg
cat002.jpg
...
```
'''
import numpy as np
import sys
import os
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dropout, Flatten, Dense
from tensorflow.keras import applications
from tensorflow.keras.callbacks import CSVLogger
from tqdm.keras import TqdmCallback
pathname = os.path.dirname(sys.argv[0])
path = os.path.abspath(pathname)
# dimensions of our images.
img_width, img_height = 150, 150
top_model_weights_path = 'model.h5'
train_data_dir = os.path.join('data', 'train')
validation_data_dir = os.path.join('data', 'validation')
cats_train_path = os.path.join(path, train_data_dir, 'cats')
nb_train_samples = 2 * len([name for name in os.listdir(cats_train_path)
if os.path.isfile(
os.path.join(cats_train_path, name))])
nb_validation_samples = 800
epochs = 10
batch_size = 10
def save_bottlebeck_features():
datagen = ImageDataGenerator(rescale=1. / 255)
# build the VGG16 network
model = applications.VGG16(include_top=False, weights='imagenet')
generator = datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False)
bottleneck_features_train = model.predict_generator(
generator, nb_train_samples // batch_size)
np.save(open('bottleneck_features_train.npy', 'wb'),
bottleneck_features_train)
generator = datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False)
bottleneck_features_validation = model.predict_generator(
generator, nb_validation_samples // batch_size)
np.save(open('bottleneck_features_validation.npy', 'wb'),
bottleneck_features_validation)
def train_top_model():
train_data = np.load(open('bottleneck_features_train.npy', 'rb'))
train_labels = np.array(
[0] * (int(nb_train_samples / 2)) + [1] * (int(nb_train_samples / 2)))
validation_data = np.load(open('bottleneck_features_validation.npy', 'rb'))
validation_labels = np.array(
[0] * (int(nb_validation_samples / 2)) +
[1] * (int(nb_validation_samples / 2)))
model = Sequential()
model.add(Flatten(input_shape=train_data.shape[1:]))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy', metrics=['accuracy'])
model.fit(train_data, train_labels,
epochs=epochs,
batch_size=batch_size,
validation_data=(validation_data, validation_labels),
verbose=0,
callbacks=[TqdmCallback(), CSVLogger("metrics.csv")])
model.save_weights(top_model_weights_path)
save_bottlebeck_features()
train_top_model()
| 32.015504 | 79 | 0.676513 |
62ec2f94cbcbdd78f620bbef2daa3608432ad829
| 29,618 |
py
|
Python
|
Contrib-Inspur/openbmc/poky/scripts/lib/devtool/upgrade.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 5 |
2019-11-11T07:57:26.000Z
|
2022-03-28T08:26:53.000Z
|
Contrib-Inspur/openbmc/poky/scripts/lib/devtool/upgrade.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 3 |
2019-09-05T21:47:07.000Z
|
2019-09-17T18:10:45.000Z
|
Contrib-Inspur/openbmc/poky/scripts/lib/devtool/upgrade.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 11 |
2019-07-20T00:16:32.000Z
|
2022-01-11T14:17:48.000Z
|
# Development tool - upgrade command plugin
#
# Copyright (C) 2014-2017 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool upgrade plugin"""
import os
import sys
import re
import shutil
import tempfile
import logging
import argparse
import scriptutils
import errno
import bb
devtool_path = os.path.dirname(os.path.realpath(__file__)) + '/../../../meta/lib'
sys.path = sys.path + [devtool_path]
import oe.recipeutils
from devtool import standard
from devtool import exec_build_env_command, setup_tinfoil, DevtoolError, parse_recipe, use_external_build, update_unlockedsigs, check_prerelease_version
logger = logging.getLogger('devtool')
def _run(cmd, cwd=''):
logger.debug("Running command %s> %s" % (cwd,cmd))
return bb.process.run('%s' % cmd, cwd=cwd)
def _get_srctree(tmpdir):
srctree = tmpdir
dirs = os.listdir(tmpdir)
if len(dirs) == 1:
srctree = os.path.join(tmpdir, dirs[0])
return srctree
def _copy_source_code(orig, dest):
for path in standard._ls_tree(orig):
dest_dir = os.path.join(dest, os.path.dirname(path))
bb.utils.mkdirhier(dest_dir)
dest_path = os.path.join(dest, path)
shutil.move(os.path.join(orig, path), dest_path)
def _remove_patch_dirs(recipefolder):
for root, dirs, files in os.walk(recipefolder):
for d in dirs:
shutil.rmtree(os.path.join(root,d))
def _recipe_contains(rd, var):
rf = rd.getVar('FILE')
varfiles = oe.recipeutils.get_var_files(rf, [var], rd)
for var, fn in varfiles.items():
if fn and fn.startswith(os.path.dirname(rf) + os.sep):
return True
return False
def _rename_recipe_dirs(oldpv, newpv, path):
for root, dirs, files in os.walk(path):
# Rename directories with the version in their name
for olddir in dirs:
if olddir.find(oldpv) != -1:
newdir = olddir.replace(oldpv, newpv)
if olddir != newdir:
shutil.move(os.path.join(path, olddir), os.path.join(path, newdir))
# Rename any inc files with the version in their name (unusual, but possible)
for oldfile in files:
if oldfile.endswith('.inc'):
if oldfile.find(oldpv) != -1:
newfile = oldfile.replace(oldpv, newpv)
if oldfile != newfile:
os.rename(os.path.join(path, oldfile), os.path.join(path, newfile))
def _rename_recipe_file(oldrecipe, bpn, oldpv, newpv, path):
oldrecipe = os.path.basename(oldrecipe)
if oldrecipe.endswith('_%s.bb' % oldpv):
newrecipe = '%s_%s.bb' % (bpn, newpv)
if oldrecipe != newrecipe:
shutil.move(os.path.join(path, oldrecipe), os.path.join(path, newrecipe))
else:
newrecipe = oldrecipe
return os.path.join(path, newrecipe)
def _rename_recipe_files(oldrecipe, bpn, oldpv, newpv, path):
_rename_recipe_dirs(oldpv, newpv, path)
return _rename_recipe_file(oldrecipe, bpn, oldpv, newpv, path)
def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d):
"""Writes an append file"""
if not os.path.exists(rc):
raise DevtoolError("bbappend not created because %s does not exist" % rc)
appendpath = os.path.join(workspace, 'appends')
if not os.path.exists(appendpath):
bb.utils.mkdirhier(appendpath)
brf = os.path.basename(os.path.splitext(rc)[0]) # rc basename
srctree = os.path.abspath(srctree)
pn = d.getVar('PN')
af = os.path.join(appendpath, '%s.bbappend' % brf)
with open(af, 'w') as f:
f.write('FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n\n')
f.write('inherit externalsrc\n')
        f.write(('# NOTE: We use pn- overrides here to avoid affecting '
                 'multiple variants in the case where the recipe uses BBCLASSEXTEND\n'))
f.write('EXTERNALSRC_pn-%s = "%s"\n' % (pn, srctree))
b_is_s = use_external_build(same_dir, no_same_dir, d)
if b_is_s:
f.write('EXTERNALSRC_BUILD_pn-%s = "%s"\n' % (pn, srctree))
f.write('\n')
if rev:
f.write('# initial_rev: %s\n' % rev)
if copied:
f.write('# original_path: %s\n' % os.path.dirname(d.getVar('FILE')))
f.write('# original_files: %s\n' % ' '.join(copied))
return af
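# Illustrative only (not in the original source): for a recipe "foo" with
# srctree /home/user/src/foo, the append written above comes out roughly as:
#   FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
#   inherit externalsrc
#   EXTERNALSRC_pn-foo = "/home/user/src/foo"
#   EXTERNALSRC_BUILD_pn-foo = "/home/user/src/foo"
#   # initial_rev: <rev>   (plus original_path/original_files when copied)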
def _cleanup_on_error(rf, srctree):
rfp = os.path.split(rf)[0] # recipe folder
rfpp = os.path.split(rfp)[0] # recipes folder
if os.path.exists(rfp):
        shutil.rmtree(rfp)
if not len(os.listdir(rfpp)):
os.rmdir(rfpp)
srctree = os.path.abspath(srctree)
if os.path.exists(srctree):
shutil.rmtree(srctree)
def _upgrade_error(e, rf, srctree):
if rf:
        _cleanup_on_error(rf, srctree)
logger.error(e)
raise DevtoolError(e)
def _get_uri(rd):
srcuris = rd.getVar('SRC_URI').split()
if not len(srcuris):
raise DevtoolError('SRC_URI not found on recipe')
# Get first non-local entry in SRC_URI - usually by convention it's
# the first entry, but not always!
srcuri = None
for entry in srcuris:
if not entry.startswith('file://'):
srcuri = entry
break
if not srcuri:
raise DevtoolError('Unable to find non-local entry in SRC_URI')
srcrev = '${AUTOREV}'
if '://' in srcuri:
# Fetch a URL
rev_re = re.compile(';rev=([^;]+)')
res = rev_re.search(srcuri)
if res:
srcrev = res.group(1)
srcuri = rev_re.sub('', srcuri)
return srcuri, srcrev
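# Example of the parsing above (illustrative values): for a recipe with
#   SRC_URI = "git://host/repo;branch=stable;rev=abc123 file://fix.patch"
# _get_uri returns ('git://host/repo;branch=stable', 'abc123') -- the first
# non-local entry, with any ;rev= parameter stripped out and captured.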
def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, keep_temp, tinfoil, rd):
"""Extract sources of a recipe with a new version"""
def __run(cmd):
"""Simple wrapper which calls _run with srctree as cwd"""
return _run(cmd, srctree)
crd = rd.createCopy()
pv = crd.getVar('PV')
crd.setVar('PV', newpv)
tmpsrctree = None
uri, rev = _get_uri(crd)
if srcrev:
rev = srcrev
if uri.startswith('git://'):
__run('git fetch')
__run('git checkout %s' % rev)
__run('git tag -f devtool-base-new')
md5 = None
sha256 = None
_, _, _, _, _, params = bb.fetch2.decodeurl(uri)
srcsubdir_rel = params.get('destsuffix', 'git')
if not srcbranch:
check_branch, check_branch_err = __run('git branch -r --contains %s' % srcrev)
get_branch = [x.strip() for x in check_branch.splitlines()]
# Remove HEAD reference point and drop remote prefix
get_branch = [x.split('/', 1)[1] for x in get_branch if not x.startswith('origin/HEAD')]
if 'master' in get_branch:
# If it is master, we do not need to append 'branch=master' as this is default.
# Even with the case where get_branch has multiple objects, if 'master' is one
# of them, we should default take from 'master'
srcbranch = ''
elif len(get_branch) == 1:
# If 'master' isn't in get_branch and get_branch contains only ONE object, then store result into 'srcbranch'
srcbranch = get_branch[0]
else:
# If get_branch contains more than one objects, then display error and exit.
mbrch = '\n ' + '\n '.join(get_branch)
raise DevtoolError('Revision %s was found on multiple branches: %s\nPlease provide the correct branch in the devtool command with "--srcbranch" or "-B" option.' % (srcrev, mbrch))
else:
__run('git checkout devtool-base -b devtool-%s' % newpv)
tmpdir = tempfile.mkdtemp(prefix='devtool')
try:
checksums, ftmpdir = scriptutils.fetch_url(tinfoil, uri, rev, tmpdir, logger, preserve_tmp=keep_temp)
except scriptutils.FetchUrlFailure as e:
raise DevtoolError(e)
if ftmpdir and keep_temp:
logger.info('Fetch temp directory is %s' % ftmpdir)
md5 = checksums['md5sum']
sha256 = checksums['sha256sum']
tmpsrctree = _get_srctree(tmpdir)
srctree = os.path.abspath(srctree)
srcsubdir_rel = os.path.relpath(tmpsrctree, tmpdir)
# Delete all sources so we ensure no stray files are left over
for item in os.listdir(srctree):
if item in ['.git', 'oe-local-files']:
continue
itempath = os.path.join(srctree, item)
if os.path.isdir(itempath):
shutil.rmtree(itempath)
else:
os.remove(itempath)
# Copy in new ones
_copy_source_code(tmpsrctree, srctree)
(stdout,_) = __run('git ls-files --modified --others --exclude-standard')
filelist = stdout.splitlines()
pbar = bb.ui.knotty.BBProgress('Adding changed files', len(filelist))
pbar.start()
batchsize = 100
for i in range(0, len(filelist), batchsize):
batch = filelist[i:i+batchsize]
__run('git add -A %s' % ' '.join(['"%s"' % item for item in batch]))
pbar.update(i)
pbar.finish()
useroptions = []
oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=rd)
__run('git %s commit -q -m "Commit of upstream changes at version %s" --allow-empty' % (' '.join(useroptions), newpv))
__run('git tag -f devtool-base-%s' % newpv)
(stdout, _) = __run('git rev-parse HEAD')
rev = stdout.rstrip()
if no_patch:
patches = oe.recipeutils.get_recipe_patches(crd)
if patches:
logger.warning('By user choice, the following patches will NOT be applied to the new source tree:\n %s' % '\n '.join([os.path.basename(patch) for patch in patches]))
else:
__run('git checkout devtool-patched -b %s' % branch)
skiptag = False
try:
__run('git rebase %s' % rev)
except bb.process.ExecutionError as e:
skiptag = True
if 'conflict' in e.stdout:
logger.warning('Command \'%s\' failed:\n%s\n\nYou will need to resolve conflicts in order to complete the upgrade.' % (e.command, e.stdout.rstrip()))
else:
logger.warning('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
if not skiptag:
if uri.startswith('git://'):
suffix = 'new'
else:
suffix = newpv
__run('git tag -f devtool-patched-%s' % suffix)
if tmpsrctree:
if keep_temp:
logger.info('Preserving temporary directory %s' % tmpsrctree)
else:
shutil.rmtree(tmpsrctree)
return (rev, md5, sha256, srcbranch, srcsubdir_rel)
def _add_license_diff_to_recipe(path, diff):
notice_text = """# FIXME: the LIC_FILES_CHKSUM values have been updated by 'devtool upgrade'.
# The following is the difference between the old and the new license text.
# Please update the LICENSE value if needed, and summarize the changes in
# the commit message via 'License-Update:' tag.
# (example: 'License-Update: copyright years updated.')
#
# The changes:
#
"""
commented_diff = "\n".join(["# {}".format(l) for l in diff.split('\n')])
with open(path, 'rb') as f:
orig_content = f.read()
with open(path, 'wb') as f:
f.write(notice_text.encode())
f.write(commented_diff.encode())
f.write("\n#\n\n".encode())
f.write(orig_content)
def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, srcsubdir_new, workspace, tinfoil, rd, license_diff, new_licenses):
"""Creates the new recipe under workspace"""
bpn = rd.getVar('BPN')
path = os.path.join(workspace, 'recipes', bpn)
bb.utils.mkdirhier(path)
copied, _ = oe.recipeutils.copy_recipe_files(rd, path, all_variants=True)
if not copied:
raise DevtoolError('Internal error - no files were copied for recipe %s' % bpn)
logger.debug('Copied %s to %s' % (copied, path))
oldpv = rd.getVar('PV')
if not newpv:
newpv = oldpv
origpath = rd.getVar('FILE')
fullpath = _rename_recipe_files(origpath, bpn, oldpv, newpv, path)
logger.debug('Upgraded %s => %s' % (origpath, fullpath))
newvalues = {}
if _recipe_contains(rd, 'PV') and newpv != oldpv:
newvalues['PV'] = newpv
if srcrev:
newvalues['SRCREV'] = srcrev
if srcbranch:
src_uri = oe.recipeutils.split_var_value(rd.getVar('SRC_URI', False) or '')
changed = False
replacing = True
new_src_uri = []
for entry in src_uri:
scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(entry)
if replacing and scheme in ['git', 'gitsm']:
branch = params.get('branch', 'master')
if rd.expand(branch) != srcbranch:
# Handle case where branch is set through a variable
res = re.match(r'\$\{([^}@]+)\}', branch)
if res:
newvalues[res.group(1)] = srcbranch
# We know we won't change SRC_URI now, so break out
break
else:
params['branch'] = srcbranch
entry = bb.fetch2.encodeurl((scheme, network, path, user, passwd, params))
changed = True
replacing = False
new_src_uri.append(entry)
if changed:
newvalues['SRC_URI'] = ' '.join(new_src_uri)
newvalues['PR'] = None
# Work out which SRC_URI entries have changed in case the entry uses a name
crd = rd.createCopy()
crd.setVar('PV', newpv)
for var, value in newvalues.items():
crd.setVar(var, value)
old_src_uri = (rd.getVar('SRC_URI') or '').split()
new_src_uri = (crd.getVar('SRC_URI') or '').split()
newnames = []
addnames = []
for newentry in new_src_uri:
_, _, _, _, _, params = bb.fetch2.decodeurl(newentry)
if 'name' in params:
newnames.append(params['name'])
if newentry not in old_src_uri:
addnames.append(params['name'])
# Find what's been set in the original recipe
oldnames = []
noname = False
for varflag in rd.getVarFlags('SRC_URI'):
if varflag.endswith(('.md5sum', '.sha256sum')):
name = varflag.rsplit('.', 1)[0]
if name not in oldnames:
oldnames.append(name)
elif varflag in ['md5sum', 'sha256sum']:
noname = True
# Even if SRC_URI has named entries it doesn't have to actually use the name
if noname and addnames and addnames[0] not in oldnames:
addnames = []
# Drop any old names (the name actually might include ${PV})
for name in oldnames:
if name not in newnames:
newvalues['SRC_URI[%s.md5sum]' % name] = None
newvalues['SRC_URI[%s.sha256sum]' % name] = None
if md5 and sha256:
if addnames:
nameprefix = '%s.' % addnames[0]
else:
nameprefix = ''
newvalues['SRC_URI[%smd5sum]' % nameprefix] = md5
newvalues['SRC_URI[%ssha256sum]' % nameprefix] = sha256
if srcsubdir_new != srcsubdir_old:
s_subdir_old = os.path.relpath(os.path.abspath(rd.getVar('S')), rd.getVar('WORKDIR'))
s_subdir_new = os.path.relpath(os.path.abspath(crd.getVar('S')), crd.getVar('WORKDIR'))
if srcsubdir_old == s_subdir_old and srcsubdir_new != s_subdir_new:
# Subdir for old extracted source matches what S points to (it should!)
# but subdir for new extracted source doesn't match what S will be
newvalues['S'] = '${WORKDIR}/%s' % srcsubdir_new.replace(newpv, '${PV}')
if crd.expand(newvalues['S']) == crd.expand('${WORKDIR}/${BP}'):
# It's the default, drop it
# FIXME what if S is being set in a .inc?
newvalues['S'] = None
logger.info('Source subdirectory has changed, dropping S value since it now matches the default ("${WORKDIR}/${BP}")')
else:
logger.info('Source subdirectory has changed, updating S value')
if license_diff:
newlicchksum = " ".join(["file://{}".format(l['path']) +
(";beginline={}".format(l['beginline']) if l['beginline'] else "") +
(";endline={}".format(l['endline']) if l['endline'] else "") +
(";md5={}".format(l['actual_md5'])) for l in new_licenses])
newvalues["LIC_FILES_CHKSUM"] = newlicchksum
_add_license_diff_to_recipe(fullpath, license_diff)
rd = tinfoil.parse_recipe_file(fullpath, False)
oe.recipeutils.patch_recipe(rd, fullpath, newvalues)
return fullpath, copied
def _check_git_config():
def getconfig(name):
try:
value = bb.process.run('git config --global %s' % name)[0].strip()
except bb.process.ExecutionError as e:
if e.exitcode == 1:
value = None
else:
raise
return value
username = getconfig('user.name')
useremail = getconfig('user.email')
configerr = []
if not username:
configerr.append('Please set your name using:\n git config --global user.name')
if not useremail:
configerr.append('Please set your email using:\n git config --global user.email')
if configerr:
raise DevtoolError('Your git configuration is incomplete which will prevent rebases from working:\n' + '\n'.join(configerr))
def _extract_licenses(srcpath, recipe_licenses):
licenses = []
for url in recipe_licenses.split():
license = {}
(type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
license['path'] = path
license['md5'] = parm.get('md5', '')
license['beginline'], license['endline'] = 0, 0
if 'beginline' in parm:
license['beginline'] = int(parm['beginline'])
if 'endline' in parm:
license['endline'] = int(parm['endline'])
license['text'] = []
with open(os.path.join(srcpath, path), 'rb') as f:
import hashlib
actual_md5 = hashlib.md5()
lineno = 0
for line in f:
lineno += 1
if (lineno >= license['beginline']) and ((lineno <= license['endline']) or not license['endline']):
license['text'].append(line.decode(errors='ignore'))
actual_md5.update(line)
license['actual_md5'] = actual_md5.hexdigest()
licenses.append(license)
return licenses
def _generate_license_diff(old_licenses, new_licenses):
need_diff = False
for l in new_licenses:
if l['md5'] != l['actual_md5']:
need_diff = True
break
if need_diff == False:
return None
import difflib
diff = ''
for old, new in zip(old_licenses, new_licenses):
for line in difflib.unified_diff(old['text'], new['text'], old['path'], new['path']):
diff = diff + line
return diff
def upgrade(args, config, basepath, workspace):
"""Entry point for the devtool 'upgrade' subcommand"""
if args.recipename in workspace:
raise DevtoolError("recipe %s is already in your workspace" % args.recipename)
if args.srcbranch and not args.srcrev:
        raise DevtoolError("If you specify --srcbranch/-B then you must use --srcrev/-S to specify the revision")
_check_git_config()
tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
try:
rd = parse_recipe(config, tinfoil, args.recipename, True)
if not rd:
return 1
pn = rd.getVar('PN')
if pn != args.recipename:
logger.info('Mapping %s to %s' % (args.recipename, pn))
if pn in workspace:
raise DevtoolError("recipe %s is already in your workspace" % pn)
if args.srctree:
srctree = os.path.abspath(args.srctree)
else:
srctree = standard.get_default_srctree(config, pn)
# try to automatically discover latest version and revision if not provided on command line
if not args.version and not args.srcrev:
version_info = oe.recipeutils.get_recipe_upstream_version(rd)
if version_info['version'] and not version_info['version'].endswith("new-commits-available"):
args.version = version_info['version']
if version_info['revision']:
args.srcrev = version_info['revision']
if not args.version and not args.srcrev:
raise DevtoolError("Automatic discovery of latest version/revision failed - you must provide a version using the --version/-V option, or for recipes that fetch from an SCM such as git, the --srcrev/-S option.")
standard._check_compatible_recipe(pn, rd)
old_srcrev = rd.getVar('SRCREV')
if old_srcrev == 'INVALID':
old_srcrev = None
if old_srcrev and not args.srcrev:
raise DevtoolError("Recipe specifies a SRCREV value; you must specify a new one when upgrading")
old_ver = rd.getVar('PV')
if old_ver == args.version and old_srcrev == args.srcrev:
raise DevtoolError("Current and upgrade versions are the same version")
if args.version:
if bb.utils.vercmp_string(args.version, old_ver) < 0:
logger.warning('Upgrade version %s compares as less than the current version %s. If you are using a package feed for on-target upgrades or providing this recipe for general consumption, then you should increment PE in the recipe (or if there is no current PE value set, set it to "1")' % (args.version, old_ver))
check_prerelease_version(args.version, 'devtool upgrade')
rf = None
license_diff = None
try:
logger.info('Extracting current version source...')
rev1, srcsubdir1 = standard._extract_source(srctree, False, 'devtool-orig', False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
old_licenses = _extract_licenses(srctree, rd.getVar('LIC_FILES_CHKSUM'))
logger.info('Extracting upgraded version source...')
rev2, md5, sha256, srcbranch, srcsubdir2 = _extract_new_source(args.version, srctree, args.no_patch,
args.srcrev, args.srcbranch, args.branch, args.keep_temp,
tinfoil, rd)
new_licenses = _extract_licenses(srctree, rd.getVar('LIC_FILES_CHKSUM'))
license_diff = _generate_license_diff(old_licenses, new_licenses)
rf, copied = _create_new_recipe(args.version, md5, sha256, args.srcrev, srcbranch, srcsubdir1, srcsubdir2, config.workspace_path, tinfoil, rd, license_diff, new_licenses)
except bb.process.CmdError as e:
_upgrade_error(e, rf, srctree)
except DevtoolError as e:
_upgrade_error(e, rf, srctree)
standard._add_md5(config, pn, os.path.dirname(rf))
af = _write_append(rf, srctree, args.same_dir, args.no_same_dir, rev2,
copied, config.workspace_path, rd)
standard._add_md5(config, pn, af)
update_unlockedsigs(basepath, workspace, args.fixed_setup, [pn])
logger.info('Upgraded source extracted to %s' % srctree)
logger.info('New recipe is %s' % rf)
if license_diff:
logger.info('License checksums have been updated in the new recipe; please refer to it for the difference between the old and the new license texts.')
finally:
tinfoil.shutdown()
return 0
def latest_version(args, config, basepath, workspace):
"""Entry point for the devtool 'latest_version' subcommand"""
tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
try:
rd = parse_recipe(config, tinfoil, args.recipename, True)
if not rd:
return 1
version_info = oe.recipeutils.get_recipe_upstream_version(rd)
# "new-commits-available" is an indication that upstream never issues version tags
if not version_info['version'].endswith("new-commits-available"):
logger.info("Current version: {}".format(version_info['current_version']))
logger.info("Latest version: {}".format(version_info['version']))
if version_info['revision']:
logger.info("Latest version's commit: {}".format(version_info['revision']))
else:
logger.info("Latest commit: {}".format(version_info['revision']))
finally:
tinfoil.shutdown()
return 0
def check_upgrade_status(args, config, basepath, workspace):
if not args.recipe:
logger.info("Checking the upstream status for all recipes may take a few minutes")
results = oe.recipeutils.get_recipe_upgrade_status(args.recipe)
for result in results:
# pn, update_status, current, latest, maintainer, latest_commit, no_update_reason
if args.all or result[1] != 'MATCH':
logger.info("{:25} {:15} {:15} {} {} {}".format( result[0],
result[2],
result[1] if result[1] != 'UPDATE' else (result[3] if not result[3].endswith("new-commits-available") else "new commits"),
result[4],
result[5] if result[5] != 'N/A' else "",
"cannot be updated due to: %s" %(result[6]) if result[6] else ""))
def register_commands(subparsers, context):
"""Register devtool subcommands from this plugin"""
defsrctree = standard.get_default_srctree(context.config)
parser_upgrade = subparsers.add_parser('upgrade', help='Upgrade an existing recipe',
description='Upgrades an existing recipe to a new upstream version. Puts the upgraded recipe file into the workspace along with any associated files, and extracts the source tree to a specified location (in case patches need rebasing or adding to as a result of the upgrade).',
group='starting')
parser_upgrade.add_argument('recipename', help='Name of recipe to upgrade (just name - no version, path or extension)')
parser_upgrade.add_argument('srctree', nargs='?', help='Path to where to extract the source tree. If not specified, a subdirectory of %s will be used.' % defsrctree)
parser_upgrade.add_argument('--version', '-V', help='Version to upgrade to (PV). If omitted, latest upstream version will be determined and used, if possible.')
parser_upgrade.add_argument('--srcrev', '-S', help='Source revision to upgrade to (useful when fetching from an SCM such as git)')
parser_upgrade.add_argument('--srcbranch', '-B', help='Branch in source repository containing the revision to use (if fetching from an SCM such as git)')
parser_upgrade.add_argument('--branch', '-b', default="devtool", help='Name for new development branch to checkout (default "%(default)s")')
parser_upgrade.add_argument('--no-patch', action="store_true", help='Do not apply patches from the recipe to the new source code')
parser_upgrade.add_argument('--no-overrides', '-O', action="store_true", help='Do not create branches for other override configurations')
group = parser_upgrade.add_mutually_exclusive_group()
group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
parser_upgrade.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
parser_upgrade.set_defaults(func=upgrade, fixed_setup=context.fixed_setup)
parser_latest_version = subparsers.add_parser('latest-version', help='Report the latest version of an existing recipe',
description='Queries the upstream server for what the latest upstream release is (for git, tags are checked, for tarballs, a list of them is obtained, and one with the highest version number is reported)',
group='info')
parser_latest_version.add_argument('recipename', help='Name of recipe to query (just name - no version, path or extension)')
parser_latest_version.set_defaults(func=latest_version)
parser_check_upgrade_status = subparsers.add_parser('check-upgrade-status', help="Report upgradability for multiple (or all) recipes",
description="Prints a table of recipes together with versions currently provided by recipes, and latest upstream versions, when there is a later version available",
group='info')
parser_check_upgrade_status.add_argument('recipe', help='Name of the recipe to report (omit to report upgrade info for all recipes)', nargs='*')
parser_check_upgrade_status.add_argument('--all', '-a', help='Show all recipes, not just recipes needing upgrade', action="store_true")
parser_check_upgrade_status.set_defaults(func=check_upgrade_status)
| 46.278125 | 328 | 0.616112 |
9a7b4d402db24796ec56540c1cb6044fc9897249
| 568 |
py
|
Python
|
leetcode/047-Permutations-II/Perm2_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2017-05-18T06:11:02.000Z
|
2017-05-18T06:11:02.000Z
|
leetcode/047-Permutations-II/Perm2_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
leetcode/047-Permutations-II/Perm2_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
class Solution:
# @param {integer[]} nums
# @return {integer[][]}
def permuteUnique(self, nums):
if len(nums) == 0:
return nums
if len(nums) == 1:
return [nums]
res = []
bag = set()
for i in range(len(nums)):
if nums[i] not in bag:
tmp = nums[:]
head = tmp.pop(i)
tail = self.permuteUnique(tmp)
[t.insert(0, head) for t in tail]
res.extend(tail)
bag.add(nums[i])
return res
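# Illustrative usage (not part of the original solution):
if __name__ == "__main__":
    print(Solution().permuteUnique([1, 1, 2]))
    # expected output, up to ordering: [[1, 1, 2], [1, 2, 1], [2, 1, 1]]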
| 25.818182 | 49 | 0.431338 |
9adb33ebc25743fa7aa526d82eedcac7bf67689a
| 3,450 |
py
|
Python
|
RDS/circle3_central_services/research_manager/src/tests/test_port.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 10 |
2020-06-24T08:22:24.000Z
|
2022-01-13T16:17:36.000Z
|
RDS/circle3_central_services/research_manager/src/tests/test_port.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 78 |
2020-01-23T14:32:06.000Z
|
2022-03-07T14:11:16.000Z
|
RDS/circle3_central_services/research_manager/src/tests/test_port.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 1 |
2020-06-24T08:33:48.000Z
|
2020-06-24T08:33:48.000Z
|
import unittest
from lib.Port import Port
class Test_Port(unittest.TestCase):
def test_port_init(self):
portSmall = Port("port-owncloud")
portOwncloud = Port("port-owncloud", fileStorage=True)
portInvenio = Port("port-invenio", fileStorage=True, metadata=True)
expected = {
"port": "port-owncloud",
"properties": []
}
self.assertEqual(portSmall.getDict(), expected)
expected = {
"port": "port-owncloud",
"properties":
[
{"portType": "fileStorage", "value": True}
]
}
self.assertEqual(portOwncloud.getDict(), expected)
expected = {
"port": "port-invenio",
"properties":
[
{"portType": "fileStorage", "value": True},
{"portType": "metadata", "value": True},
]
}
self.assertEqual(portInvenio.getDict(), expected)
def test_port_change(self):
portOwncloud = Port("port-owncloud")
expected = {
"port": "port-owncloud",
"properties": []
}
self.assertEqual(portOwncloud.getDict(), expected)
self.assertEqual(portOwncloud, Port("port-owncloud"))
with self.assertRaises(ValueError):
portOwncloud.setProperty(1, True)
portOwncloud.setProperty("fileStorage", 1)
self.assertFalse(portOwncloud.setProperty("not-found", True))
self.assertTrue(portOwncloud.setProperty("fileStorage", True))
expected = {
"port": "port-owncloud",
"properties":
[
{"portType": "fileStorage", "value": True}
]
}
self.assertEqual(portOwncloud.getDict(), expected)
self.assertEqual(portOwncloud, Port("port-owncloud", fileStorage=True))
portOwncloud.setProperty("metadata", True)
expected = {
"port": "port-owncloud",
"properties":
[
{"portType": "fileStorage", "value": True},
{"portType": "metadata", "value": True}
]
}
self.assertEqual(portOwncloud.getDict(), expected)
self.assertEqual(portOwncloud, Port(
"port-owncloud", fileStorage=True, metadata=True))
def test_port_customProperties(self):
custom = [
{
"key": "serviceProjectId",
"value": "12345"
}
]
expected = {
"port": "port-owncloud",
"properties":
[
{
"portType": "customProperties",
"value": custom
}
]
}
portOwncloud = Port("port-owncloud", customProperties=custom)
self.assertEqual(portOwncloud.getDict(), expected)
expected["port"] = "port-zenodo"
portZenodo = Port("port-zenodo")
portZenodo.setProperty("customProperties", custom)
self.assertEqual(portZenodo.getDict(), expected)
self.assertEqual(portZenodo, Port.fromDict(portZenodo.getDict()))
self.assertNotEqual(portOwncloud, Port.fromDict(portZenodo.getDict()))
self.assertNotEqual(Port("port-zenodo"),
Port.fromDict(portZenodo.getDict()))
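# Not in the original file: a main guard so the tests can be run directly,
# matching the convention of the other unittest modules in this dump.
if __name__ == '__main__':
    unittest.main()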
| 31.081081 | 79 | 0.522899 |
7738a2201c4cade1bc245480457c84ed590c0f74
| 4,353 |
py
|
Python
|
scripts/minimax_plain_depth_analysis.py
|
lschmidt2507/Facharbeit2022
|
a40cb76302d2e569f7714e3bf9472206a3eaa253
|
[
"CC0-1.0"
] | null | null | null |
scripts/minimax_plain_depth_analysis.py
|
lschmidt2507/Facharbeit2022
|
a40cb76302d2e569f7714e3bf9472206a3eaa253
|
[
"CC0-1.0"
] | null | null | null |
scripts/minimax_plain_depth_analysis.py
|
lschmidt2507/Facharbeit2022
|
a40cb76302d2e569f7714e3bf9472206a3eaa253
|
[
"CC0-1.0"
] | null | null | null |
#minimax_plain_depth_analysis.py
import threading
import time
import math
from game_handler import printGame,generateTilesInfo,countPoints,readBoard,makeMove,undoMove,possibleMoves
startdepth=1
maxTime=30
global delayRunning
# funtion for stopping computation after a certain delay(is called using threading.timer)
def stopComputation():
global delayRunning
delayRunning=False
print("done!")
#actual minimax algorithm
def minimax(depth,tilesInfo,player,secLastX,secLastY,lastX,lastY,initcall):
global game
global board
global delayRunning
moves=possibleMoves(secLastX,secLastY,lastX,lastY)
totalEvaluations=0
# check if max depth reached or final game state and return evaluation if true
if(depth==0 or len(moves)==0):
        # count points, adding the remaining depth as a decimal tie-breaker to prefer longer games (the opponent gets more chances to fail); also return an evaluation count of 1
if(initcall):
return countPoints(tilesInfo)+depth/100,0,1
else:
return countPoints(tilesInfo)+depth/100,1
#check if delay running
if(not delayRunning):
if(initcall):
return 0,0,0
else:
return 0,0
else:
# maximizing player's turn
if(player=="max"):
maxEval=-1000
#try every possible move
for move in moves:
# make move and evaluate by calling recursively
makeMove(player,move)
evalu, evaluationNum=minimax(depth-1,tilesInfo,"min",lastX,lastY,move[0],move[1],False)
totalEvaluations+=evaluationNum
# undo move
undoMove(move)
# update bestmove and best evaluation
if(evalu>maxEval):
maxEval=evalu
bestmove=move
# if initial call return recommended move and check if delayrunning
if(initcall):
if(delayRunning):
return maxEval,bestmove, totalEvaluations
else:
return 0,0,0
else:
return maxEval, totalEvaluations
else:
#min player's turn
minEval=1000
#try every possible move
for move in moves:
# make move and evaluate by calling recursively
makeMove(player,move)
evalu,evaluationNum=minimax(depth-1,tilesInfo,"max",lastX,lastY,move[0],move[1],False)
totalEvaluations+=evaluationNum
# undo move
undoMove(move)
# update bestmove and best evaluation
if(evalu<minEval):
minEval=evalu
bestmove=move
# if initial call return recommended move check if delayRunning
if(initcall):
if(delayRunning):
return minEval,bestmove, totalEvaluations
else:
return 0,0,0
else:
return minEval, totalEvaluations
# read the board file and initialize the global board and game variables
readBoard("./board.table")
tilesInfo=generateTilesInfo()
# make some arbitrary moves
makeMove("min",(2,0))
makeMove("max",(2,3))
makeMove("min",(5,3))
makeMove("max",(5,0))
makeMove("min",(3,0))
# set last move data
lastX=4
secLastX=4
lastY=3
secLastY=0
#start timer
delayRunning=True
threading.Timer(maxTime,stopComputation).start()
depth=1
# run while time left
while(delayRunning):
# call minimax with current depth
startTime = time.time()
tempevalu,tempmove,totalEvaluations = minimax(depth,tilesInfo,"max",secLastX,secLastY,lastX,lastY,True)
endTime = time.time()
# check if was interrupted(returns 0)
if(not tempmove==0):
try:
print(f"{depth} & {totalEvaluations} & {round(math.exp(math.log(totalEvaluations)/depth),2)} & {round(endTime-startTime,2)} & {round(totalEvaluations/round(endTime-startTime,2),2)}")
        except Exception:
            # an elapsed time that rounds to 0.00s makes evaluations/second undefined
print(f"{depth} & {totalEvaluations} & {round(math.exp(math.log(totalEvaluations)/depth),2)} & {round(endTime-startTime,2)} & -")
# increase depth
depth+=1
| 35.390244 | 195 | 0.599586 |
f3228f2877394dfb94b3a165dcf99c3a6e3773b4
| 1,231 |
py
|
Python
|
INBa/2015/Shirlin_V_V/task_7_32.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
INBa/2015/Shirlin_V_V/task_7_32.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
INBa/2015/Shirlin_V_V/task_7_32.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
#Task 3. Variant 32
#Develop a scoring system for task 6 in which the player earns more points for fewer attempts.
#Shirlin Vyacheslav Viktorovich
#29.04.2016
import random
print ("The computer randomly picks the name of one of six tiger subspecies (Amur tiger, Bengal tiger, Indochinese tiger, Malayan tiger, Sumatran tiger, South China tiger).")
a = ["Amur tiger", "Bengal tiger", "Indochinese tiger", "Malayan tiger", "Sumatran tiger", "South China tiger"]
b = a[random.randint(0, 5)]  # randint is inclusive, so 0..5 covers all six names
points = {1: 100, 2: 50, 3: 25}  # fewer attempts earn more points
i = 0
answer = ''
while b != answer and i < 3:
    i = i+1
    answer = input('Name one of the six? ')
    if b == answer:
        print ('You guessed it! Your score:', points[i])
    elif i < 3:
        print ('Unfortunately, you did not guess! You have', 3 - i, 'attempt(s) left! Your score: 0')
    else:
        print ('Unfortunately, you did not guess! You have no attempts left! Your score: 0')
input ("Press Enter to exit.")
| 37.30303 | 198 | 0.713241 |
b260d1bfbe283492c5b38264a3c0ee3b3f52d332
| 444 |
py
|
Python
|
config.py
|
galyeshua/Index
|
96e1630efc51d2c03f2d80889dfa1d117155e2ee
|
[
"MIT"
] | null | null | null |
config.py
|
galyeshua/Index
|
96e1630efc51d2c03f2d80889dfa1d117155e2ee
|
[
"MIT"
] | null | null | null |
config.py
|
galyeshua/Index
|
96e1630efc51d2c03f2d80889dfa1d117155e2ee
|
[
"MIT"
] | null | null | null |
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
BOOTSTRAP_SERVE_LOCAL = True
DATA_FILENAME = "data.json"
MAX_CONTENT_LENGTH = 100 * 1024 * 1024 # max 100MB
    UPLOAD_EXTENSIONS = ['.pdf', '.zip']  # allowed extensions, each including the leading dot
| 31.714286 | 76 | 0.698198 |
b26c92e6cded8be8848e5349576722b9c32a73fb
| 30,130 |
py
|
Python
|
fence/resources/google/validity.py
|
scottyellis/fence
|
012ba76a58853169e9ee8e3f44a0dc510f4b2543
|
[
"Apache-2.0"
] | 31 |
2018-01-05T22:49:33.000Z
|
2022-02-02T10:30:23.000Z
|
fence/resources/google/validity.py
|
scottyellis/fence
|
012ba76a58853169e9ee8e3f44a0dc510f4b2543
|
[
"Apache-2.0"
] | 737 |
2017-12-11T17:42:11.000Z
|
2022-03-29T22:42:52.000Z
|
fence/resources/google/validity.py
|
scottyellis/fence
|
012ba76a58853169e9ee8e3f44a0dc510f4b2543
|
[
"Apache-2.0"
] | 46 |
2018-02-23T09:04:23.000Z
|
2022-02-09T18:29:51.000Z
|
"""
Objects with validity checking for Google service account registration.
"""
from collections.abc import Mapping
from fence.errors import NotFound
from fence.resources.google.utils import (
get_registered_service_accounts_with_access,
get_project_access_from_service_accounts,
get_users_from_google_members,
get_service_account_ids_from_google_members,
is_google_managed_service_account,
)
from fence.resources.google.access_utils import (
is_valid_service_account_type,
service_account_has_external_access,
is_service_account_from_google_project,
get_google_project_membership,
get_google_project_parent_org,
get_google_project_valid_users_and_service_accounts,
do_all_users_have_access_to_project,
get_project_from_auth_id,
get_google_project_number,
get_service_account_policy,
remove_white_listed_service_account_ids,
is_org_whitelisted,
is_user_member_of_google_project,
is_user_member_of_all_google_projects,
)
from cirrus.google_cloud import GoogleCloudManager
from cdislogging import get_logger
logger = get_logger(__name__)
class ValidityInfo(Mapping):
"""
A representation of validity of an item along with
information about the validity.
    It's a dict-like object which can be evaluated as a boolean value.
If the info is false-y, the validity of this object will evaluate to False.
    CAVEAT: None is allowed and will not make the validity False; this
represents a validity check that was NOT run
(e.g. we have no information)
This means that you can nest ValidityInfo objects and
the "valid" status of the parent object will always be updated when adding
new validity information.
"""
def __init__(self, default_validity=True):
# innocent until proven guilty, default validity is True
self._valid = default_validity
self._info = {}
def get(self, key, *args):
return self._info.get(key, *args)
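    # a false-y value other than None invalidates the whole object;
    # None records a check that was not run and leaves validity unchanged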
def set(self, key, value):
if not value and value is not None:
self._valid = False
self._info.__setitem__(key, value)
def __setitem__(self, key, value):
if not value and value is not None:
self._valid = False
self._info.__setitem__(key, value)
def __contains__(self, key):
return key in self._info
def __iter__(self):
for key, value in self._info.items():
yield key, value
def __getitem__(self, key):
return self._info[key]
def __delitem__(self, key):
del self._info[key]
def __len__(self):
return len(self._info)
def __bool__(self):
return self._valid
def __repr__(self):
return str(self._info)
def __str__(self):
return str(self._info)
def get_info(self):
return self._info
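# A minimal usage sketch (added for exposition, not part of the upstream module):
#
#     status = ValidityInfo()
#     status.set("check_passed", True)    # truthy: still valid
#     status.set("check_skipped", None)   # None: check not run, still valid
#     status.set("check_failed", False)   # false-y: bool(status) becomes False
#     assert not status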
class GoogleProjectValidity(ValidityInfo):
"""
A representation of a Google Project's validity regarding service account
registration for access to data.
Example Usage and Information:
project_validity = (
GoogleProjectValidity(
google_project_id='abc123',
new_service_account='[email protected]',
new_service_account_access=['projectA', 'ProjectB']
)
)
project_validity.check_validity(early_return=False)
NOTE: project_validity can be evaluated as if it's a boolean and will
be False if ANY of the values in the dict-like structure are False.
In other words, a single invalid item will make the whole project
invalid.
An example of the dict-like structure with validity info is:
{
'monitor_has_access': True,
'valid_parent_org': True,
'valid_membership': True,
'new_service_account': {
'[email protected]': {
'valid_type': True,
'no_external_access': True,
'owned_by_project': True,
'exists': True
},
}
'service_accounts': {
'[email protected]': {
'valid_type': None,
'no_external_access': True,
'owned_by_project': True,
'exists': True
},
'[email protected]': {
'valid_type': None,
'no_external_access': True,
'owned_by_project': True,
'exists': True
}
},
'access': {
'ProjectA': {
'exists': True,
'all_users_have_access': True
},
'ProjectB': {
'exists': True,
'all_users_have_access': True
},
}
}
"""
def __init__(
self,
google_project_id,
new_service_account=None,
new_service_account_access=None,
user_id=None,
google_cloud_manager=None,
*args,
**kwargs
):
"""
Initialize
Args:
google_project_id (str): Google project identifier
new_service_account (str, optional): an additional service account
identifier (ex: email) to include when checking access. You can
provide this without actually giving it access to check if
access will be valid
new_service_account_access (List(str), optional): List of
Project.auth_ids to attempt to provide the new service account
access to
user_id (None, optional): User requesting validation. ONLY pass this if you
want to check if the user is a member of this project.
"""
self.google_project_id = google_project_id
self.new_service_account = new_service_account
self.new_service_account_access = new_service_account_access or []
self.user_id = user_id
self.google_cloud_manager = google_cloud_manager or GoogleCloudManager(
google_project_id
)
super(GoogleProjectValidity, self).__init__(*args, **kwargs)
# setup default values for error information, will get updated in
# check_validity
self._info["user_has_access"] = None
self._info["monitor_has_access"] = None
self._info["valid_parent_org"] = None
self._info["valid_member_types"] = None
self._info["members_exist_in_fence"] = None
self._info["new_service_account"] = {}
self._info["service_accounts"] = {}
self._info["access"] = {}
def check_validity(self, early_return=True, db=None):
"""
Determine whether or not project is valid for registration. If
early_return is False, this object will store information about the
failure.
Args:
early_return (bool, optional): Description
"""
self.google_cloud_manager.open()
logger.debug(
"Google Project with id: {}, "
"new service account requested: {}, project access requested: {}, "
"user requesting: {}".format(
self.google_project_id,
self.new_service_account,
self.new_service_account_access,
self.user_id,
)
)
logger.debug(
"Attempting to get project number "
"for project id {}".format(self.google_project_id)
)
google_project_number = get_google_project_number(
self.google_project_id, self.google_cloud_manager
)
has_access = bool(google_project_number)
self.set("monitor_has_access", has_access)
# always early return if we can't access the project
if not has_access:
logger.warning(
"INVALID Fence's Monitoring service account does "
"NOT have access in project id {}. Monitor needs access to continue "
"checking project validity. Exiting early and determining invalid.".format(
self.google_project_id
)
)
return
logger.debug(
"Retrieving project membership "
"for project id {}".format(self.google_project_id)
)
membership = get_google_project_membership(
self.google_project_id, self.google_cloud_manager
)
logger.debug(
"Project Members: {}".format(
str(
[
getattr(member, "email_id", "unknown_member_email")
for member in membership
]
)
)
)
if self.user_id is not None:
logger.debug(
"Checking that user requesting, {}, is part of "
"the Google Project with id {}".format(
self.user_id, self.google_project_id
)
)
user_has_access = is_user_member_of_google_project(
self.user_id, self.google_cloud_manager, membership=membership, db=db
)
self.set("user_has_access", user_has_access)
if not user_has_access:
# always early return if user isn't a member on the project
logger.warning(
"INVALID User {} "
"for project id {}. User is not a member of the project so does not "
"have permission on the project.".format(
self.user_id, self.google_project_id
)
)
return
logger.debug(
"Retrieving Parent Organization "
"for project id {} to make sure it's valid".format(self.google_project_id)
)
parent_org = get_google_project_parent_org(self.google_cloud_manager)
valid_parent_org = True
if parent_org:
valid_parent_org = is_org_whitelisted(parent_org)
self.set("valid_parent_org", valid_parent_org)
if not valid_parent_org:
logger.warning(
"INVALID Parent Organization {} "
"for project id {}. No parent org is allowed unless it's explicitly "
"whitelisted in cfg.".format(parent_org, self.google_project_id)
)
if early_return:
return
logger.debug(
"Determining if other users and service accounts on "
"project id {} are valid.".format(self.google_project_id)
)
user_members = None
service_account_members = []
try:
(
user_members,
service_account_members,
) = get_google_project_valid_users_and_service_accounts(
self.google_project_id, self.google_cloud_manager, membership=membership
)
self.set("valid_member_types", True)
except Exception:
self.set("valid_member_types", False)
logger.warning(
"INVALID users and/or service accounts (SAs) on "
"project id {}.".format(self.google_project_id)
)
if early_return:
return
logger.debug("Determining if valid users exist in fence.")
# if we have valid members, we can check if they exist in fence
users_in_project = None
if user_members is not None:
try:
users_in_project = get_users_from_google_members(user_members, db=db)
self.set("members_exist_in_fence", True)
except Exception as e:
self.set("members_exist_in_fence", False)
logger.warning(
"INVALID user(s) do not exist in fence and thus, "
"we cannot determine their authZ info: {}.".format(e)
)
if early_return:
return
# use a generic validityinfo object to hold all the service accounts
# validity. then check all the service accounts. Top level will be
# invalid if any service accounts are invalid
new_service_account_validity = ValidityInfo()
if self.new_service_account:
service_account_validity_info = GoogleServiceAccountValidity(
self.new_service_account,
self.google_project_id,
google_project_number=google_project_number,
google_cloud_manager=self.google_cloud_manager,
)
service_account_id = str(self.new_service_account)
logger.debug(
"Google Project with id: {} and number: {}. "
"Beginning validation on service account for registration {}".format(
self.google_project_id, google_project_number, service_account_id
)
)
logger.debug(
"Determining if the service account {} is google-managed.".format(
service_account_id
)
)
# we do NOT need to check the service account type and external access
# for google-managed accounts.
if is_google_managed_service_account(service_account_id):
logger.debug(
"GCP SA Validity -Service account {} IS google-managed. Therefore, "
"we do NOT need to check the SA Type or if it has external access.".format(
service_account_id
)
)
service_account_validity_info.check_validity(
early_return=early_return,
check_type=True,
check_policy_accessible=True,
check_external_access=False,
)
else:
logger.debug(
"GCP SA Validity -Service account {} is NOT google-managed. Therefore, "
"we need to run all validation checks against it.".format(
service_account_id
)
)
service_account_validity_info.check_validity(
early_return=early_return,
check_type=True,
check_policy_accessible=True,
check_external_access=True,
)
# update project with error info from the service accounts
new_service_account_validity.set(
service_account_id, service_account_validity_info
)
if not service_account_validity_info:
logger.warning("INVALID service account {}.".format(service_account_id))
# if we need to return early for invalid SA, make sure to include
# error details and invalidate the overall validity
if early_return:
self.set("new_service_account", new_service_account_validity)
return
self.set("new_service_account", new_service_account_validity)
logger.debug(
"Google Project with id: {} and number: {}. "
"Beginning validation on project service accounts not requested for "
"registration.".format(self.google_project_id, google_project_number)
)
service_accounts = get_service_account_ids_from_google_members(
service_account_members
)
logger.debug("SAs on the project {}.".format(service_accounts))
remove_white_listed_service_account_ids(service_accounts)
# don't double check service account being registered
if self.new_service_account:
try:
service_accounts.remove(self.new_service_account)
except ValueError:
logger.debug(
"Service Account requested for registration is not a "
"member of the Google project."
)
# use a generic validityinfo object to hold all the service accounts
# validity. then check all the service accounts. Top level will be
# invalid if any service accounts are invalid
service_accounts_validity = ValidityInfo()
for service_account in service_accounts:
service_account_validity_info = self._get_project_sa_validity_info(
service_account, google_project_number, early_return
)
# update project with error info from the service accounts
service_accounts_validity.set(
service_account, service_account_validity_info
)
if not service_account_validity_info and early_return:
# if we need to return early for invalid SA, make sure to include
# error details and invalidate the overall validity
self.set("service_accounts", service_accounts_validity)
return
self.set("service_accounts", service_accounts_validity)
logger.debug(
"Checking data access for Google Project {}...".format(
self.google_project_id
)
)
# get the service accounts for the project to determine all the data
# the project can access through the service accounts
service_accounts = get_registered_service_accounts_with_access(
self.google_project_id, db=db
)
# don't double check service account being updated if it was previously registered
# in other words, this may be an update of existing access (from A&B to just A)
# so we need to ONLY validate the new access (which happens below when the project
# access list is extended with new access requested)
if self.new_service_account:
logger.debug(
"Removing new/updated SA {} from list of existing SAs in order "
"to only validate the newly requested access.".format(
self.new_service_account
)
)
service_accounts = [
sa
for sa in service_accounts
if sa.email.lower() != str(self.new_service_account).lower()
]
service_account_project_access = get_project_access_from_service_accounts(
service_accounts, db=db
)
logger.debug(
"Registered SAs {} current have project access: {}".format(
[sa.email for sa in service_accounts], service_account_project_access
)
)
# use a generic validityinfo object to hold all the projects validity
project_access_validities = ValidityInfo()
# extend list with any provided access to test
for provided_access in self.new_service_account_access:
project = get_project_from_auth_id(provided_access)
# if provided access doesn't exist, set error in project_validity
if not project:
logger.warning(
"INVALID project access requested. "
"Data Access with auth_id {} does not exist.".format(
provided_access
)
)
project_validity = ValidityInfo()
project_validity.set("exists", False)
project_validity.set("all_users_have_access", None)
project_access_validities.set(str(provided_access), project_validity)
else:
service_account_project_access.append(project)
logger.debug(
"New project access requested (in addition to "
"previous access): {}.".format(service_account_project_access)
)
# make sure all the users of the project actually have access to all
# the data the service accounts have access to
for project in service_account_project_access:
project_validity = ValidityInfo()
project_validity.set("exists", True)
# if all the users exist in our db, we can check if they have valid
# access
logger.debug(
"Checking that all users in project have "
"access to project with id {}".format(
getattr(project, "id", "ERROR-could-not-get-project-id")
)
)
valid_access = None
if users_in_project:
valid_access = do_all_users_have_access_to_project(
users_in_project, project.id, db=db
)
if not valid_access:
logger.warning(
"INVALID Some users do NOT have "
"access to project with id {}. users in project: {}".format(
getattr(project, "id", "ERROR-could-not-get-project-id"),
[
getattr(user, "username", "unknown_user")
for user in users_in_project
],
)
)
project_validity.set("all_users_have_access", valid_access)
project_access_validities.set(str(project.auth_id), project_validity)
self.set("access", project_access_validities)
self.google_cloud_manager.close()
return
def _get_project_sa_validity_info(
self, service_account, google_project_number, early_return
):
service_account_id = str(service_account)
service_account_validity_info = GoogleServiceAccountValidity(
service_account,
self.google_project_id,
google_project_number=google_project_number,
google_cloud_manager=self.google_cloud_manager,
)
logger.debug(
"Google Project with id: {} and number: {}. "
"Beginning validation on project service account {}".format(
self.google_project_id, google_project_number, service_account_id
)
)
logger.debug(
"Determining if the service account {} is google-managed.".format(
service_account_id
)
)
# we do NOT need to check the service account type and external access
# for google-managed accounts.
if is_google_managed_service_account(service_account_id):
logger.debug(
"Service account {} IS google-managed. Therefore, "
"we only need to detemine if it belongs.".format(service_account_id)
)
service_account_validity_info.check_validity(
early_return=early_return,
check_type=False,
check_policy_accessible=False,
check_external_access=False,
)
else:
logger.debug(
"Service account {} is NOT google-managed. Therefore, "
"we need to run all validation checks against it.".format(
service_account_id
)
)
service_account_validity_info.check_validity(
early_return=early_return,
check_type=True,
check_policy_accessible=True,
check_external_access=True,
)
return service_account_validity_info
class GoogleServiceAccountValidity(ValidityInfo):
"""
A representation of a Google Service Accounts's validity regarding
registration for access to data.
Example Usage and Information:
sa_validity = (
GoogleServiceAccountValidity(
account_id='[email protected]'
google_project_id='abc123',
google_project_number='123456789'
)
)
sa_validity.check_validity(early_return=False)
NOTE: sa_validity can be evaluated as if it's a boolean and will
be False if ANY of the values in the dict-like structure are False.
In other words, a single invalid item will make the whole project
invalid.
An example of the dict-like structure with validity info is:
{
'valid_type': True,
'no_external_access': True,
'owned_by_project': True,
'exists': True
}
"""
def __init__(
self,
account_id,
google_project_id,
google_cloud_manager=None,
google_project_number=None,
*args,
**kwargs
):
self.account_id = account_id
self.google_project_id = google_project_id
# default to the given project id if not provided
self.google_project_number = google_project_number or google_project_id
self.google_cloud_manager = google_cloud_manager or GoogleCloudManager(
google_project_id
)
super(GoogleServiceAccountValidity, self).__init__(*args, **kwargs)
# setup default values for error information, will get updated in
# check_validity
self._info["owned_by_project"] = None
self._info["valid_type"] = None
self._info["no_external_access"] = None
self._info["policy_accessible"] = None
def check_validity(
self,
early_return=True,
check_type=True,
check_external_access=True,
check_policy_accessible=True,
):
logger.debug(
"Validating Google Service Account {} for Google Project {}.".format(
self.account_id, self.google_project_id
)
)
self.google_cloud_manager.open()
# check ownership
logger.debug(
"Determining if {} is owned by the Google Project.".format(self.account_id)
)
is_owned_by_google_project = is_service_account_from_google_project(
self.account_id, self.google_project_id, self.google_project_number
)
self.set("owned_by_project", is_owned_by_google_project)
if not is_owned_by_google_project:
logger.warning(
"INVALID SA {}, it is NOT owned by the Google Project {}.".format(
self.account_id, self.google_project_id
)
)
if early_return:
self.google_cloud_manager.close()
return
# select the GCM to use for the remainder of the checks
# if the account is not owned by the google project then
# it is invalid, however, if Fence has access to the SA's
# project, we can still check the other conditions
if is_owned_by_google_project:
gcm = self.google_cloud_manager
else:
self.google_cloud_manager.close()
try:
# check to see if we can access the project the SA belongs to
project_id = self.account_id.split("@")[-1].split(".")[0]
gcm = GoogleCloudManager(project_id)
gcm.open()
except Exception:
logger.debug(
"Could not access the Google Project for Service "
"Account {}. Unable to continue validity "
"checkingwithout access to the project, "
"early exit.".format(self.account_id)
)
return
# check if the SA's policy is accessible
policy_accessible = None
sa_policy = None
if check_policy_accessible:
try:
policy_accessible = True
sa_policy = get_service_account_policy(self.account_id, gcm)
except Exception:
policy_accessible = False
gcm.close()
return
finally:
self.set("policy_accessible", policy_accessible)
if check_external_access:
if not policy_accessible:
logger.warning(
"Invalid function use. External Access check requires "
"Service Account Policy & may fail if policy is not "
"accessible. If you want to check external access, make "
"sure you are also checking policy_accessible. "
)
gcm.close()
return
no_external_access = not (
service_account_has_external_access(self.account_id, gcm, sa_policy)
)
self.set("no_external_access", no_external_access)
if not no_external_access:
logger.warning(
"INVALID SA {}, it has external access "
"(keys generated or roles on it).".format(self.account_id)
)
if early_return:
gcm.close()
return
# check if the SA is an allowed type
if check_type:
if not policy_accessible:
logger.warning(
"Policy access was not checked. If the service account's "
"policy is not accessible or the service account does not "
"exist, this check may fail."
)
# don't return early, we can still check type without checking
# policy, however, if the SA doesn't exist, this will fail
valid_type = is_valid_service_account_type(self.account_id, gcm)
self.set("valid_type", valid_type)
if not valid_type:
logger.warning(
"INVALID SA {}, it is not a valid SA type.".format(self.account_id)
)
if early_return:
gcm.close()
return
gcm.close()
| 37.382134 | 95 | 0.580916 |
b2869f264e5ec055d354b7221146603060adb850
| 1,059 |
py
|
Python
|
tests/unittests.py
|
MarkusWET/secure_password_website
|
257d0e7d463dea4777d5d92f7f56499f2a4b5044
|
[
"MIT"
] | null | null | null |
tests/unittests.py
|
MarkusWET/secure_password_website
|
257d0e7d463dea4777d5d92f7f56499f2a4b5044
|
[
"MIT"
] | null | null | null |
tests/unittests.py
|
MarkusWET/secure_password_website
|
257d0e7d463dea4777d5d92f7f56499f2a4b5044
|
[
"MIT"
] | null | null | null |
import unittest
from app import app
class LoginTests(unittest.TestCase):
# initialization logic for the test suite declared in the test module
# code that is executed before all tests in one test run
@classmethod
def setUpClass(cls):
pass
# clean up logic for the test suite declared in the test module
# code that is executed after all tests in one test run
@classmethod
def tearDownClass(cls):
pass
# initialization logic
# code that is executed before each test
def setUp(self):
# create test client
self.app = app.test_client()
# set to testing
self.app.testing = True
# clean up logic
# code that is executed after each test
def tearDown(self):
pass
# test method
def test_startpage(self):
result = self.app.get("/")
self.assertEqual(result.status_code, 200)
    # test method (name must start with "test_" so unittest discovers it)
    def test_login_md5(self):
pass
# runs the unit tests in the module
if __name__ == '__main__':
unittest.main()
| 23.021739 | 73 | 0.647781 |
ac302097036ee59eacad5e760cb4e23573204574
| 2,559 |
py
|
Python
|
Plugins/CharakterAssistent/ChoicePopup.py
|
klnrdknt/Sephrasto
|
591224fe01825a169c21ebc6136533f282ce9a0b
|
[
"MIT"
] | 15 |
2017-11-09T12:49:52.000Z
|
2022-03-06T12:18:48.000Z
|
Plugins/CharakterAssistent/ChoicePopup.py
|
klnrdknt/Sephrasto
|
591224fe01825a169c21ebc6136533f282ce9a0b
|
[
"MIT"
] | 40 |
2018-02-01T21:32:01.000Z
|
2022-03-22T11:35:28.000Z
|
Plugins/CharakterAssistent/ChoicePopup.py
|
klnrdknt/Sephrasto
|
591224fe01825a169c21ebc6136533f282ce9a0b
|
[
"MIT"
] | 13 |
2018-03-12T17:50:42.000Z
|
2022-03-06T12:21:41.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ChoicePopup.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_formMain(object):
def setupUi(self, formMain):
formMain.setObjectName("formMain")
formMain.setWindowModality(QtCore.Qt.ApplicationModal)
formMain.resize(335, 238)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(formMain.sizePolicy().hasHeightForWidth())
formMain.setSizePolicy(sizePolicy)
formMain.setModal(True)
self.gridLayout = QtWidgets.QGridLayout(formMain)
self.gridLayout.setObjectName("gridLayout")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem)
self.buttonBox = QtWidgets.QDialogButtonBox(formMain)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.horizontalLayout_3.addWidget(self.buttonBox)
self.gridLayout.addLayout(self.horizontalLayout_3, 5, 0, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem1, 1, 0, 1, 1)
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
self.retranslateUi(formMain)
self.buttonBox.accepted.connect(formMain.accept)
QtCore.QMetaObject.connectSlotsByName(formMain)
def retranslateUi(self, formMain):
_translate = QtCore.QCoreApplication.translate
formMain.setWindowTitle(_translate("formMain", "Triff deine Auswahl"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
formMain = QtWidgets.QDialog()
ui = Ui_formMain()
ui.setupUi(formMain)
formMain.show()
sys.exit(app.exec_())
| 42.65 | 115 | 0.726846 |
5a3e17f7daaec820e968699d573c3d946fc4ce53
| 305 |
py
|
Python
|
python/advanced_sw/KALO/VERSION 0.01/SAFE1AUG/test.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/advanced_sw/KALO/VERSION 0.01/SAFE1AUG/test.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/advanced_sw/KALO/VERSION 0.01/SAFE1AUG/test.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
from AppOperations import AppOperations as ao
from AppOperations import Rec
from MISman import MIS_calculations
#ao.update_Sl(3,'2018-07-30T20:15:54')
#ao.reset_slno()
#k = Rec.timestmp()
#print("Type of k : ",type(k))
#print(Rec.timestmp())
val = "T-93"
MIS_calculations.separate_data("recpt_fees",7800)
| 27.727273 | 49 | 0.757377 |
5a6931d58b8671b50665f886f638591bf11ede91
| 1,141 |
py
|
Python
|
apps/profile/forms.py
|
IT-PM-OpenAdaptronik/Webapp
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | 2 |
2017-12-17T21:28:22.000Z
|
2018-02-02T14:44:58.000Z
|
apps/profile/forms.py
|
IT-PM-OpenAdaptronik/Webapp
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | 118 |
2017-10-31T13:45:09.000Z
|
2018-02-24T20:51:42.000Z
|
apps/profile/forms.py
|
OpenAdaptronik/Rattler
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | null | null | null |
from django import forms
from . import models
from apps.user.models import User
class ProfileForm(forms.ModelForm):
class Meta:
model = models.Profile
fields = ['company', 'info', 'expert', 'visibility_mail',
'visibility_company', 'visibility_info',
'visibility_first_name','visibility_last_name']
class ProfileImageForm(forms.ModelForm):
class Meta:
model = models.ProfileImage
fields = ['path']
class UserForm(forms.ModelForm):
class Meta:
model= User
fields = ['first_name','last_name']
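# inline formsets: each attaches at most one child record (extra=1, max_num=1)
# to its parent model via the named foreign key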
ProfileImageFormSet = forms.inlineformset_factory(
models.Profile,
models.ProfileImage,
fk_name='profile',
form=ProfileImageForm,
extra=1,
max_num=1,
fields=('path',)
)
ProfileFormSet = forms.inlineformset_factory(
model=models.Profile,
parent_model=User,
fk_name='user',
form=ProfileForm,
extra=1,
max_num=1,
fields=('company', 'info', 'expert', 'visibility_mail',
'visibility_company', 'visibility_info',
'visibility_first_name','visibility_last_name',)
)
| 26.534884 | 66 | 0.64943 |
5a7e89ea99485fdfe11232f18d5f2e849a57de5d
| 511 |
py
|
Python
|
exercises/zh/test_02_10_03.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | 2 |
2020-07-07T01:46:37.000Z
|
2021-04-20T03:19:43.000Z
|
exercises/zh/test_02_10_03.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/zh/test_02_10_03.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
def test():
assert (
"span1.similarity(span2)" in __solution__ or "span2.similarity(span1)" in __solution__
), "你有计算两个span之间的相似度吗?"
assert span1.text == "不错的餐厅", "你有正确生成span1吗?"
assert span2.text == "很好的酒吧", "你有正确生成span2吗?"
assert (
0 <= float(similarity) <= 1
), "相似度分数是一个浮点数。你确定你计算正确了吗?"
__msg__.good(
"做得好!如果愿意的话你可以随便再做些比对其它实例的实验。"
"这些相似度并不*一定*是绝对正确的。一旦你要开始认真开发一些自然语言处理"
"的应用并且用到语义相似度的话,你可能需要在自己的数据上先训练词向量,或者"
"再去改进一下相似度的算法。"
)
| 31.9375 | 94 | 0.634051 |
ce82cdf35ef59afa5f450a19c5787aa7f88c7de1
| 474 |
py
|
Python
|
src/onegov/user/forms/group.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/user/forms/group.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/user/forms/group.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.form import Form
from onegov.user import _
from wtforms import StringField
from wtforms.validators import InputRequired
class UserGroupForm(Form):
""" A generic user group form for onegov.user """
name = StringField(
label=_("Name"),
validators=[
InputRequired()
]
)
def update_model(self, model):
model.name = self.name.data
def apply_model(self, model):
self.name.data = model.name
| 20.608696 | 53 | 0.64557 |
f01349868e6457e0582c7d0d91e3e5669f7e8783
| 1,737 |
py
|
Python
|
official/cv/ADNet/src/utils/get_train_videos.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/ADNet/src/utils/get_train_videos.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/ADNet/src/utils/get_train_videos.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# matlab code:
# https://github.com/hellbell/ADNet/blob/3a7955587b5d395401ebc94a5ab067759340680d/utils/get_train_videos.m
import numpy as np
from src.utils.get_benchmark_path import get_benchmark_path
from src.utils.get_benchmark_info import get_benchmark_info
def get_train_videos(opts, args):
train_db_names = opts['train_dbs']
test_db_names = opts['test_db']
video_names = []
video_paths = []
bench_names = []
for dbidx in range(len(train_db_names)):
bench_name = train_db_names[dbidx]
path_ = get_benchmark_path(bench_name, args)
video_names_ = get_benchmark_info(train_db_names[dbidx] + '-' + test_db_names)
video_paths_ = np.tile(path_, (1, len(video_names_)))
video_names.extend(video_names_)
video_paths.extend(list(video_paths_[0]))
        # np.tile replicates the benchmark name once per video, mirroring video_paths_ above
bench_names.extend(list(np.tile(bench_name, (1, len(video_names_)))[0]))
train_db = {
'video_names': video_names,
'video_paths': video_paths,
'bench_names': bench_names
}
return train_db
| 36.1875 | 106 | 0.690271 |
f03131d13835998178c7387f1b8862b539905b72
| 1,006 |
py
|
Python
|
comp/microsoft/000_missinteger.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
comp/microsoft/000_missinteger.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
comp/microsoft/000_missinteger.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
'''
https://app.codility.com/demo/results/demoHZEZJ5-D8X/
This is a demo task.
Write a function:
def solution(A)
that, given an array A of N integers, returns the smallest positive integer (greater than 0) that does not occur in A.
For example, given A = [1, 3, 6, 4, 1, 2], the function should return 5.
Given A = [1, 2, 3], the function should return 4.
Given A = [−1, −3], the function should return 1.
Write an efficient algorithm for the following assumptions:
N is an integer within the range [1..100,000];
each element of array A is an integer within the range [−1,000,000..1,000,000].
'''
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A):
# write your code in Python 3.6
pos_set = set()
for a in A:
if a > 0:
pos_set.add(a)
pos = list(pos_set)
pos.sort()
res = 1
for num in pos:
if res < num:
break
else:
res = num + 1
return res
| 21.869565 | 118 | 0.626243 |
e8de0a5350f0eb2bba79c4cfc6c1b4b6f0111a26
| 3,454 |
py
|
Python
|
official/gnn/bgcf/postprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/gnn/bgcf/postprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/gnn/bgcf/postprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
postprocess.
"""
import os
import argparse
import numpy as np
from src.metrics import BGCFEvaluate
from src.dataset import load_graph
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str, default="Beauty", help="choose which dataset")
parser.add_argument("--datapath", type=str, default="./scripts/data_mr", help="minddata path")
parser.add_argument('--input_dim', type=int, default=64, choices=[64, 128],
help="user and item embedding dimension")
parser.add_argument('--Ks', type=list, default=[5, 10, 20, 100], help="top K")
parser.add_argument('--workers', type=int, default=8, help="number of process to generate data")
parser.add_argument("--result_path", type=str, default="./result_Files", help="result path")
args = parser.parse_args()
def get_acc():
"""calculate accuracy"""
train_graph, test_graph, _ = load_graph(args.datapath)
num_user = train_graph.graph_info()["node_num"][0]
num_item = train_graph.graph_info()["node_num"][1]
input_dim = args.input_dim
user_reps = np.zeros([num_user, input_dim * 3])
item_reps = np.zeros([num_item, input_dim * 3])
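    # accumulate and average the user/item representations over the 50 exported result files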
for i in range(50):
sub_folder = os.path.join(args.result_path, 'result_Files_' + str(i))
user_rep = np.fromfile(os.path.join(sub_folder, 'amazon-beauty_0.bin'), np.float16)
user_rep = user_rep.reshape(num_user, input_dim * 3)
item_rep = np.fromfile(os.path.join(sub_folder, 'amazon-beauty_1.bin'), np.float16)
item_rep = item_rep.reshape(num_item, input_dim * 3)
user_reps += user_rep
item_reps += item_rep
user_reps /= 50
item_reps /= 50
eval_class = BGCFEvaluate(args, train_graph, test_graph, args.Ks)
test_recall_bgcf, test_ndcg_bgcf, \
test_sedp, test_nov = eval_class.eval_with_rep(user_reps, item_reps, args)
print('recall_@10:%.5f, recall_@20:%.5f, ndcg_@10:%.5f, ndcg_@20:%.5f, '
'sedp_@10:%.5f, sedp_@20:%.5f, nov_@10:%.5f, nov_@20:%.5f\n' % (test_recall_bgcf[1],
test_recall_bgcf[2],
test_ndcg_bgcf[1],
test_ndcg_bgcf[2],
test_sedp[0],
test_sedp[1],
test_nov[1],
test_nov[2]))
if __name__ == "__main__":
get_acc()
| 45.447368 | 104 | 0.563984 |
9bf4c32944555678c0be8c28962fc849b5a956b9
| 484 |
py
|
Python
|
src/onegov/feriennet/views/userprofile.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/feriennet/views/userprofile.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/feriennet/views/userprofile.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.core.security import Personal
from onegov.feriennet import FeriennetApp
from onegov.feriennet.forms import UserProfileForm
from onegov.org.models import Organisation
from onegov.org.views.userprofile import handle_user_profile
@FeriennetApp.form(
model=Organisation, name='userprofile', template='userprofile.pt',
permission=Personal, form=UserProfileForm)
def handle_custom_user_profile(self, request, form):
return handle_user_profile(self, request, form)
| 37.230769 | 70 | 0.82438 |
c5afc372c05e400d4752b1d45d198e8ddd4705c6
| 2,349 |
py
|
Python
|
crypto/PsychECC/src/server.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | null | null | null |
crypto/PsychECC/src/server.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | null | null | null |
crypto/PsychECC/src/server.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
import random
from secret import flag
from Crypto.Util.number import inverse
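# modular division: x / y (mod p), computed as x * y^(-1) via the modular inverse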
def moddiv(x,y,p):
return (x * inverse(y,p)) %p
Point = namedtuple("Point","x y")
class EllipticCurve:
INF = Point(0,0)
def __init__(self, a, b, p):
self.a = a
self.b = b
self.p = p
def add(self,P,Q):
if P == self.INF:
return Q
elif Q == self.INF:
return P
if P.x == Q.x and P.y == (-Q.y % self.p):
return self.INF
if P != Q:
Lambda = moddiv(Q.y - P.y, Q.x - P.x, self.p)
else:
Lambda = moddiv(3 * P.x**2 + self.a,2 * P.y , self.p)
Rx = (Lambda**2 - P.x - Q.x) % self.p
Ry = (Lambda * (P.x - Rx) - P.y) % self.p
return Point(Rx,Ry)
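    # scalar multiplication by the binary double-and-add method:
    # add P into the running sum for each set bit of n, doubling P at every step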
def multiply(self,P,n):
n %= self.p
if n != abs(n):
ans = self.multiply(P,abs(n))
            return Point(ans.x, -ans.y % self.p)
R = self.INF
while n > 0:
if n % 2 == 1:
R = self.add(R,P)
P = self.add(P,P)
n = n // 2
return R
# P256 parameters, secure.
p = 115792089210356248762697446949407573530086143415290314195533631308867097853951
order = 115792089210356248762697446949407573529996955224135760342422259061068512044369
a = -3
b = 41058363725152142129326129780047268409114441015993725554835256314039467401291
E = EllipticCurve(a,b,p)
print("Welcome to my prediction centre!")
print("We're always looking out for psychics!")
print("We're gonna choose a random number. You get to choose a point. We'll multiply that point by our random number.")
print("Since this curve is of perfect and prime order, it'll be impossible to break this test.")
print("Only a psychic could know!")
print("Be psychic, get the flag.")
x = int(input("Enter point x: "))
y = int(input("Enter point y: "))
P = Point(x,y)
n = random.randint(1,order)
Q = E.multiply(P,n)
print("Ok, where do you think the point will go?")
px = int(input("Enter point x: "))
py = int(input("Enter point y: "))
prediction = Point(px,py)
if prediction == E.INF or prediction == P:
print("Psychics don't use dirty tricks.")
quit()
if prediction == Q:
print("Wow! You're truly psychic!")
print(flag)
quit()
print("Better luck next time.")
print(f"Point was {Q}")
| 33.557143 | 119 | 0.60281 |
7684a56cfccea3087f6327413a98d6b00995719f
| 469 |
py
|
Python
|
tests/test_all.py
|
simonhodder/spark
|
1062e3092996f82f7bc2c852d6444c2be2a1e296
|
[
"MIT"
] | null | null | null |
tests/test_all.py
|
simonhodder/spark
|
1062e3092996f82f7bc2c852d6444c2be2a1e296
|
[
"MIT"
] | null | null | null |
tests/test_all.py
|
simonhodder/spark
|
1062e3092996f82f7bc2c852d6444c2be2a1e296
|
[
"MIT"
] | null | null | null |
import pytest
from spark import Session
from spark.model import User
from spark.model import Address
def test_all():
session = Session()
a = Address(
house_num=10,
street='Baker Street',
city='London')
u = User(
first_name='Sherlock',
family_name='Holmes',
age=77,
address=a)
session.add(u)
session.flush()
session.commit()
a = Address.query.first()
assert a is not None
| 15.129032 | 31 | 0.594883 |
4f2a92425277c903d75696c5a298df085cce0c21
| 1,602 |
py
|
Python
|
Organisation/Recherche/Wordcloud/history/tests_wordcloud_pil.py
|
mxsph/Data-Analytics
|
c82ff54b78f50b6660d7640bfee96ea68bef598f
|
[
"MIT"
] | 3 |
2020-08-24T19:02:09.000Z
|
2021-05-27T20:22:41.000Z
|
Organisation/Recherche/Wordcloud/history/tests_wordcloud_pil.py
|
mxsph/Data-Analytics
|
c82ff54b78f50b6660d7640bfee96ea68bef598f
|
[
"MIT"
] | 342 |
2020-08-13T10:24:23.000Z
|
2021-08-12T14:01:52.000Z
|
Organisation/Recherche/Wordcloud/history/tests_wordcloud_pil.py
|
visuanalytics/visuanalytics
|
f9cce7bc9e3227568939648ddd1dd6df02eac752
|
[
"MIT"
] | 8 |
2020-09-01T07:11:18.000Z
|
2021-04-09T09:02:11.000Z
|
import numpy as np
import PIL
from PIL import Image
# Source: https://stackoverflow.com/questions/30227466/combine-several-images-horizontally-with-python
# combine the digit images of a year into a single horizontal image
year = str(19)
list_im = ['mask_0.jpg', 'mask_1.jpg', 'mask_2.jpg', 'mask_3.jpg', 'mask_4.jpg', 'mask_5.jpg', 'mask_6.jpg',
'mask_7.jpg', 'mask_8.jpg', 'mask_9.jpg']
list_use = []
len_year = len(year)
print(len_year)
for i in range(len_year):
    digit = int(year[i])
    print(digit)
    list_use.append(list_im[digit])
imgs = [PIL.Image.open(i) for i in list_use]
# stack the digit images horizontally (np.hstack requires all masks to share the same height)
imgs_comb = np.hstack(imgs)
# save the combined image
imgs_comb = PIL.Image.fromarray(imgs_comb)
imgs_comb.save('year_mask.jpg')
| 30.807692 | 110 | 0.58427 |
96f3561c120d67b53ea714ac978d7f6ac7ee4115
| 2,537 |
py
|
Python
|
andinopy/base_devices/terminal.py
|
andino-systems/andinopy
|
28fc09fbdd67dd690b9b3f80f03a05c342c777e1
|
[
"Apache-2.0"
] | null | null | null |
andinopy/base_devices/terminal.py
|
andino-systems/andinopy
|
28fc09fbdd67dd690b9b3f80f03a05c342c777e1
|
[
"Apache-2.0"
] | null | null | null |
andinopy/base_devices/terminal.py
|
andino-systems/andinopy
|
28fc09fbdd67dd690b9b3f80f03a05c342c777e1
|
[
"Apache-2.0"
] | null | null | null |
# _ _ _
# / \ _ __ __| (_)_ __ ___ _ __ _ _
# / _ \ | '_ \ / _` | | '_ \ / _ \| '_ \| | | |
# / ___ \| | | | (_| | | | | | (_) | |_) | |_| |
# /_/ \_\_| |_|\__,_|_|_| |_|\___/| .__/ \__, |
# |_| |___/
# by Jakob Groß
import sys
from andinopy import andinopy_logger
from andinopy.base_devices import andinoio
from andinopy.base_devices.nextion_display import display
from andinopy.interfaces.rfid_keyboard_interface import rfid_keyboard_interface
class terminal:
# Part instances
andinoio_instance: andinoio.andinoio
display_instance: 'display'
rfid_keyboard_instance: 'rfid_keyboard_interface'
def __init__(self,
andinoio_instance: andinoio.andinoio = None,
display_instance: 'display' = None,
rfid_keyboard_instance: 'rfid_keyboard_interface' = None):
"""
        Create a new Terminal instance.
        Preconfigured device handles may be passed in; otherwise defaults are created.
"""
if sys.platform == "linux":
from andinopy.base_devices.rfid_keyboard.rfid_keyboard_i2c import rfid_keyboard_i2c
self.rfid_keyboard_instance = rfid_keyboard_instance \
if rfid_keyboard_instance is not None else rfid_keyboard_i2c()
else:
from andinopy.base_devices.rfid_keyboard.rfid_keyboard_serial import rfid_keyboard_serial
self.rfid_keyboard_instance = rfid_keyboard_instance \
if rfid_keyboard_instance is not None else rfid_keyboard_serial()
andinopy_logger.debug("Terminal starting initialization")
self.andinoio_instance = andinoio_instance if andinoio_instance is not None else andinoio.andinoio()
self.display_instance = display_instance if display_instance is not None else display()
andinopy_logger.debug("Terminal device initialized")
def start(self):
"""
Start the terminal with the custom configuration
:return: None
"""
andinopy_logger.debug("Terminal device started")
self.andinoio_instance.start()
self.display_instance.start()
self.rfid_keyboard_instance.start()
andinopy_logger.debug("Terminal - everything started")
def stop(self):
"""
Stops and resets the terminal
:return:
"""
andinopy_logger.debug("Terminal device stopped")
self.andinoio_instance.stop()
self.display_instance.stop()
self.rfid_keyboard_instance.stop()
| 39.030769 | 108 | 0.638944 |
96fb170a7a6d9f10eaa8fe7068f29425d7106d74
| 931 |
py
|
Python
|
Python/M01_ProgrammingBasics/L03_ConditionalStatementsAdvanced/Exercises/Solutions/P08_OnTimeForTheExam.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | null | null | null |
Python/M01_ProgrammingBasics/L03_ConditionalStatementsAdvanced/Exercises/Solutions/P08_OnTimeForTheExam.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | null | null | null |
Python/M01_ProgrammingBasics/L03_ConditionalStatementsAdvanced/Exercises/Solutions/P08_OnTimeForTheExam.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | 1 |
2022-02-23T13:03:14.000Z
|
2022-02-23T13:03:14.000Z
|
hour_exam = int(input())
min_exam = int(input())
hour_arrival = int(input())
min_arrival = int(input())
status = 0
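# work in minutes since midnight so the two times compare as plain integers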
exam_time = (hour_exam * 60) + min_exam
arrival_time = (hour_arrival * 60) + min_arrival
difference = arrival_time - exam_time
if difference > 0:
status = "Late"
elif difference < -30:
status = "Early"
elif -30 <= difference <= 0:
status = "On time"
print(status)
if difference > 0:
hour_late = difference // 60
min_late = difference % 60
if hour_late == 0:
print(f'{min_late} minutes after the start')
else:
print(f'{hour_late}:{min_late:02d} hours after the start')
elif difference == 0:
print('')
else:
early = exam_time - arrival_time
hour_early = early // 60
min_early = early % 60
if hour_early == 0:
print(f'{min_early} minutes before the start')
else:
print(f'{hour_early}:{min_early:02d} hours before the start')
| 21.651163 | 69 | 0.640172 |
8c08eb958f63b96fead9449d21847b5bbe851075
| 3,879 |
py
|
Python
|
DIT/jazmin_settings.py
|
antonnifo/DIT
|
7c496f37bab70229cd84c4b33332708ea8cf278b
|
[
"MIT"
] | null | null | null |
DIT/jazmin_settings.py
|
antonnifo/DIT
|
7c496f37bab70229cd84c4b33332708ea8cf278b
|
[
"MIT"
] | null | null | null |
DIT/jazmin_settings.py
|
antonnifo/DIT
|
7c496f37bab70229cd84c4b33332708ea8cf278b
|
[
"MIT"
] | null | null | null |
JAZZMIN_SETTINGS = {
# title of the window
'site_title': 'DIT Admin',
# Title on the brand, and the login screen (19 chars max)
'site_header': 'DIT',
# square logo to use for your site, must be present in static files, used for favicon and brand on top left
'site_logo': 'data_log_sheet/img/logo.png',
# Welcome text on the login screen
'welcome_sign': 'Welcome to DIT Data Log Sheet',
# Copyright on the footer
'copyright': 'antonnifo',
# The model admin to search from the search bar, search bar omitted if excluded
# 'search_model': 'auth.User',
'search_model': 'data_log_sheet.DataLogSheet',
# Field name on user model that contains avatar image
'user_avatar': None,
############
# Top Menu #
############
# Links to put along the top menu
'topmenu_links': [
# Url that gets reversed (Permissions can be added)
{'name': 'Home', 'url': 'admin:index', 'permissions': ['auth.view_user']},
# external url that opens in a new window (Permissions can be added)
# {'name': 'Support', 'url': 'https://github.com/farridav/django-jazzmin/issues', 'new_window': True},
# {'name': 'Site', 'url': 'https://www.citizenweekly.org/', 'new_window': True},
# model admin to link to (Permissions checked against model)
{'model': 'data_log_sheet.DataLogSheet'},
{'model': 'auth.User'},
# App with dropdown menu to all its models pages (Permissions checked against models)
{'app': 'data_log_sheet'},
],
#############
# User Menu #
#############
# Additional links to include in the user menu on the top right ('app' url type is not allowed)
'usermenu_links': [
{'model': 'auth.user'}
],
#############
# Side Menu #
#############
# Whether to display the side menu
'show_sidebar': True,
    # Whether to auto-expand the menu
'navigation_expanded': True,
# Hide these apps when generating side menu e.g (auth)
'hide_apps': [],
# Hide these models when generating side menu (e.g auth.user)
'hide_models': [],
# List of apps to base side menu ordering off of (does not need to contain all apps)
'order_with_respect_to': ['data_log_sheet',],
# Custom links to append to app groups, keyed on app name
# 'custom_links': {
# 'polls': [{
# 'name': 'Make Messages',
# 'url': 'make_messages',
# 'icon': 'fas fa-comments',
# 'permissions': ['polls.view_poll']
# }]
# },
# Custom icons for side menu apps/models See https://www.fontawesomecheatsheet.com/font-awesome-cheatsheet-5x/
# for a list of icon classes
'icons': {
'auth': 'fas fa-users-cog',
'auth.user': 'fas fa-user',
'auth.Group': 'fas fa-users',
'data_log_sheet.DataLogSheet': 'fas fa-file-alt',
},
# Icons that are used when one is not manually specified
'default_icon_parents': 'fas fa-chevron-circle-right',
'default_icon_children': 'fab fa-pied-piper-alt',
#############
# UI Tweaks #
#############
# Relative paths to custom CSS/JS scripts (must be present in static files)
"custom_css": None,
"custom_js": None,
# Whether to show the UI customizer on the sidebar
"show_ui_builder": False,
###############
# Change view #
###############
# Render out the change view as a single form, or in tabs, current options are
# - single
# - horizontal_tabs (default)
# - vertical_tabs
# - collapsible
# - carousel
"changeform_format": "carousel",
# override change forms on a per modeladmin basis
"changeform_format_overrides": {"auth.user": "carousel", "auth.group": "carousel",},
# Add a language dropdown into the admin
"language_chooser": False,
}
| 31.795082 | 114 | 0.599123 |
d78484879a758531427928b0c252ba3e8a3160ed
| 3,102 |
py
|
Python
|
python/en/archive/topics/command_line_arguments/TODOs/02-cmd_line_args_parsing2.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/archive/topics/command_line_arguments/TODOs/02-cmd_line_args_parsing2.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/archive/topics/command_line_arguments/TODOs/02-cmd_line_args_parsing2.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys, getopt
# 2019-03-01 (Fri)
# TODO: Make this script work!
def usage():
print("usage: python3 02-cmd_line_args_parsing2.py arg1 arg2 arg3")
def print_opts():
print("file_name =", file_name )
print("argc =", argc )
print("argv =", argv )
print("opts =", opts )
print("args =", args )
print("input_file=", input_file )
print("output_file=", output_file )
def parse_arguments( argc, argv ):
print("argv =", argv )
print("argc =", argc )
try:
# opts, args = getopt.getopt( argv, "", [])
# Input
# argv is the (entire) argument list
# "" is a short option starting with a hyphen -. Example: -h
# An argument should be followed by a colon (:).
# [] is a long option start with two hyphens --. Example: --help
# An argument should be followed by an equal sign ('=').
# Output
# opts is a list of (option, value) pairs.
# args is the list of program arguments left after the option list was stripped.
short_options = "hc:i:o:"
long_options = ["help", "config=", "input=", "output="]
opts, args = getopt.getopt( argv, short_options, long_options)
print("opts =", opts )
print("args =", args )
except getopt.GetoptError:
usage()
sys.exit(2)
config_file = ''
input_file = ''
output_file = ''
    for option, value in opts:
        if option in ("-h", "--help"):
            usage()
            sys.exit()
        elif option in ("-c", "--config"):
            config_file = value  # fixed: getopt yields the argument as `value`; `arg` was undefined
        elif option in ("-i", "--input"):
            input_file = value
        elif option in ("-o", "--output"):
            output_file = value
        else:
            usage()
            sys.exit(2)
    return config_file, input_file, output_file  # fixed: hand the parsed paths back to the caller
if __name__ == "__main__":
# Process the command line arguments
argc = len( sys.argv )
file_name = sys.argv[0]
argv = sys.argv[1:]
print("file_name =", file_name )
print("sys.argv =", sys.argv )
    config_file, input_file, output_file = parse_arguments( argc, argv )  # fixed: capture the parsed paths
'''
Last Modified: 2019-03-01 (Fri)
First Written: 2018-11-08 (Thu)
Tae-Hyung "T" Kim, [email protected]
There are a couple of popular packages to parse the command line arguments
such as getopt, argparse, and docopt. My choice is getopt. Note optparse is obsolete.
Ref: Python - Command Line Arguments
https://www.tutorialspoint.com/python/python_command_line_arguments.htm
Syntax of getopt
getopt.getopt(argv, options, [long_options])
argv is the argument list
options is a short option starting with a hyphen -. Example: -h
An argument should be followed by a colon (:).
long_options is a long option start with two hyphens --. Example: --help
An argument should be followed by an equal sign ('=').
This method returns value consisting of two elements:
the first is a list of (option, value) pairs.
The second is the list of program arguments left after the option list was stripped.
Command Examples
$ python3 02-cmd_line_args_parsing2.py arg1 arg2 arg3
$ python 02-cmd_line_args_parsing2.py arg1 arg2 arg3
'''
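# Hedged mini-demo (added; not part of the original script): getopt.getopt()
# applied to a hard-coded argv list, so the parsing described above can be seen
# without typing a command line.
def _getopt_demo():
    demo_argv = ["-i", "in.txt", "--output=out.txt", "rest"]
    opts, args = getopt.getopt(demo_argv, "hc:i:o:",
                               ["help", "config=", "input=", "output="])
    print("demo opts =", opts)   # [('-i', 'in.txt'), ('--output', 'out.txt')]
    print("demo args =", args)   # ['rest']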
| 28.722222 | 88 | 0.626692 |
d1618676c96751aae62213eeddfc4719ddc88479
| 660 |
py
|
Python
|
haas_lib_bundles/python/docs/examples/smart_panel/esp32/code/main.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | null | null | null |
haas_lib_bundles/python/docs/examples/smart_panel/esp32/code/main.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | null | null | null |
haas_lib_bundles/python/docs/examples/smart_panel/esp32/code/main.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
@File : main.py
@Description: helloworld example - periodically print "helloworld" to the console
@Author : ethan.lcz
@version : 1.0
'''
# import utime                     # the delay helpers live in the utime module
# if __name__ == '__main__':
#     while True:                  # loop forever
#         print("hellworld")       # print the "helloworld" string to the console
#         utime.sleep(1)           # sleep for one second after each print
from axp192 import *
if __name__ == '__main__':
axp = Axp192()
# axp.powerAll()
    # axp.setLCDBrightness(80)     # set the backlight brightness, 0~100
    axp.setSpkEnable(1)            # enable the speaker
# from smart_panel import *
import smart_panel
smart_panel.init()
smart_panel.load_smart_panel()
| 22 | 58 | 0.619697 |
7e6298887a3a629ee0e03a11f571bf4451a16fc2
| 4,621 |
py
|
Python
|
tests/ingestion/transformers/monosi/test_metrics.py
|
monosidev/monosi
|
a88b689fc74010b10dbabb32f4b2bdeae865f4d5
|
[
"Apache-2.0"
] | 156 |
2021-11-19T18:50:14.000Z
|
2022-03-31T19:48:59.000Z
|
tests/ingestion/transformers/monosi/test_metrics.py
|
monosidev/monosi
|
a88b689fc74010b10dbabb32f4b2bdeae865f4d5
|
[
"Apache-2.0"
] | 30 |
2021-12-27T19:30:56.000Z
|
2022-03-30T17:49:00.000Z
|
tests/ingestion/transformers/monosi/test_metrics.py
|
monosidev/monosi
|
a88b689fc74010b10dbabb32f4b2bdeae865f4d5
|
[
"Apache-2.0"
] | 14 |
2022-01-17T23:24:34.000Z
|
2022-03-29T09:27:47.000Z
|
from datetime import datetime
from uuid import uuid4
import pytest
import ingestion.transformers.monosi.metrics as metrics
@pytest.fixture
def metric():
return {
"id": str(uuid4().hex),
"table_name": "table",
"schema": "schema",
"database": "database",
"column_name": "col_name",
"metric": "metric-name",
"value": "84848.9",
"time_window_start": str(datetime.now()),
"time_window_end": str(datetime.now()),
"created_at": str(datetime.now())
}
@pytest.fixture
def metric_diff_col_and_type():
return {
"id": str(uuid4().hex),
"table_name": "table",
"schema": "schema",
"database": "database",
"column_name": "another_col",
"metric": "metric-name-2",
"value": "84848.9",
"time_window_start": str(datetime.now()),
"time_window_end": str(datetime.now()),
"created_at": str(datetime.now())
}
@pytest.fixture
def metrics_input():
return {
'columns': ['TIME_WINDOW_START', 'TIME_WINDOW_END'],
'rows': [
]
}
@pytest.fixture
def metrics_input_one():
return {
'columns': ['TIME_WINDOW_START', 'TIME_WINDOW_END'],
'rows': [
{
'WINDOW_START': '123',
'WINDOW_END': '123',
'TABLE_NAME': 'table',
'SCHEMA_NAME': 'schema',
'DATABASE_NAME': 'database',
'anything___else': 'value'
}
]
}
@pytest.fixture
def metrics_input_multiple():
return {
'columns': ['TIME_WINDOW_START', 'TIME_WINDOW_END'],
'rows': [
{
'WINDOW_START': '123',
'WINDOW_END': '123',
'TABLE_NAME': 'table',
'SCHEMA_NAME': 'schema',
'DATABASE_NAME': 'database',
'anything___else': 'value',
'one___more': 'value'
}
]
}
def test__transform_empty_metrics(metrics_input):
output_arr = metrics.MetricTransformer._transform(metrics_input)
assert len(output_arr) == 0
def test__transform_one_metric(metrics_input_one):
output_arr = metrics.MetricTransformer._transform(metrics_input_one)
assert len(output_arr) == 1
def test__transform_multiple_metrics(metrics_input_multiple):
output_arr = metrics.MetricTransformer._transform(metrics_input_multiple)
assert len(output_arr) == 2
@pytest.fixture
def normalized_schema():
return metrics.MetricTransformer._normalized_schema()
def test__normalized_schema_correct(normalized_schema, metric):
input_arr = [metric]
is_correct = metrics.MetricTransformer.match(input_arr, normalized_schema)
assert is_correct == True
def test__normalized_schema_correct_multiple(normalized_schema, metric, metric_diff_col_and_type):
input_arr = [metric, metric_diff_col_and_type]
is_correct = metrics.MetricTransformer.match(input_arr, normalized_schema)
assert is_correct == True
def test__normalized_schema_incorrect_to_have_none(normalized_schema):
input_arr = []
is_correct = metrics.MetricTransformer.match(input_arr, normalized_schema)
assert is_correct == False
def test__normalized_schema_incorrect(normalized_schema, metric):
input_arr = [{'anything': 'atall'}]
is_correct = metrics.MetricTransformer.match(input_arr, normalized_schema)
assert is_correct == False
def test__normalized_schema_incorrect_multiple(normalized_schema, metric):
input_arr = [metric, {'anything': 'else'}]
is_correct = metrics.MetricTransformer.match(input_arr, normalized_schema)
assert is_correct == False
# TODO OOOOOOOOOOOOOOOP
@pytest.fixture
def original_schema():
return metrics.MetricTransformer._original_schema()
def test__original_schema_correct(original_schema, metrics_input_one):
is_correct = metrics.MetricTransformer.match(metrics_input_one, original_schema)
assert is_correct == True
def test__original_schema_correct_multiple(original_schema, metrics_input_multiple):
is_correct = metrics.MetricTransformer.match(metrics_input_multiple, original_schema)
assert is_correct == True
def test__original_schema_incorrect_to_have_none(original_schema, metrics_input):
is_correct = metrics.MetricTransformer.match(metrics_input, original_schema)
assert is_correct == False
def test__original_schema_incorrect(original_schema):
input_arr = [{'anything': 'goeshere'}]
is_correct = metrics.MetricTransformer.match(input_arr, original_schema)
assert is_correct == False
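# Hedged sketch (added; not the real monosi implementation): a _transform()
# consistent with the fixtures above -- every row key outside the fixed metadata
# columns is treated as one "column___metric" measurement and becomes one
# normalized metric dict. All field names below are inferred from the fixtures.
_RESERVED_ROW_KEYS = {'WINDOW_START', 'WINDOW_END', 'TABLE_NAME',
                      'SCHEMA_NAME', 'DATABASE_NAME'}

def _transform_sketch(results):
    normalized = []
    for row in results['rows']:
        for key, value in row.items():
            if key in _RESERVED_ROW_KEYS:
                continue
            column_name, metric_name = key.split('___')
            normalized.append({
                'id': str(uuid4().hex),
                'table_name': row['TABLE_NAME'],
                'schema': row['SCHEMA_NAME'],
                'database': row['DATABASE_NAME'],
                'column_name': column_name,
                'metric': metric_name,
                'value': value,
                'time_window_start': row['WINDOW_START'],
                'time_window_end': row['WINDOW_END'],
                'created_at': str(datetime.now()),
            })
    return normalized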
| 29.621795 | 98 | 0.675179 |
0eb131abaf4798bda047b122c8add371333f91a3
| 347 |
py
|
Python
|
2-resources/_Past-Projects/LambdaSQL-master/LambdaSQL-master/module2/titanic_exploration.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
2-resources/_Past-Projects/LambdaSQL-master/LambdaSQL-master/module2/titanic_exploration.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
2-resources/_Past-Projects/LambdaSQL-master/LambdaSQL-master/module2/titanic_exploration.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
"""
Titanic Data
"""
import psycopg2
from module2.creds import cred
cloud = psycopg2.connect(
dbname=cred.dbname,
user=cred.user,
password=cred.password,
host=cred.host,
)
curs = cloud.cursor()
curs.execute("""
SELECT name, age
FROM Titanic
WHERE Age < 18 and sex = 'female';
""")
print("Kids:", *curs.fetchall(), sep='\n')
| 13.88 | 42 | 0.657061 |
7ebce8954edc68f34f0304cb662e16a466838526
| 400 |
py
|
Python
|
src/main/bootup.py
|
MBogert/ReminderCory
|
a687c70a12f49a807d9fb023d45f799292a37f26
|
[
"MIT"
] | null | null | null |
src/main/bootup.py
|
MBogert/ReminderCory
|
a687c70a12f49a807d9fb023d45f799292a37f26
|
[
"MIT"
] | null | null | null |
src/main/bootup.py
|
MBogert/ReminderCory
|
a687c70a12f49a807d9fb023d45f799292a37f26
|
[
"MIT"
] | null | null | null |
def main():
    # Pseudo-Implementation
    # TODO Implement logic in comment lines
    # Initialize Logger
    # Initialize db from data-file
    # Initialize cory
    # Health checks (if any)
    # Welcome message
    # Begin runtime (initialize at idle)
    pass  # fixed: a comment-only body is a SyntaxError, so stub it out

if __name__ == "__main__":
    main()

# TODO implementations (fixed: added missing parentheses and stub bodies so the module parses)
def initialize_cory():
    pass

def read_reminder_file(cory, filename):
    pass

def run_health_checks():
    pass

def run_cory():
    pass
adf523b4f68706e9e2562370ca9d27049107c8ca
| 278 |
py
|
Python
|
backend/apps/iamstudent/migrations/0011_merge_20200404_2309.py
|
n-hackert/match4healthcare
|
761248c27b49e568c545c643a72eac9a040649d7
|
[
"MIT"
] | 2 |
2020-03-28T13:56:39.000Z
|
2020-03-29T10:16:12.000Z
|
backend/apps/iamstudent/migrations/0011_merge_20200404_2309.py
|
n-hackert/match4healthcare
|
761248c27b49e568c545c643a72eac9a040649d7
|
[
"MIT"
] | 76 |
2020-03-27T21:53:04.000Z
|
2020-03-30T20:27:43.000Z
|
backend/apps/iamstudent/migrations/0011_merge_20200404_2309.py
|
n-hackert/match4healthcare
|
761248c27b49e568c545c643a72eac9a040649d7
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.4 on 2020-04-02 23:09
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('iamstudent', '0010_custom_migration'),
('iamstudent', '0009_merge_20200402_2309'),
]
operations = [
]
| 18.533333 | 51 | 0.654676 |
adfe5d6ffd067abf150a81bc59a5b806061cdc34
| 3,365 |
py
|
Python
|
v302/python/mittelwert-kondensator.py
|
chrbeckm/anfaenger-praktikum
|
51764ff23901de1bc3d16dc935acfdc66bb2b2b7
|
[
"MIT"
] | 2 |
2019-12-10T10:25:11.000Z
|
2021-01-26T13:59:40.000Z
|
v302/python/mittelwert-kondensator.py
|
chrbeckm/anfaenger-praktikum
|
51764ff23901de1bc3d16dc935acfdc66bb2b2b7
|
[
"MIT"
] | null | null | null |
v302/python/mittelwert-kondensator.py
|
chrbeckm/anfaenger-praktikum
|
51764ff23901de1bc3d16dc935acfdc66bb2b2b7
|
[
"MIT"
] | 1 |
2020-12-06T21:24:58.000Z
|
2020-12-06T21:24:58.000Z
|
import numpy as np
import uncertainties.unumpy as unp
from uncertainties import ufloat
from scipy.stats import sem
print('====================')
print('Kondensator ANFANG')
print('====================')
print('Wert 1 ANFANG')
cao2 = 994 * 10**(-9)
pot1 = 6.03
c2 = ufloat(cao2, cao2 * 0.002)
pot2 = 10 - pot1
pot3 = pot1 / pot2
pot = ufloat(pot3, pot3 * 0.005)
ca1 = c2 / pot
print('ca1', ca1)
cao2 = 750 * 10**(-9)
pot1 = 5.305
c2 = ufloat(cao2, cao2 * 0.002)
pot2 = 10 - pot1
pot3 = pot1 / pot2
pot = ufloat(pot3, pot3 * 0.005)
cb1 = c2 / pot
print('cb1', cb1)
cao2 = 399 * 10**(-9)
pot1 = 3.78
c2 = ufloat(cao2, cao2 * 0.002)
pot2 = 10 - pot1
pot3 = pot1 / pot2
pot = ufloat(pot3, pot3 * 0.005)
cc1 = c2 / pot
print('cc1', cc1)
c = np.array([unp.nominal_values(ca1), unp.nominal_values(cb1), unp.nominal_values(cc1)])
d = np.array([unp.std_devs(ca1), unp.std_devs(cb1), unp.std_devs(cc1)])
np.savetxt('build/wert1.txt', np.column_stack([c, d]), header='c d')
print('c-mean', c.mean())
print('c-std', sem(c))
print('Wert 1 ENDE')
print('====================')
print('Wert 3 ANFANG')
cao2 = 994 * 10**(-9)
pot1 = 7.05
c2 = ufloat(cao2, cao2 * 0.002)
pot2 = 10 - pot1
pot3 = pot1 / pot2
pot = ufloat(pot3, pot3 * 0.005)
ca1 = c2 / pot
print('ca1', ca1)
cao2 = 750 * 10**(-9)
pot1 = 6.395
c2 = ufloat(cao2, cao2 * 0.002)
pot2 = 10 - pot1
pot3 = pot1 / pot2
pot = ufloat(pot3, pot3 * 0.005)
cb1 = c2 / pot
print('cb1', cb1)
cao2 = 399 * 10**(-9)
pot1 = 4.89
c2 = ufloat(cao2, cao2 * 0.002)
pot2 = 10 - pot1
pot3 = pot1 / pot2
pot = ufloat(pot3, pot3 * 0.005)
cc1 = c2 / pot
print('cc1', cc1)
c = np.array([unp.nominal_values(ca1), unp.nominal_values(cb1), unp.nominal_values(cc1)])
d = np.array([unp.std_devs(ca1), unp.std_devs(cb1), unp.std_devs(cc1)])
np.savetxt('build/wert3.txt', np.column_stack([c, d]), header='c d')
print('c-mean', c.mean())
print('c-std', sem(c))
print('Wert 3 ENDE')
print('====================')
print('Wert 8 ANFANG')
cao2 = 994 * 10**(-9)
pot1 = 1.175
rao2 = 5000
c2 = ufloat(cao2, cao2 * 0.002)
pot2 = 10 - pot1
pot3 = pot1 / pot2
pot = ufloat(pot3, pot3 * 0.005)
ca1 = c2 / pot
r2 = ufloat(rao2, rao2 * 0.03)
ra1 = r2 * pot
print('ra1', ra1)
print('ca1', ca1)
print('.')
cao2 = 750 * 10**(-9)
pot1 = 4.35
rao2 = 875
c2 = ufloat(cao2, cao2 * 0.002)
pot2 = 10 - pot1
pot3 = pot1 / pot2
pot = ufloat(pot3, pot3 * 0.005)
cb1 = c2 / pot
r2 = ufloat(rao2, rao2 * 0.03)
rb1 = r2 * pot
print('rb1', rb1)
print('cb1', cb1)
print('.')
cao2 = 399 * 10**(-9)
pot1 = 5.78
rao2 = 410
c2 = ufloat(cao2, cao2 * 0.002)
pot2 = 10 - pot1
pot3 = pot1 / pot2
pot = ufloat(pot3, pot3 * 0.005)
cc1 = c2 / pot
r2 = ufloat(rao2, rao2 * 0.03)
rc1 = r2 * pot
print('rc1', rc1)
print('cc1', cc1)
print('.')
c = np.array([unp.nominal_values(ca1), unp.nominal_values(cb1), unp.nominal_values(cc1)])
dc = np.array([unp.std_devs(ca1), unp.std_devs(cb1), unp.std_devs(cc1)])
r = np.array([unp.nominal_values(ra1), unp.nominal_values(rb1), unp.nominal_values(rc1)])
dr = np.array([unp.std_devs(ra1), unp.std_devs(rb1), unp.std_devs(rc1)])
np.savetxt('build/wert8.txt', np.column_stack([c, dc, r, dr]), header='c stdc r stdr')
c = c[1::]
r = r[1::]
print('c-mean', c.mean())
print('c-std', sem(c))
print('r-mean', r.mean())
print('r-std', sem(r))
print('Wert 8 ENDE')
print('====================')
print('Kondensator ENDE')
print('====================')
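# Hedged refactoring sketch (added; defined only, not executed): each capacitance
# block above repeats the same pattern, which could be factored into one helper.
# The 0.2 % and 0.5 % uncertainties are taken from the blocks above.
def capacitance_from_pot(c2_nominal, pot1):
    c2 = ufloat(c2_nominal, c2_nominal * 0.002)
    ratio = pot1 / (10 - pot1)
    pot = ufloat(ratio, ratio * 0.005)
    return c2 / pot
# e.g. capacitance_from_pot(994e-9, 6.03) reproduces ca1 from "Wert 1".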
| 23.368056 | 89 | 0.608024 |
70d5472dbe1d12ec14a16666a43deb4ff0e8ae72
| 1,218 |
py
|
Python
|
python/data_sutram/scraper/test_im.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/data_sutram/scraper/test_im.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/data_sutram/scraper/test_im.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
# Mechanize headless
import mechanize
import cookielib  # fixed: the code below uses cookielib.LWPCookieJar, so import the module itself
import urllib
# Browser
br = mechanize.Browser()
# Cookie Jar
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)
# Browser options
br.set_handle_equiv(True)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
# Follows refresh 0 but not hangs on refresh > 0
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
# Want debugging messages?
#br.set_debug_http(True)
#br.set_debug_redirects(True)
#br.set_debug_responses(True)
# User-Agent (this is cheating, ok?)
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
r = br.open('http://www.example.com/html-page-with-dynamic-image-embedded.html')
# Check if HTML content returned ok
if br.response().info()['Content-Type'] == 'text/html; charset=iso-8859-1':
# Now that the main page is loaded you can open the dynamic image
r = br.open('http://www.example.com/images/image.php')
# From here you just treat the image as you wish
png = r.read()
f = open('image-new-name.png', 'wb')
f.write(png)
f.close()
| 32.918919 | 137 | 0.731527 |
1ddf2922520f6aa4bfc3738376a3a0ecbbf5263e
| 855 |
py
|
Python
|
tests/integration/test_resource_groups.py
|
pcrete/skil-python
|
672a1aa9e8af020c960ab9ee280cbb6b194afc3f
|
[
"Apache-2.0"
] | 23 |
2018-09-19T13:34:27.000Z
|
2022-02-14T09:49:35.000Z
|
tests/integration/test_resource_groups.py
|
pcrete/skil-python
|
672a1aa9e8af020c960ab9ee280cbb6b194afc3f
|
[
"Apache-2.0"
] | 33 |
2018-10-18T07:58:05.000Z
|
2019-05-16T08:24:12.000Z
|
tests/integration/test_resource_groups.py
|
pcrete/skil-python
|
672a1aa9e8af020c960ab9ee280cbb6b194afc3f
|
[
"Apache-2.0"
] | 11 |
2018-10-21T18:58:57.000Z
|
2022-02-14T09:49:36.000Z
|
import pytest
import skil
import uuid
from skil.resources.groups import *
_sk = None
def _get_sk():
global _sk
if _sk is None:
_sk = skil.Skil()
return _sk
def test_group_add_and_deletion():
sk = _get_sk()
res = skil.resources.storage.S3(sk, "s3" + str(uuid.uuid1())[:6], "test_bucket",
"test_region", "test_credentials")
group = ResourceGroup(sk, str(uuid.uuid1())[:6])
group.add_resource(res)
groups = group.get_all_resources()
assert len(groups) == 1
assert groups[0].resource_id == res.resource_id
# TODO deletion does not seem to work. investigate why!
# group.delete_resource(res)
# groups = group.get_all_resources()
# assert len(groups) == 0
res.delete()
group.delete()
if __name__ == '__main__':
pytest.main([__file__])
| 20.357143 | 84 | 0.62924 |
38476eb7a96a95db3013a4cf28ef9c8b1792c0c4
| 1,766 |
py
|
Python
|
2-resources/_External-learning-resources/02-pyth/python-patterns-master/tests/creational/test_pool.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
2-resources/_External-learning-resources/02-pyth/python-patterns-master/tests/creational/test_pool.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
2-resources/_External-learning-resources/02-pyth/python-patterns-master/tests/creational/test_pool.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | 1 |
2021-11-05T07:48:26.000Z
|
2021-11-05T07:48:26.000Z
|
import queue
import unittest
from patterns.creational.pool import ObjectPool
class TestPool(unittest.TestCase):
def setUp(self):
self.sample_queue = queue.Queue()
self.sample_queue.put("first")
self.sample_queue.put("second")
def test_items_recoil(self):
with ObjectPool(self.sample_queue, True) as pool:
self.assertEqual(pool, "first")
self.assertTrue(self.sample_queue.get() == "second")
self.assertFalse(self.sample_queue.empty())
self.assertTrue(self.sample_queue.get() == "first")
self.assertTrue(self.sample_queue.empty())
def test_frozen_pool(self):
with ObjectPool(self.sample_queue) as pool:
self.assertEqual(pool, "first")
self.assertEqual(pool, "first")
self.assertTrue(self.sample_queue.get() == "second")
self.assertFalse(self.sample_queue.empty())
self.assertTrue(self.sample_queue.get() == "first")
self.assertTrue(self.sample_queue.empty())
class TestNaitivePool(unittest.TestCase):
"""def test_object(queue):
queue_object = QueueObject(queue, True)
print('Inside func: {}'.format(queue_object.object))"""
def test_pool_behavior_with_single_object_inside(self):
sample_queue = queue.Queue()
sample_queue.put("yam")
with ObjectPool(sample_queue) as obj:
# print('Inside with: {}'.format(obj))
self.assertEqual(obj, "yam")
self.assertFalse(sample_queue.empty())
self.assertTrue(sample_queue.get() == "yam")
self.assertTrue(sample_queue.empty())
# sample_queue.put('sam')
# test_object(sample_queue)
# print('Outside func: {}'.format(sample_queue.get()))
# if not sample_queue.empty():
| 33.961538 | 60 | 0.657984 |
38d12590f59234246f75d9aae0a5c6aa79e453a3
| 3,036 |
py
|
Python
|
year_2/prog_base_sem1_cw/app/api_1_0/users.py
|
honchardev/KPI
|
f8425681857c02a67127ffb05c0af0563a8473e1
|
[
"MIT"
] | null | null | null |
year_2/prog_base_sem1_cw/app/api_1_0/users.py
|
honchardev/KPI
|
f8425681857c02a67127ffb05c0af0563a8473e1
|
[
"MIT"
] | 21 |
2020-03-24T16:26:04.000Z
|
2022-02-18T15:56:16.000Z
|
year_2/prog_base_sem1_cw/app/api_1_0/users.py
|
honchardev/KPI
|
f8425681857c02a67127ffb05c0af0563a8473e1
|
[
"MIT"
] | null | null | null |
from flask import jsonify, request, current_app, url_for
from . import api
from ..models import User, Post
from .errors import bad_request
@api.route('/users/')
def get_users():
offset = request.args.get('offset') or 5
    try:
        offset = int(offset)
    except (TypeError, ValueError):  # tightened from a bare except
        return bad_request('Bad offset argument.')
users = User.query.order_by(User.id).slice(0, offset)
# If request has args on /users/
if request.args:
users_GotArg = []
users_all = User.query.all()
if request.args.get('fn'):
for user in users_all:
if user.name == request.args.get('fn'):
users_GotArg.append(user)
if request.args.get('un'):
for user in users_all:
if user.username.startswith(request.args.get('un')):
users_GotArg.append(user)
return jsonify(
{
'total': len(users_GotArg),
'users': [user.to_json() for user in users_GotArg]
}
)
# If it's just a regular request on /users/
return jsonify(
{
'total': User.query.count(),
'offset': offset,
'users': [user.to_json() for user in users]
}
)
@api.route('/users/<int:id>')
def get_user(id):
user = User.query.get_or_404(id)
return jsonify(user.to_json())
@api.route('/users/<int:id>/posts/')
def get_user_posts(id):
user = User.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
page,
per_page=current_app.config['WIDT_POSTS_PER_PAGE'],
)
posts = pagination.items
prev_page = None
if pagination.has_prev:
prev_page = url_for('api.get_user_posts', id=id, page=page-1, _external=True)
next_page = None
if pagination.has_next:
next_page = url_for('api.get_user_posts', id=id, page=page+1, _external=True)
return jsonify(
{
'count': pagination.total,
'posts': [post.to_json() for post in posts],
'prev_page': prev_page,
'next_page': next_page,
}
)
@api.route('/users/<int:id>/feed/')
def get_user_followed_posts(id):
user = User.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
pagination = user.followed_posts.order_by(Post.timestamp.desc()).paginate(
page,
per_page=current_app.config['WIDT_POSTS_PER_PAGE']
)
posts = pagination.items
prev_page = None
if pagination.has_prev:
prev_page = url_for('api.get_user_followed_posts', id=id, page=page-1, _external=True)
next_page = None
if pagination.has_next:
next_page = url_for('api.get_user_followed_posts', id=id, page=page+1, _external=True)
return jsonify(
{
'posts': [post.to_json() for post in posts],
'count': pagination.total,
'prev_page': prev_page,
'next_page': next_page
}
)
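# Hedged client sketch (added; not part of the blueprint): a helper exercising
# the endpoints above. The base URL (blueprint prefix, host, port) and the
# `requests` dependency are assumptions; nothing here runs at import time.
def _demo_client(base_url='http://localhost:5000/api'):
    import requests  # assumed to be available
    users = requests.get(base_url + '/users/', params={'offset': 10}).json()
    posts = requests.get(base_url + '/users/1/posts/', params={'page': 2}).json()
    return users['total'], posts['count']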
| 30.979592 | 94 | 0.594203 |
aa749c417c666122123352558e10206592246cdc
| 2,889 |
py
|
Python
|
scripts/figA5.py
|
gavstrik/BC
|
2085872f1ff4241dcf4a5ef64eaa0601fb8ac454
|
[
"MIT"
] | null | null | null |
scripts/figA5.py
|
gavstrik/BC
|
2085872f1ff4241dcf4a5ef64eaa0601fb8ac454
|
[
"MIT"
] | null | null | null |
scripts/figA5.py
|
gavstrik/BC
|
2085872f1ff4241dcf4a5ef64eaa0601fb8ac454
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import pandas as pd
from scipy.stats import mode, ks_2samp, mannwhitneyu, shapiro, pearsonr
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = "sans-serif"
PLOTS_DIR = '../plots'
"""
histograms of guess distributions partitioned into groups and rounds.
"""
datafile_AMT = pd.read_csv('../data/amt.csv')
def most_common_numbers(array):
first = mode(array)[0][0]
array = array[array != first]
second = mode(array)[0][0]
array = array[array != second]
third = mode(array)[0][0]
array = array[array != third]
fourth = mode(array)[0][0]
array = array[array != fourth]
fifth = mode(array)[0][0]
return first, second, third, fourth, fifth
def plot_histograms(df_AMT):
fig, axes = plt.subplots(nrows=4, ncols=2, sharex=True, sharey=True,
figsize=(8,8))
axes = axes.flatten()
bins = np.linspace(-0.5, 100.5, 102)
colors = ['#4575b4', 'orange', 'red']
df_2 = df_AMT[df_AMT['name'] == 'GoR_'+str(2)]
df_4 = df_AMT[df_AMT['name'] == 'GoR_'+str(4)]
df_8 = df_AMT[df_AMT['name'] == 'GoR_'+str(8)]
for i in range(1,len(axes)+1):
data = df_AMT[df_AMT['round'] == i].guess.values.astype(int)
data_2 = df_2[df_2['round'] == i].guess.values.astype(int)
data_4 = df_4[df_4['round'] == i].guess.values.astype(int)
data_8 = df_8[df_8['round'] == i].guess.values.astype(int)
axes[i-1].hist([data_2, data_4, data_8],
stacked=True, density=True, color=colors, bins=bins,
label=['2 players', '4 players', '8 players'])
# axes[i-1].hist(data, bins=bins, density=True, color=colors[0])
axes[i-1].set_title('round '+str(i), fontsize='smaller')
avg = np.around(np.mean(data),2)
first, second, third, fourth, fifth = most_common_numbers(data)
N = len(data)
axes[i-1].annotate("mean = "+str(avg)+'\nmode = '+str(first),
xy=(0.7, 0.9), xycoords='axes fraction', fontsize=7,
horizontalalignment='left', verticalalignment='top')
# plotting paraphernalia
# Set the ticks and ticklabels for all axes
plt.setp(axes, xticks=[0, 22, 33, 50, 100], xticklabels=[0, 22, 33, 50, 100])
for ax in axes:
ax.set_xticklabels([0, 22, 33, 50, 100], fontsize='x-small')
ax.set_yticklabels([0,0.1,0.2], fontsize='x-small')
ax.legend(fontsize='x-small', loc='upper left')
fig.tight_layout()
# Remember: save as pdf and transparent=True for Adobe Illustrator
if not os.path.exists(PLOTS_DIR):
os.makedirs(PLOTS_DIR)
plt.savefig(os.path.join(PLOTS_DIR, 'figA5.png'), transparent=True, dpi=300)
plt.savefig(os.path.join(PLOTS_DIR, 'figA5.pdf'), transparent=True, dpi=300)
plt.show()
# main code
df_AMT = pd.DataFrame(datafile_AMT)
plot_histograms(df_AMT)
| 36.56962 | 81 | 0.619592 |
2d55c635179769de07550c25a60f02140a27ba4a
| 750 |
py
|
Python
|
elements/python/7/6/soln.py
|
mmcloughlin/problems
|
6095842ffe007a12ec8c2093850515aa4e046616
|
[
"MIT"
] | 11 |
2019-02-08T06:54:34.000Z
|
2021-08-07T18:57:39.000Z
|
elements/python/7/6/soln.py
|
mmcloughlin/problems
|
6095842ffe007a12ec8c2093850515aa4e046616
|
[
"MIT"
] | 1 |
2019-05-21T08:14:10.000Z
|
2019-05-21T08:14:10.000Z
|
elements/python/7/6/soln.py
|
mmcloughlin/problems
|
6095842ffe007a12ec8c2093850515aa4e046616
|
[
"MIT"
] | null | null | null |
def say(digits):
cur = digits[0]
run = 1
result = ''
for digit in digits[1:]:
if digit == cur:
run += 1
continue
result += str(run) + cur
cur = digit
run = 1
result += str(run) + cur
return result
def look_and_say(n):
digits = '1'
for _ in xrange(n):
digits = say(digits)
return digits
def test():
expect = [
1,
11,
21,
1211,
111221,
312211,
13112221,
1113213211,
]
for n, digits in enumerate(expect):
assert look_and_say(n) == str(digits)
print 'pass'
def main():
test()
if __name__ == '__main__':
main()
| 16.666667 | 45 | 0.449333 |
933ad89a120b4cce5b993027be90d01878672a18
| 1,804 |
py
|
Python
|
tests/data/test_load.py
|
MatPoliquin/retro
|
c70c174a9818d1e97bc36e61abb4694d28fc68e1
|
[
"MIT-0",
"MIT"
] | 2,706 |
2018-04-05T18:28:50.000Z
|
2022-03-29T16:56:59.000Z
|
tests/data/test_load.py
|
MatPoliquin/retro
|
c70c174a9818d1e97bc36e61abb4694d28fc68e1
|
[
"MIT-0",
"MIT"
] | 242 |
2018-04-05T22:30:42.000Z
|
2022-03-19T01:55:11.000Z
|
tests/data/test_load.py
|
MatPoliquin/retro
|
c70c174a9818d1e97bc36e61abb4694d28fc68e1
|
[
"MIT-0",
"MIT"
] | 464 |
2018-04-05T19:10:34.000Z
|
2022-03-28T13:33:32.000Z
|
import retro
import pytest
import gc
import gzip
import os
import zlib
from retro.testing import game, handle
from concurrent.futures import ProcessPoolExecutor, TimeoutError
from concurrent.futures.process import BrokenProcessPool
pool = ProcessPoolExecutor(1)
@pytest.fixture(scope="module")
def processpool():
def run(fn, *args):
global pool
try:
future = pool.submit(fn, *args)
return future.result(2)
except BrokenProcessPool:
pool = ProcessPoolExecutor(1)
return [], [(args[0], 'subprocess crashed')]
except TimeoutError:
return [], [(args[0], 'task timed out')]
yield run
pool.shutdown()
def load(game, inttype):
errors = []
rom = retro.data.get_romfile_path(game, inttype)
emu = retro.RetroEmulator(rom)
emu.step()
del emu
gc.collect()
return [], errors
def state(game, inttype):
errors = []
states = retro.data.list_states(game, inttype)
if not states:
return [], []
rom = retro.data.get_romfile_path(game, inttype | retro.data.Integrations.STABLE)
emu = retro.RetroEmulator(rom)
for statefile in states:
try:
with gzip.open(retro.data.get_file_path(game, statefile + '.state', inttype), 'rb') as fh:
state = fh.read()
except (IOError, zlib.error):
errors.append((game, 'state failed to decode: %s' % statefile))
continue
emu.set_state(state)
emu.step()
del emu
gc.collect()
return [], errors
def test_load(game, processpool):
warnings, errors = processpool(load, *game)
handle(warnings, errors)
def test_state(game, processpool):
warnings, errors = processpool(state, *game)
handle(warnings, errors)
| 23.736842 | 102 | 0.632483 |
fac84c6b38588a5252beeb3fd5a41ce1849f6c94
| 2,851 |
py
|
Python
|
weibo/login/weiboLogin.py
|
haiboz/weiboSpider
|
517cae2ef3e7bccd9e1d328a40965406707f5362
|
[
"Apache-2.0"
] | null | null | null |
weibo/login/weiboLogin.py
|
haiboz/weiboSpider
|
517cae2ef3e7bccd9e1d328a40965406707f5362
|
[
"Apache-2.0"
] | null | null | null |
weibo/login/weiboLogin.py
|
haiboz/weiboSpider
|
517cae2ef3e7bccd9e1d328a40965406707f5362
|
[
"Apache-2.0"
] | null | null | null |
#encoding:utf8
'''
Created on 2016-04-11
@author: wb-zhaohaibo
'''
import urllib2
import cookielib
import WeiboEncode
import WeiboSearch
class WeiboLogin(object):
def __init__(self, user, pwd, enableProxy = False):
"初始化WeiboLogin,enableProxy表示是否使用代理服务器,默认关闭"
print "初始化登录程序..."
self.userName = user
self.passWord = pwd
self.enableProxy = enableProxy
self.serverUrl = "http://login.sina.com.cn/sso/prelogin.php?entry=weibo&callback=sinaSSOController.preloginCallBack&su=&rsakt=mod&client=ssologin.js(v1.4.11)&_=1379834957683"
self.loginUrl = "http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.11)"
self.postHeader = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:24.0) Gecko/20100101 Firefox/24.0'}
def Login(self):
"登陆程序"
self.EnableCookie(self.enableProxy)#cookie或代理服务器配置
serverTime, nonce, pubkey, rsakv = self.GetServerTime()#登陆的第一步
postData = WeiboEncode.PostEncode(self.userName, self.passWord, serverTime, nonce, pubkey, rsakv)#加密用户和密码
print "Post data length:\n", len(postData)
req = urllib2.Request(self.loginUrl, postData, self.postHeader)
print "Posting request..."
        result = urllib2.urlopen(req)  # login step two - following Sina Weibo's login flow
text = result.read()
try:
            loginUrl = WeiboSearch.sRedirectData(text)  # parse the redirect target
urllib2.urlopen(loginUrl)
except:
print 'Login error!'
return False
        print 'Login success!'
return True
def EnableCookie(self, enableProxy):
"Enable cookie & proxy (if needed)."
        cookiejar = cookielib.LWPCookieJar()  # create the cookie jar
cookie_support = urllib2.HTTPCookieProcessor(cookiejar)
if enableProxy:
            proxy_support = urllib2.ProxyHandler({'http':'http://xxxxx.pac'})  # use a proxy
opener = urllib2.build_opener(proxy_support, cookie_support, urllib2.HTTPHandler)
print "Proxy enabled"
else:
opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
        urllib2.install_opener(opener)  # install the opener that carries the cookies
def GetServerTime(self):
"Get server time and nonce, which are used to encode the password"
print "Getting server time and nonce..."
        serverData = urllib2.urlopen(self.serverUrl).read()  # fetch the page content
print "serverData="+serverData
try:
            serverTime, nonce, pubkey, rsakv = WeiboSearch.sServerData(serverData)  # parse out serverTime, nonce, etc.
return serverTime, nonce, pubkey, rsakv
except:
print 'Get server time & nonce error!'
return None
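# Hedged usage sketch (added): the credentials below are placeholders.
# if __name__ == '__main__':
#     login = WeiboLogin('[email protected]', 'password', enableProxy=False)
#     login.Login()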
| 38.527027 | 183 | 0.617327 |
e21134e8d2914688a76a8130c25ea39f097db91e
| 3,018 |
py
|
Python
|
7_DeepLearning-GANs/04_Generative_Adversarial_Attacks/targeted_attack.py
|
felixdittrich92/DeepLearning-tensorflow-keras
|
2880d8ed28ba87f28851affa92b6fa99d2e47be9
|
[
"Apache-2.0"
] | null | null | null |
7_DeepLearning-GANs/04_Generative_Adversarial_Attacks/targeted_attack.py
|
felixdittrich92/DeepLearning-tensorflow-keras
|
2880d8ed28ba87f28851affa92b6fa99d2e47be9
|
[
"Apache-2.0"
] | null | null | null |
7_DeepLearning-GANs/04_Generative_Adversarial_Attacks/targeted_attack.py
|
felixdittrich92/DeepLearning-tensorflow-keras
|
2880d8ed28ba87f28851affa92b6fa99d2e47be9
|
[
"Apache-2.0"
] | null | null | null |
'''
Generative adversarial attacks - influencing the output of the network
(e.g. making it report a different class than the true one), either through noise
spread over the whole image that is invisible to the human eye, or by attaching
patches (small images placed in the foreground) which then force e.g. a
different classification.
targeted - with a specific, "forced" target class
'''
import tensorflow as tf
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
import numpy as np
import matplotlib.pyplot as plt
from mnistCnn import *
from mnistData import *
from plotting import *
mnist_data = MNIST()
x_train, y_train = mnist_data.get_train_set()
x_test, y_test = mnist_data.get_test_set()
def adversarial_noise(model, image, label):
with tf.GradientTape() as tape:
tape.watch(image)
prediction = model(image, training=False)[0]
loss = tf.keras.losses.categorical_crossentropy(label, prediction)
# Get the gradients of the loss w.r.t to the input image.
gradient = tape.gradient(loss, image)
# Get the sign of the gradients to create the noise
signed_grad = tf.sign(gradient)
return signed_grad
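# Added note: the signed gradient above is the FGSM building block. The targeted
# loop in __main__ applies x_adv <- clip(x_adv - eps * sign(grad_x L(x, y_target)), 0, 1),
# i.e. gradient descent on the target-class loss, nudging the prediction toward
# target_label_idx a little on every step.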
if __name__ == "__main__":
cnn = build_cnn()
lr = 0.0005
optimizer = Adam(lr=lr)
cnn.compile(
loss="categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
#cnn.fit(x_train, y_train, verbose=1,
# batch_size=256, epochs=10,
# validation_data=(x_test, y_test))
path = "/home/felix/Desktop/DeepLearning/7_DeepLearning-GANs/04_Generative_Adversarial_Attacks/weights/mnist_cnn.h5"
#cnn.save_weights(path)
cnn.load_weights(path)
#score = cnn.evaluate(x_test, y_test, verbose=0)
#print("Test accuracy: ", score[1])
sample_idx = np.random.randint(low=0, high=x_test.shape[0])
image = np.array([x_test[sample_idx]])
true_label = y_test[sample_idx]
true_label_idx = np.argmax(true_label)
y_pred = cnn.predict(image)[0]
print("Right class: ", true_label_idx)
print("Prob. right class: ", y_pred[true_label_idx])
    eps = 0.001  # strength of the noise added per step
    image_adv = tf.convert_to_tensor(image, dtype=tf.float32)  # convert the image into a tensor
target_label_idx = 9
target_label = tf.one_hot(target_label_idx, 10)
while (np.argmax(y_pred) != target_label_idx):
# image_adv = image_adv + eps * noise
        noise = adversarial_noise(cnn, image_adv, target_label)  # fixed: a targeted attack descends the target-class loss; true_label left target_label unused
if np.sum(noise) == 0.0:
break
image_adv = image_adv - eps * noise
image_adv = tf.clip_by_value(image_adv, 0, 1)
y_pred = cnn.predict(image_adv)[0]
print("Prob. right class: ", y_pred[true_label_idx])
print("Prob. target class: ", y_pred[target_label_idx], "\n")
plot_img(image_adv.numpy(), cmap="gray")
plot_img(noise.numpy(), cmap="gray")
| 37.725 | 127 | 0.678595 |
10645f967cb90505e0a8d95a0c49e24a7b4bbd9b
| 8,786 |
py
|
Python
|
research/cv/DDRNet/src/data/augment/transforms.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/DDRNet/src/data/augment/transforms.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/DDRNet/src/data/augment/transforms.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""image transformer"""
import math
import random
import warnings
import numpy as np
from PIL import Image
from PIL import ImageEnhance, ImageOps
class ShearX:
def __init__(self, fillcolor=(128, 128, 128)):
self.fillcolor = fillcolor
def __call__(self, x, magnitude):
return x.transform(
x.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
Image.BICUBIC, fillcolor=self.fillcolor)
class ShearY:
def __init__(self, fillcolor=(128, 128, 128)):
self.fillcolor = fillcolor
def __call__(self, x, magnitude):
return x.transform(
x.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
Image.BICUBIC, fillcolor=self.fillcolor)
class TranslateX:
def __init__(self, fillcolor=(128, 128, 128)):
self.fillcolor = fillcolor
def __call__(self, x, magnitude):
return x.transform(
x.size, Image.AFFINE, (1, 0, magnitude * x.size[0] * random.choice([-1, 1]), 0, 1, 0),
fillcolor=self.fillcolor)
class TranslateY:
def __init__(self, fillcolor=(128, 128, 128)):
self.fillcolor = fillcolor
def __call__(self, x, magnitude):
return x.transform(
x.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * x.size[1] * random.choice([-1, 1])),
fillcolor=self.fillcolor)
class Rotate:
# from https://stackoverflow.com/questions/
# 5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
def __call__(self, x, magnitude):
rot = x.convert("RGBA").rotate(magnitude)
return Image.composite(rot, Image.new("RGBA", rot.size, (128,) * 4), rot).convert(x.mode)
class Color:
def __call__(self, x, magnitude):
return ImageEnhance.Color(x).enhance(1 + magnitude * random.choice([-1, 1]))
class Posterize:
def __call__(self, x, magnitude):
return ImageOps.posterize(x, magnitude)
class Solarize:
def __call__(self, x, magnitude):
return ImageOps.solarize(x, magnitude)
class Contrast:
def __call__(self, x, magnitude):
return ImageEnhance.Contrast(x).enhance(1 + magnitude * random.choice([-1, 1]))
class Sharpness:
def __call__(self, x, magnitude):
return ImageEnhance.Sharpness(x).enhance(1 + magnitude * random.choice([-1, 1]))
class Brightness:
def __call__(self, x, magnitude):
return ImageEnhance.Brightness(x).enhance(1 + magnitude * random.choice([-1, 1]))
class AutoContrast:
def __call__(self, x, magnitude):
return ImageOps.autocontrast(x)
class Equalize:
def __call__(self, x, magnitude):
return ImageOps.equalize(x)
class Invert:
def __call__(self, x, magnitude):
return ImageOps.invert(x)
class ToNumpy:
def __call__(self, pil_img):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.rollaxis(np_img, 2) # HWC to CHW
return np_img
_pil_interpolation_to_str = {
Image.NEAREST: 'PIL.Image.NEAREST',
Image.BILINEAR: 'PIL.Image.BILINEAR',
Image.BICUBIC: 'PIL.Image.BICUBIC',
Image.LANCZOS: 'PIL.Image.LANCZOS',
Image.HAMMING: 'PIL.Image.HAMMING',
Image.BOX: 'PIL.Image.BOX',
}
def _pil_interp(method):
"""_pil_interp"""
if method == 'bicubic':
output = Image.BICUBIC
elif method == 'lanczos':
output = Image.LANCZOS
elif method == 'hamming':
output = Image.HAMMING
else:
# default bilinear, do we want to allow nearest?
output = Image.BILINEAR
return output
_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
class Resize:
"""Resize"""
def __init__(self, size, interpolation='bilinear'):
if isinstance(size, (list, tuple)):
self.size = tuple(size)
else:
self.size = (size, size)
self.interpolation = _pil_interp(interpolation)
def __call__(self, img):
img = img.resize(self.size, self.interpolation)
return img
class RandomResizedCropAndInterpolation:
"""Crop the given PIL Image to random size and aspect ratio with random interpolation.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.),
interpolation='bilinear'):
if isinstance(size, (list, tuple)):
self.size = tuple(size)
else:
self.size = (size, size)
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
if interpolation == 'random':
self.interpolation = _RANDOM_INTERPOLATION
else:
self.interpolation = _pil_interp(interpolation)
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for _ in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return j, i, w, h
# Fallback to central crop
in_ratio = img.size[0] / img.size[1]
if in_ratio < min(ratio):
w = img.size[0]
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = img.size[1]
w = int(round(h * max(ratio)))
else: # whole image
w = img.size[0]
h = img.size[1]
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return j, i, w, h
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
left, top, width, height = self.get_params(img, self.scale, self.ratio)
if isinstance(self.interpolation, (tuple, list)):
interpolation = random.choice(self.interpolation)
else:
interpolation = self.interpolation
img = img.crop((left, top, left + width, top + height))
img = img.resize(self.size, interpolation)
return img
def __repr__(self):
if isinstance(self.interpolation, (tuple, list)):
interpolate_str = ' '.join([_pil_interpolation_to_str[x] for x in self.interpolation])
else:
interpolate_str = _pil_interpolation_to_str[self.interpolation]
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
format_string += ', interpolation={0})'.format(interpolate_str)
return format_string
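# Hedged usage sketch (added; not part of the original module): composing the
# transforms above on a PIL image. The file name is an assumption; the block
# only runs when the module is executed directly.
if __name__ == "__main__":
    img = Image.open("sample.jpg").convert("RGB")  # hypothetical input image
    crop = RandomResizedCropAndInterpolation(224, interpolation="random")
    arr = ToNumpy()(crop(img))  # uint8 array in CHW layout
    print(arr.shape)            # e.g. (3, 224, 224)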
| 32.420664 | 98 | 0.6112 |
10a82b2d11be8d842a3706c5be82cc90600973cf
| 1,673 |
py
|
Python
|
TranspaGOOGLE-master/TanspaGOOGLE.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
TranspaGOOGLE-master/TanspaGOOGLE.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
TranspaGOOGLE-master/TanspaGOOGLE.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
#!/usr/bin/env python3
#github.com/AngelSecurityTeam/TranspaGOOGLE
import optparse
import sys
import requests
import json
tmpDomainList=[]
def lookup(domainName):
nextLink=''
url='https://transparencyreport.google.com/transparencyreport/api/v3/httpsreport/ct/certsearch?include_expired=true&include_subdomains=true&domain='+domainName
content = requests.get(url)
lines=(content.text).split("\n")
contentStr=""
for x in lines:
x=x.strip()
if x!=")]}'":
contentStr+=x
data = json.loads(contentStr)
x=0
while x<len((data[0][1])):
foundDomain=data[0][1][x][1]
if foundDomain not in tmpDomainList:
print(foundDomain)
tmpDomainList.append(foundDomain)
x+=1
nextLink=data[0][3][1]
return nextLink
def lookupNextPage(tmpLink):
url='https://transparencyreport.google.com/transparencyreport/api/v3/httpsreport/ct/certsearch/page?p='+tmpLink
content = requests.get(url)
lines=(content.text).split("\n")
contentStr=""
for x in lines:
x=x.strip()
if x!=")]}'":
contentStr+=x
data = json.loads(contentStr)
x=0
while x<len((data[0][1])):
foundDomain=data[0][1][x][1]
if foundDomain not in tmpDomainList:
print(foundDomain)
tmpDomainList.append(foundDomain)
x+=1
nextLink=data[0][3][1]
return nextLink
parser = optparse.OptionParser()
parser.add_option('-d', action="store", dest="domainName")
options, remainder = parser.parse_args()
if options.domainName:
domainName=options.domainName
nextLink=lookup(domainName)
try:
while len(nextLink)>0:
if len(nextLink)>0:
nextLink=lookupNextPage(nextLink)
except TypeError:
pass
else:
print("[!] Please provide a domain name using the -d argument")
sys.exit()
| 24.602941 | 160 | 0.723849 |
52ec494954ac43a325093f841ee5efbe521270b6
| 3,942 |
py
|
Python
|
back-end/src/analysis/NeuralNet/655/nn_mapper.py
|
akshah/iodb
|
80fbad1cb639e2cad304d6565cf4918ee5b4e4c0
|
[
"Apache-2.0"
] | null | null | null |
back-end/src/analysis/NeuralNet/655/nn_mapper.py
|
akshah/iodb
|
80fbad1cb639e2cad304d6565cf4918ee5b4e4c0
|
[
"Apache-2.0"
] | null | null | null |
back-end/src/analysis/NeuralNet/655/nn_mapper.py
|
akshah/iodb
|
80fbad1cb639e2cad304d6565cf4918ee5b4e4c0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Adapted from an example by Michael G. Noll at:
#
# http://www.michael-noll.com/wiki/Writing_An_Hadoop_MapReduce_Program_In_Python
#
'''
import sys, urllib, re
import numpy as np
# Read pairs as lines of input from STDIN
#for line in sys.stdin:
# We assume that we are fed a series of URLs, one per line
# url = line.strip()
# Fetch the content and output the title (pairs are tab-delimited)
dataOriginal= np.loadtxt('wine.data')
print "Shah","\t", "Anant"
'''
import hashlib
import Image
import sys
data=list()
imagelist=list()
imgname=list()
for line in sys.stdin:
dataline = line.strip()
imgname=dataline.split("\t")
#imagelist.append(dataline)
imagelist.append(imgname[1])
#print "Keyr1","\tThis is length",len(imagelist)
#print "Keyr1","\t",name[1]
def md5Checksum(filePath):
fh = open(filePath, 'rb')
m = hashlib.md5()
while True:
data = fh.read(8192)
if not data:
break
m.update(data)
return m.hexdigest()
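# Added note: md5Checksum() reads in 8192-byte chunks to keep memory use flat
# for large files; hashlib.md5 supports incremental update(), so the digest
# equals a one-shot hash of the entire file.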
for z in range(len(imagelist)):
#print 'The MD5 checksum of ',imagelist[i],'is ', md5Checksum(imagelist[i])
im = Image.open(imagelist[z])
pix= im.load()
size= im.size
#print 'Column: ',size[0],'Rows :',size[1]
#print pix[100,100][1]
#calculate row mean
rowmeanR=list()
rowmeanG=list()
rowmeanB=list()
csumr=0
csumg=0
csumb=0
for r in range(size[1]):
csumr=0
csumg=0
csumb=0
for c in range(size[0]):
csumr=csumr+pix[c,r][0]
csumg=csumg+pix[c,r][1]
csumb=csumb+pix[c,r][2]
rowmeanR.append(csumr/size[0])
rowmeanG.append(csumg/size[0])
rowmeanB.append(csumb/size[0])
csumr=0
csumg=0
csumb=0
for i in range(len(rowmeanR)):
csumr=csumr+rowmeanR[i]
for i in range(len(rowmeanG)):
csumg=csumg+rowmeanG[i]
for i in range(len(rowmeanB)):
csumb=csumb+rowmeanB[i]
ROWMEAN_R=csumr/len(rowmeanR)
ROWMEAN_G=csumg/len(rowmeanG)
ROWMEAN_B=csumb/len(rowmeanB)
#calculate column mean
colmeanR=list()
colmeanG=list()
colmeanB=list()
rsumr=0
rsumg=0
rsumb=0
for c in range(size[0]):
rsumr=0
rsumg=0
rsumb=0
for r in range(size[1]):
rsumr=rsumr+pix[c,r][0]
rsumg=rsumg+pix[c,r][1]
rsumb=rsumb+pix[c,r][2]
colmeanR.append(rsumr/size[1])
colmeanG.append(rsumg/size[1])
colmeanB.append(rsumb/size[1])
rsumr=0
rsumg=0
rsumb=0
for i in range(len(colmeanR)):
rsumr=rsumr+colmeanR[i]
for i in range(len(colmeanG)):
rsumg=rsumg+colmeanG[i]
for i in range(len(colmeanB)):
rsumb=rsumb+colmeanB[i]
COLMEAN_R=rsumr/len(colmeanR)
COLMEAN_G=rsumg/len(colmeanG)
COLMEAN_B=rsumb/len(colmeanB)
#calculate DCT Row mean
dctrowmeanR=list()
dctrowmeanG=list()
dctrowmeanB=list()
csumr=0
csumg=0
csumb=0
for r in range(size[1]):
csumr=0
csumg=0
csumb=0
for c in range(size[0]):
csumr=csumr+pix[c,r][0]
csumg=csumg+pix[c,r][1]
csumb=csumb+pix[c,r][2]
dctrowmeanR.append(csumr/size[0])
dctrowmeanG.append(csumg/size[0])
dctrowmeanB.append(csumb/size[0])
csumr=0
csumg=0
csumb=0
for i in range(len(dctrowmeanR)):
csumr=csumr+dctrowmeanR[i]
for i in range(len(dctrowmeanG)):
csumg=csumg+dctrowmeanG[i]
for i in range(len(dctrowmeanB)):
csumb=csumb+dctrowmeanB[i]
DCT_ROWMEAN_R=csumr/len(dctrowmeanR)
DCT_ROWMEAN_G=csumg/len(dctrowmeanG)
DCT_ROWMEAN_B=csumb/len(dctrowmeanB)
#select category
category=1
tokens = imagelist[z].split('.')
if int(tokens[0]) >= 1:
if int(tokens[0]) < 6:
category=1
if (int(tokens[0]) >= 6) and (int(tokens[0]) < 11):
category=2
if (int(tokens[0]) >= 11) and (int(tokens[0]) < 16):
category=3
#print features of this image
#print category,"\t",category,",",ROWMEAN_R,",",ROWMEAN_G,",",ROWMEAN_B,",",COLMEAN_R,",",COLMEAN_G,",",COLMEAN_B
outstr=str(category)+","+str(ROWMEAN_R)+","+str(ROWMEAN_G)+","+str(ROWMEAN_B)+","+str(COLMEAN_R)+","+str(COLMEAN_G)+","+str(COLMEAN_B)
print "keyr1","\t",outstr
print "keyr2","\t",outstr
print "keyr3","\t",outstr
| 23.188235 | 135 | 0.676053 |
0dea3ae22e010767a2506547182f23b3b038a948
| 481 |
py
|
Python
|
Python/zzz_training_challenge/Python_Challenge/solutions/ch06_arrays/intro/intro_numpy_size.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/ch06_arrays/intro/intro_numpy_size.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/ch06_arrays/intro/intro_numpy_size.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
# Example program for the book "Python Challenge"
#
# Copyright 2020 by Michael Inden
import numpy as np
import sys
numbers = [i for i in range(100_000)]
print("Size of each element: ", sys.getsizeof(numbers[0]))
print(numbers[99_999])
print("Size of the list: ", sys.getsizeof(numbers))
numbers_array = np.arange(100_000)
print("Size of each element: ", numbers_array.itemsize)
print(numbers_array[99_999])
print("Size of the Numpy array: ", sys.getsizeof(numbers_array))
| 22.904762 | 64 | 0.746362 |
33f43d8dd75183d8b94d8185574855e2f615c4a2
| 6,834 |
py
|
Python
|
isj_proj05.py
|
SnasiCze/ISJ
|
2284cb0d53aad5dd0bfc6230224700628be9e454
|
[
"MIT"
] | null | null | null |
isj_proj05.py
|
SnasiCze/ISJ
|
2284cb0d53aad5dd0bfc6230224700628be9e454
|
[
"MIT"
] | null | null | null |
isj_proj05.py
|
SnasiCze/ISJ
|
2284cb0d53aad5dd0bfc6230224700628be9e454
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# DECLARATION OF PLACEHOLDER CONSTANTS
NULA=0
JEDNA=1
DVA=2
# End of the declarations
''' Polynomial class for handling various polynomials, as per the assignment '''
class Polynomial:
    ''' Definition of __init__; prepares the data for the other operations/functions '''
    def __init__(self, *args, **arg):  # definition of __init__
        self.polynom = []  # declaration
        if args:  # check whether a list was passed in
            if len(args) != NULA and type(args[NULA]) == list: self.polynom = args[NULA].copy()  # if it is a list and non-empty, keep a copy of it
            else: [self.polynom.append(i) for i in args]  # walk all values and append each to self.polynom
        if arg:  # keyword arguments instead of a list
            for i in list(arg):  # walk all keys
                if arg[i] != NULA:  # when the coefficient is non-zero
                    while len(self.polynom) <= (int(i[JEDNA:])): self.polynom.append(NULA)  # pad with zeros up to the required power
                    self.polynom[int(i[JEDNA:])] = arg[i]  # store the remaining coefficient

    def __str__(self):
        ''' Printing helper that formats the output data '''
        vysledek = ""  # declaration
        if not len(self.polynom): return "0"  # if the list is empty, return zero
        for i, koefi in enumerate(reversed(self.polynom)):  # walk all values
            if koefi:  # coefficient
                if koefi < NULA: znam, koefi = (' - ' if vysledek else '- '), -koefi  # when the coefficient is negative
                elif koefi > NULA: znam = (' + ' if vysledek else '')  # when it is positive
                str_koefi = '' if koefi == JEDNA and (len(self.polynom) - (i + JEDNA)) != NULA else str(koefi)
                if (len(self.polynom) - (i + JEDNA)) == NULA: str_mocnina = ''  # when the power equals zero
                elif (len(self.polynom) - (i + JEDNA)) == JEDNA: str_mocnina = 'x'  # when the power equals one
                else: str_mocnina = ('x' + '^' + str((len(self.polynom) - (i + JEDNA))))  # build the power part
                vysledek += (znam + str_koefi + str_mocnina)  # append the term
        return vysledek

    def __eq__(self, dalsi):
        ''' Comparison that checks for equality '''
        if type(dalsi) == Polynomial and self.polynom == dalsi.polynom: return True  # check it is a Polynomial and that both are equal
        return False

    def __add__(self, dalsi):
        ''' Add one polynomial to the other (or vice versa), depending on length '''
        list = []  # declaration of the result list
        if len(self.polynom) > len(dalsi.polynom): [dalsi.polynom.append(NULA) for i in range((len(self.polynom) - len(dalsi.polynom)))]  # compare the lengths and pad the shorter one with zeros
        else: [self.polynom.append(NULA) for i in range((len(dalsi.polynom) - len(self.polynom)))]  # pad the other operand
        [list.append(self.polynom[i] + dalsi.polynom[i]) for i in range(len(self.polynom))]  # walk both and merge them into one
        return Polynomial(list)  # return the list

    def __mul__(self, dalsi):
        ''' Multiply the polynomials and return the new value '''
        list = ([NULA] * ((len(self.polynom) + len(dalsi.polynom)) - JEDNA))  # create the result list
        for i in range(len(self.polynom)):  # two nested loops
            for j in range(len(dalsi.polynom)): list[i + j] += (self.polynom[i] * dalsi.polynom[j])  # fill the list with the products
        return Polynomial(list)  # return the list

    def __pow__(self, mocnina):
        ''' Inspect the exponent and perform the matching operation on the polynomial data '''
        if mocnina == NULA: return Polynomial(JEDNA)  # if the exponent is 0, return 1
        elif mocnina >= JEDNA:  # check that the exponent is at least 1
            if mocnina == JEDNA: return self  # if the exponent is 1, return what came into the function
            else:
                vysledek = self  # the first factor
                mocnina += JEDNA  # adjust the exponent
                for i in range(DVA, mocnina): vysledek = (vysledek * self)  # multiply in the remaining factors
                return vysledek  # return the value
        return NULA

    def derivative(self):
        ''' Function computing the derivative of the polynomial '''
        derivace = []  # declaration of the result list
        [derivace.append((self.polynom[i] * i)) for i in range(JEDNA, len(self.polynom))]  # walk the list from the second index, differentiate every item and collect the results
        return Polynomial(derivace)  # return the derivative

    def at_value(self, *arg):
        ''' Function evaluating the polynomial at the given value(s) '''
        hodnota = NULA  # declaration
        hodnota1 = NULA  # declaration
        if len(arg) == JEDNA:  # argument count == 1
            for i in range(len(self.polynom)): hodnota += (self.polynom[i] * (arg[NULA] ** i))  # accumulate all the terms
            return hodnota  # return the value
        elif len(arg) == DVA:  # argument count == 2
            for i in range(len(self.polynom)):  # walk the list
                hodnota += (self.polynom[i] * (arg[NULA] ** i))  # accumulate into the first value
                hodnota1 += (self.polynom[i] * (arg[(NULA+JEDNA)] ** i))  # accumulate into the second value
            return (hodnota1 - hodnota)  # return the difference
        else: return NULA  # unsupported state

def test():
    ''' Function that exercises all functions of the Polynomial class '''
assert str(Polynomial(0,1,0,-1,4,-2,0,1,3,0)) == "3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x"
assert str(Polynomial([-5,1,0,-1,4,-2,0,1,3,0])) == "3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x - 5"
assert str(Polynomial(x7=1, x4=4, x8=3, x9=0, x0=0, x5=-2, x3= -1, x1=1)) == "3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x"
assert str(Polynomial(x2=0)) == "0"
assert str(Polynomial(x0=0)) == "0"
assert Polynomial(x0=2, x1=0, x3=0, x2=3) == Polynomial(2,0,3)
assert Polynomial(x2=0) == Polynomial(x0=0)
assert str(Polynomial(x0=1)+Polynomial(x1=1)) == "x + 1"
assert str(Polynomial([-1,1,1,0])+Polynomial(1,-1,1)) == "2x^2"
pol1 = Polynomial(x2=3, x0=1)
pol2 = Polynomial(x1=1, x3=0)
assert str(pol1+pol2) == "3x^2 + x + 1"
assert str(pol1+pol2) == "3x^2 + x + 1"
assert str(Polynomial(x0=-1,x1=1)**1) == "x - 1"
assert str(Polynomial(x0=-1,x1=1)**2) == "x^2 - 2x + 1"
pol3 = Polynomial(x0=-1,x1=1)
assert str(pol3**4) == "x^4 - 4x^3 + 6x^2 - 4x + 1"
assert str(pol3**4) == "x^4 - 4x^3 + 6x^2 - 4x + 1"
assert str(Polynomial(x0=2).derivative()) == "0"
assert str(Polynomial(x3=2,x1=3,x0=2).derivative()) == "6x^2 + 3"
assert str(Polynomial(x3=2,x1=3,x0=2).derivative().derivative()) == "12x"
pol4 = Polynomial(x3=2,x1=3,x0=2)
assert str(pol4.derivative()) == "6x^2 + 3"
assert str(pol4.derivative()) == "6x^2 + 3"
assert Polynomial(-2,3,4,-5).at_value(0) == -2
assert Polynomial(x2=3, x0=-1, x1=-2).at_value(3) == 20
assert Polynomial(x2=3, x0=-1, x1=-2).at_value(3,5) == 44
pol5 = Polynomial([1,0,-2])
assert pol5.at_value(-2.4) == -10.52
assert pol5.at_value(-2.4) == -10.52
assert pol5.at_value(-1,3.6) == -23.92
assert pol5.at_value(-1,3.6) == -23.92
if __name__ == '__main__':
test()
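    # A minimal usage sketch (added for illustration; assumes the Polynomial
    # constructor defined earlier in this file):
    #   p = Polynomial(1, 0, -2)       # represents -2x^2 + 1
    #   p.at_value(2)                  # 1 - 2*4 -> -7
    #   p.derivative().at_value(2)     # derivative is -4x, so -> -8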
| 54.238095 | 182 | 0.621598 |
d5433558712390b999d7237fe0aa37ae4a88e6d8
| 6,540 |
py
|
Python
|
03_SDN/ext/helper.py
|
kit-tm/Labs
|
8e5af3a76be6e17bdb62fd3175ecc545ed6e9147
|
[
"BSD-2-Clause"
] | null | null | null |
03_SDN/ext/helper.py
|
kit-tm/Labs
|
8e5af3a76be6e17bdb62fd3175ecc545ed6e9147
|
[
"BSD-2-Clause"
] | null | null | null |
03_SDN/ext/helper.py
|
kit-tm/Labs
|
8e5af3a76be6e17bdb62fd3175ecc545ed6e9147
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 Robert Bauer ([email protected])
#
# A small helper class providing some basic wrapper functions
# for simple openflow control handling with the POX-Controller.
# See aufgabe0.py for a simple example of how to use this file.
#
# Copyright (c) 2015,
# Karlsruhe Institute of Technology, Institute of Telematics
# Zirkel 2, 76131 Karlsruhe
# Germany
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# IMPORTANT: This file uses the POX-Controller (https://github.com/noxrepo/pox)
# which is published under the Apache License, Version 2.0. No modifications
# to the POX-Controller were made, except for the additional files in the
# pox/ext folder.
#
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import dpidToStr
from pox.lib.packet.ethernet import ethernet, ETHER_BROADCAST
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.arp import arp
from pox.lib.addresses import IPAddr, IPAddr6, EthAddr
class MyPacket():
    def __init__(self, event):
        self.event = event
        self.dpid = event.connection.dpid
        self.inport = event.port
def send_to_port(self, port):
msg = of.ofp_packet_out()
msg.actions.append(of.ofp_action_output(port = port))
msg.data = self.event.ofp
msg.in_port = self.event.port
self.event.connection.send(msg)
def flood(self):
self.send_to_port(of.OFPP_FLOOD)
def get_inport(self):
return self.event.port
def is_arp(self):
packet = self.event.parsed
return isinstance(packet.next, arp)
def get_arp_ip_source(self):
"""
Gibt die IP-Adresse des Systems zurueck, von dem der ARP-Request gestartet wurde.
Gibt None zurueck, falls es sich nicht um ein ARP-Request Paket handelt.
"""
packet = self.event.parsed
if not isinstance(packet.next, arp): return None
return packet.next.protosrc
def get_arp_ip_target(self):
"""
Gibt die aufzuloesende IP-Adresse eines ARP-Request Paketes zurueck.
Gibt None zurueck, falls es sich nicht um ein ARP-Request Paket handelt.
"""
packet = self.event.parsed
if not isinstance(packet.next, arp): return None
return packet.next.protodst
def answer_arp(self, mac):
"""
Erstellt fuer ein eingegangenes ARP-Request Paket ein Antwortpaket, dass
fuer die angefragte IP-Adresse mac als Antwort zurueckgibt.
"""
packet = self.event.parsed
if not isinstance(packet.next, arp): return
a = packet.next
if a.opcode == arp.REQUEST:
r = arp()
r.hwtype = a.hwtype
r.prototype = a.prototype
r.hwlen = a.hwlen
r.protolen = a.protolen
r.opcode = arp.REPLY
r.hwdst = a.hwsrc
r.protodst = a.protosrc
r.protosrc = a.protodst
r.hwsrc = mac
e = ethernet(type=packet.type, src=mac, dst=a.hwsrc)
e.set_payload(r)
# log.debug("%i %i answering ARP for %s" % (dpid, inport,str(r.protosrc)))
msg = of.ofp_packet_out()
msg.data = e.pack()
msg.actions.append(of.ofp_action_output(port = of.OFPP_IN_PORT))
msg.in_port = self.inport
self.event.connection.send(msg)
    def get_mac_src(self):
        packet = self.event.parsed
        if isinstance(packet, ethernet):
            return packet.src
        return None
    def get_mac_dst(self):
        packet = self.event.parsed
        if isinstance(packet, ethernet):
            return packet.dst
        return None
    def get_ip_src(self):
        packet = self.event.parsed
        if isinstance(packet.next, ipv4):
            return packet.next.srcip
        return None
    def get_ip_dst(self):
        packet = self.event.parsed
        if isinstance(packet.next, ipv4):
            return packet.next.dstip
        return None
class MyFlow():
def __init__(self, event):
self.event = event
self.msg = of.ofp_flow_mod()
def program(self):
self.event.connection.send(self.msg)
def action_output(self, port):
self.msg.actions.append(of.ofp_action_output(port = port))
def action_drop(self):
pass
def set_idle_timeout(self, t):
self.msg.idle_timeout = t
def set_hard_timeout(self, t):
self.msg.hard_timeout = t
def match_mac_src(self, addr):
if isinstance(addr, basestring):
addr = EthAddr(addr)
self.msg.match.dl_src = addr
def match_mac_dst(self, addr):
if isinstance(addr, basestring):
addr = EthAddr(addr)
self.msg.match.dl_dst = addr
def match_ip_src(self, addr):
self.msg.match.dl_type = ethernet.IP_TYPE # required!
self.msg.match.nw_src = addr
def match_ip_dst(self, addr):
self.msg.match.dl_type = ethernet.IP_TYPE # required!
self.msg.match.nw_dst = addr
def match_inport(self, port):
self.msg.match.in_port = int(port)
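# A minimal usage sketch (added for illustration; not part of the original
# file): a hypothetical POX packet-in handler wiring MyPacket and MyFlow
# together. The MAC address and the flooding policy are placeholders.
def _example_handle_PacketIn(event):
    pkt = MyPacket(event)
    if pkt.is_arp():
        # answer every ARP request with a fixed, made-up MAC address
        pkt.answer_arp(EthAddr("00:11:22:33:44:55"))
        return
    flow = MyFlow(event)
    flow.match_inport(pkt.get_inport())  # match traffic from the ingress port
    flow.action_output(of.OFPP_FLOOD)    # and flood it to all other ports
    flow.set_idle_timeout(10)
    flow.program()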
| 35.351351 | 93 | 0.648471 |
6357b525697ac67d1628ddd407b9b8340c1c2bf2
| 828 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/stock/doctype/item_attribute/test_item_attribute.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/stock/doctype/item_attribute/test_item_attribute.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/stock/doctype/item_attribute/test_item_attribute.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and Contributors
# See license.txt
import frappe
import unittest
test_records = frappe.get_test_records('Item Attribute')
from erpnext.stock.doctype.item_attribute.item_attribute import ItemAttributeIncrementError
class TestItemAttribute(unittest.TestCase):
def setUp(self):
if frappe.db.exists("Item Attribute", "_Test_Length"):
frappe.delete_doc("Item Attribute", "_Test_Length")
def test_numeric_item_attribute(self):
item_attribute = frappe.get_doc({
"doctype": "Item Attribute",
"attribute_name": "_Test_Length",
"numeric_values": 1,
"from_range": 0.0,
"to_range": 100.0,
"increment": 0
})
self.assertRaises(ItemAttributeIncrementError, item_attribute.save)
item_attribute.increment = 0.5
item_attribute.save()
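# Note (added for illustration): an increment of 0 cannot tile the numeric
# range [from_range, to_range], hence ItemAttributeIncrementError on save;
# with increment 0.5 the attribute yields the values 0.0, 0.5, ..., 100.0.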
| 26.709677 | 91 | 0.76087 |
89ba83ff6545e409db5512ffcbd3f31f69799f4c
| 257 |
py
|
Python
|
connect/config.py
|
slaurianodev/msgraph
|
617e5e7d890a06757ef38871d9ddecfac2852c84
|
[
"MIT"
] | null | null | null |
connect/config.py
|
slaurianodev/msgraph
|
617e5e7d890a06757ef38871d9ddecfac2852c84
|
[
"MIT"
] | null | null | null |
connect/config.py
|
slaurianodev/msgraph
|
617e5e7d890a06757ef38871d9ddecfac2852c84
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft. All rights reserved. Licensed under the MIT license.
# See LICENSE in the project root for license information.
# Client ID and secret.
client_id = '07c53e00-1adb-4fa7-8933-fd98f6a4da84'
client_secret = '7CmTo1brGWMmh5RoFiTdO0n'
| 36.714286 | 79 | 0.789883 |
7f4272e05d8a6bd2802445187d9f4e91866441dc
| 2,514 |
py
|
Python
|
Packs/Troubleshoot/Scripts/TroubleshootAggregateResults/TroubleshootAggregateResults.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Troubleshoot/Scripts/TroubleshootAggregateResults/TroubleshootAggregateResults.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Troubleshoot/Scripts/TroubleshootAggregateResults/TroubleshootAggregateResults.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
"""
Aggregating results into a single file
"""
from CommonServerPython import *
def find_entry_id_by_name(doc_file_name: str) -> str:
context = demisto.context()
file_context = context.get('File', {})
if not file_context:
return ''
if isinstance(file_context, dict) and file_context['Name'] == doc_file_name:
return file_context['EntryID']
# List of files
for file_obj in file_context:
if file_obj['Name'] == doc_file_name:
return file_obj['EntryID']
return ''
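# Note (added for illustration): demisto.context()['File'] may be either a
# single dict ({'Name': ..., 'EntryID': ...}) or a list of such dicts;
# find_entry_id_by_name above handles both shapes.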
def main():
try:
args = demisto.args()
changed_succeeded = argToList(args.get('succeeded_changed_params'))
file_names = argToList(args.get('file_names'))
execute_command_errors = argToList(args.get('execute_command_errors'))
configuration = args['configuration']
        if not isinstance(configuration, dict):
            configuration = json.loads(configuration)
raw_instance = configuration.get('RawInstance')
brand = raw_instance.get('brand')
errors = argToList(args.get('errors'))
instance_name = configuration.get("instance_name")
doc = f"""\
# Configuration Troubleshooting summary for {brand}.
---
Instance name : {instance_name}
{tableToMarkdown('Configuration Info:', configuration, ['proxy', 'system', 'isFetch', 'dockerImage', 'engine', 'deprecated'])}
{tableToMarkdown('Errors encountered in test-module (Test button)', errors, ['Errors'])}
{tableToMarkdown('Parameters changed resulted in test succeeded', changed_succeeded, ['Changed keys'])}
{tableToMarkdown('Errors encountered in command running:', execute_command_errors, ['Errors'])}
{tableToMarkdown('Files found in the investigation:', file_names, ['File Names'])}
"""
configuration_name = f'{instance_name}_configuration.md'
demisto.results(fileResult(
configuration_name,
json.dumps(raw_instance, indent=4)
))
doc_file_name = f'{instance_name}_summary.md'
demisto.results(fileResult(
doc_file_name,
doc
))
context = {
'TroubleshootAggregateResults': {
'configuration_file_name': configuration_name,
'summary_file_name': doc_file_name
}
}
return_outputs(doc, context)
except Exception as exc:
demisto.error(traceback.format_exc()) # print the traceback
return_error(str(exc))
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 35.914286 | 126 | 0.655529 |
7f72fa866d1035a1b976f384445893b67cde1218
| 1,054 |
py
|
Python
|
python/en/_packages/pillow/pillow_tutorial-using_the_image_class.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/_packages/pillow/pillow_tutorial-using_the_image_class.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/_packages/pillow/pillow_tutorial-using_the_image_class.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
pillow_tutorial-using_the_image_class.py
Pillow Docs > Handbook > Tutorial > Using the Image class
Pillow 6.0.0
https://pypi.org/project/Pillow/
Handbook
https://pillow.readthedocs.io/en/latest/handbook/index.html
Tutorial > Using the Image class
https://pillow.readthedocs.io/en/latest/handbook/tutorial.html
---
Pillow is the friendly PIL fork.
PIL stands for Python Imaging Library.
Installation
$ sudo pip3 install Pillow
"""
from PIL import Image
im = Image.open("surfing_couple.jpg")
print( im.format, im.size, im.mode )
#JPEG (2048, 2048) RGB
im.show()
# The standard version of show() is not very efficient,
# since it saves the image to a temporary file and
# calls a utility to display the image.
# If you don’t have an appropriate utility installed, it won’t even work.
# When it does work though, it is very handy for debugging and tests.
# Simple geometry transforms
out = im.resize( (128,128) )
print( out.format, out.size, out.mode )
# None (128, 128) RGB
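# Added illustration (not part of the original tutorial file): cropping
# follows the same pattern; the box is (left, upper, right, lower).
box = (100, 100, 1124, 1124)
region = im.crop(box)
print( region.format, region.size, region.mode )
# None (1024, 1024) RGB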
| 26.35 | 73 | 0.72296 |
7fbd095150136f2c8dd530546a565ce5be2d468f
| 523 |
py
|
Python
|
src/onegov/agency/theme/agency_theme.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/agency/theme/agency_theme.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/agency/theme/agency_theme.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.core.utils import module_path
from onegov.org.theme import OrgTheme
class AgencyTheme(OrgTheme):
name = 'onegov.agency.foundation'
@property
def post_imports(self):
return super().post_imports + [
'agency',
'chosen',
'people',
'ticket',
'search'
]
@property
def extra_search_paths(self):
base_paths = super().extra_search_paths
return [module_path('onegov.agency.theme', 'styles')] + base_paths
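# Note (added for illustration): the theme's own styles directory is placed
# before the inherited search paths, so files in onegov.agency's styles/
# take precedence over same-named files from the parent OrgTheme.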
| 23.772727 | 74 | 0.600382 |
89bf502bc07697f3ffa1d4e6953b3be3bebcb770
| 214 |
py
|
Python
|
python_gui_tkinter/KALU/GARBAGE1/TEST/sound.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python_gui_tkinter/KALU/GARBAGE1/TEST/sound.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python_gui_tkinter/KALU/GARBAGE1/TEST/sound.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
from tkinter import *
import mp3play
root = Tk() # create tkinter window
f = mp3play.load('alrt1.mp3'); play = lambda: f.play()
button = Button(root, text = 'Play', command = play)
button.pack()
root.mainloop()
| 19.454545 | 54 | 0.686916 |
61731e1c32580afccfd200f305d268739dad7651
| 607 |
py
|
Python
|
EM/src/EM.py
|
quqixun/MLAlgorithms
|
1ad46a899a6280a08c196fb4eb0931408c8636c7
|
[
"MIT"
] | 2 |
2018-04-25T18:00:28.000Z
|
2018-08-08T09:39:18.000Z
|
EM/src/EM.py
|
quqixun/MLAlgorithms
|
1ad46a899a6280a08c196fb4eb0931408c8636c7
|
[
"MIT"
] | null | null | null |
EM/src/EM.py
|
quqixun/MLAlgorithms
|
1ad46a899a6280a08c196fb4eb0931408c8636c7
|
[
"MIT"
] | 2 |
2019-03-03T02:55:48.000Z
|
2021-01-21T04:50:46.000Z
|
# Conventional Machine Learning Algorithms
# Class of "EM".
# Author: Qixun Qu
# Create on: 2018/06/20
# Modify on: 2018/06/20
# ,,, ,,,
# ;" '; ;' ",
# ; @.ss$$$$$$s.@ ;
# `s$$$$$$$$$$$$$$$'
# $$$$$$$$$$$$$$$$$$
# $$$$P""Y$$$Y""W$$$$$
# $$$$ p"$$$"q $$$$$
# $$$$ .$$$$$. $$$$'
# $$$DaU$$O$$DaU$$$'
# '$$$$'.^.'$$$$'
# '&$$$$$&'
from __future__ import division
from __future__ import print_function
import numpy as np
class EM(object):
    def __init__(self):
        return
    def fit(self, X, y=None):
        return
    def predict(self, X):
        return
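# Added illustration (not part of the original skeleton): a minimal EM
# iteration for a two-component 1-D Gaussian mixture (x: 1-D numpy array),
# sketching the E-step/M-step structure that fit() above is presumably
# meant to hold.
def _em_gmm_1d_demo(x, n_iter=50):
    # crude initialization: extreme points as means, pooled variance
    mu = np.array([x.min(), x.max()], dtype=float)
    var = np.array([x.var(), x.var()]) + 1e-6
    pi = np.array([0.5, 0.5])
    for _ in range(n_iter):
        # E-step: responsibility of each component for each sample
        dens = (pi / np.sqrt(2 * np.pi * var)
                * np.exp(-(x[:, None] - mu) ** 2 / (2 * var)))
        resp = dens / dens.sum(axis=1, keepdims=True)
        # M-step: re-estimate the weights, means and variances
        nk = resp.sum(axis=0)
        pi = nk / len(x)
        mu = (resp * x[:, None]).sum(axis=0) / nk
        var = (resp * (x[:, None] - mu) ** 2).sum(axis=0) / nk + 1e-6
    return pi, mu, var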
| 16.405405 | 42 | 0.418451 |
14ac0a301e00fbcea3a0ac0c3b3b977fca8f3b47
| 259 |
py
|
Python
|
Classes/Brick.py
|
EKaczmarek/BomberMan
|
a232d00ff0fe9be0a1276fba22c3f120e2bb4e5d
|
[
"MIT"
] | 1 |
2018-04-30T15:46:47.000Z
|
2018-04-30T15:46:47.000Z
|
Classes/Brick.py
|
EKaczmarek/BomberMan
|
a232d00ff0fe9be0a1276fba22c3f120e2bb4e5d
|
[
"MIT"
] | 1 |
2018-06-03T13:20:19.000Z
|
2018-06-03T20:19:25.000Z
|
Classes/Brick.py
|
EKaczmarek/BomberMan
|
a232d00ff0fe9be0a1276fba22c3f120e2bb4e5d
|
[
"MIT"
] | 1 |
2018-06-03T12:59:11.000Z
|
2018-06-03T12:59:11.000Z
|
import pygame
class Brick(object):
x, y = '', ''
desc = "brick"
def __init__(self, pos):
self.x = pos[0]
self.y = pos[1]
self.rect = pygame.Rect(self.x, self.y, 50, 50)
def get_brick(self):
return self
| 12.95 | 55 | 0.513514 |
1aedd8abf3998a6d6a7972cb7badb05cba3fbf51
| 580 |
py
|
Python
|
IVTp/2014/Grigoriev_A_O/task_3_2.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
IVTp/2014/Grigoriev_A_O/task_3_2.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
IVTp/2014/Grigoriev_A_O/task_3_2.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 3. Variant 2.
# Write a program that prints the name "Maria Luisa Ceciarelli" and asks for her pseudonym. The program must concatenate the two strings and print the result, separating the name and the pseudonym with a dash.
# Grigoriev A.O.
# 23.05.2016
print("The hero of today's program is Maria Luisa Ceciarelli")
psev = input("Under what name do we know this person? Your answer: ")
if psev == "Monica Vitti":
    print("Correct: Maria Luisa Ceciarelli - " + psev)
else:
    print("You are wrong, that is not her pseudonym.")
input("Press Enter to exit")
| 44.615385 | 208 | 0.737931 |
2142fc70b2a30c705dbae777b03c62a0de3c2d0c
| 6,660 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/config/hr.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/config/hr.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/config/hr.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Employee and Attendance"),
"items": [
{
"type": "doctype",
"name": "Employee",
"description": _("Employee records."),
},
{
"type": "doctype",
"name": "Employee Attendance Tool",
"label": _("Employee Attendance Tool"),
"description":_("Mark Attendance for multiple employees"),
"hide_count": True
},
{
"type": "doctype",
"name": "Attendance",
"description": _("Attendance record."),
},
{
"type": "doctype",
"name": "Upload Attendance",
"description":_("Upload attendance from a .csv file"),
"hide_count": True
},
]
},
{
"label": _("Recruitment"),
"items": [
{
"type": "doctype",
"name": "Job Applicant",
"description": _("Applicant for a Job."),
},
{
"type": "doctype",
"name": "Job Opening",
"description": _("Opening for a Job."),
},
{
"type": "doctype",
"name": "Job Offer",
"description": _("Offer candidate a Job."),
},
]
},
{
"label": _("Leaves and Holiday"),
"items": [
{
"type": "doctype",
"name": "Leave Application",
"description": _("Applications for leave."),
},
{
"type": "doctype",
"name":"Leave Type",
"description": _("Type of leaves like casual, sick etc."),
},
{
"type": "doctype",
"name": "Holiday List",
"description": _("Holiday master.")
},
{
"type": "doctype",
"name": "Leave Allocation",
"description": _("Allocate leaves for a period.")
},
{
"type": "doctype",
"name": "Leave Control Panel",
"label": _("Leave Allocation Tool"),
"description":_("Allocate leaves for the year."),
"hide_count": True
},
{
"type": "doctype",
"name": "Leave Block List",
"description": _("Block leave applications by department.")
},
]
},
{
"label": _("Payroll"),
"items": [
{
"type": "doctype",
"name": "Salary Slip",
"description": _("Monthly salary statement."),
},
{
"type": "doctype",
"name": "Payroll Entry",
"label": _("Payroll Entry"),
"description":_("Generate Salary Slips"),
"hide_count": True
},
{
"type": "doctype",
"name": "Salary Structure",
"description": _("Salary template master.")
},
{
"type": "doctype",
"name": "Salary Component",
"label": _("Salary Components"),
"description": _("Earnings, Deductions and other Salary components")
},
]
},
{
"label": _("Expense Claims"),
"items": [
{
"type": "doctype",
"name": "Employee Advance",
"description": _("Manage advance amount given to the Employee"),
},
{
"type": "doctype",
"name": "Expense Claim",
"description": _("Claims for company expense."),
},
{
"type": "doctype",
"name": "Expense Claim Type",
"description": _("Types of Expense Claim.")
},
]
},
{
"label": _("Appraisals"),
"items": [
{
"type": "doctype",
"name": "Appraisal",
"description": _("Performance appraisal."),
},
{
"type": "doctype",
"name": "Appraisal Template",
"description": _("Template for performance appraisals.")
},
{
"type": "page",
"name": "team-updates",
"label": _("Team Updates")
},
]
},
{
"label": _("Employee Loan Management"),
"icon": "icon-list",
"items": [
{
"type": "doctype",
"name": "Loan Type",
"description": _("Define various loan types")
},
{
"type": "doctype",
"name": "Employee Loan Application",
"description": _("Employee Loan Application")
},
{
"type": "doctype",
"name": "Employee Loan"
},
]
},
{
"label": _("Training"),
"items": [
{
"type": "doctype",
"name": "Training Program"
},
{
"type": "doctype",
"name": "Training Event"
},
{
"type": "doctype",
"name": "Training Result"
},
{
"type": "doctype",
"name": "Training Feedback"
},
]
},
{
"label": _("Fleet Management"),
"items": [
{
"type": "doctype",
"name": "Vehicle"
},
{
"type": "doctype",
"name": "Vehicle Log"
},
]
},
{
"label": _("Setup"),
"icon": "fa fa-cog",
"items": [
{
"type": "doctype",
"name": "HR Settings",
"description": _("Settings for HR Module")
},
{
"type": "doctype",
"name": "Employment Type",
"description": _("Types of employment (permanent, contract, intern etc.).")
},
{
"type": "doctype",
"name": "Branch",
"description": _("Organization branch master.")
},
{
"type": "doctype",
"name": "Department",
"description": _("Organization unit (department) master.")
},
{
"type": "doctype",
"name": "Designation",
"description": _("Employee designation (e.g. CEO, Director etc.).")
},
{
"type": "doctype",
"name": "Daily Work Summary Settings"
},
{
"type": "doctype",
"name": "Health Insurance"
}
]
},
{
"label": _("Reports"),
"icon": "fa fa-list",
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Employee Leave Balance",
"doctype": "Leave Application"
},
{
"type": "report",
"is_query_report": True,
"name": "Employee Birthday",
"doctype": "Employee"
},
{
"type": "report",
"is_query_report": True,
"name": "Employees working on a holiday",
"doctype": "Employee"
},
{
"type": "report",
"name": "Employee Information",
"doctype": "Employee"
},
{
"type": "report",
"is_query_report": True,
"name": "Salary Register",
"doctype": "Salary Slip"
},
{
"type": "report",
"is_query_report": True,
"name": "Monthly Attendance Sheet",
"doctype": "Attendance"
},
{
"type": "report",
"is_query_report": True,
"name": "Vehicle Expenses",
"doctype": "Vehicle"
},
]
},
{
"label": _("Help"),
"icon": "fa fa-facetime-video",
"items": [
{
"type": "help",
"label": _("Setting up Employees"),
"youtube_id": "USfIUdZlUhw"
},
{
"type": "help",
"label": _("Leave Management"),
"youtube_id": "fc0p_AXebc8"
},
{
"type": "help",
"label": _("Expense Claims"),
"youtube_id": "5SZHJF--ZFY"
}
]
}
]
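# Note (added for illustration): frappe renders the structure returned by
# get_data() as the HR module page; each entry in "items" becomes a link of
# the given "type" (doctype, page, report or help).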
| 20.492308 | 80 | 0.49955 |
0d2a350eb440e78278cb7c7fe2f288599b2eadcf
| 488 |
py
|
Python
|
Input_Gui.py
|
Snrsunny143/Taking-Input-In-Python-GUI
|
b98be4e784a0a307c84f57492d5ccb26b8bbc24d
|
[
"MIT"
] | 1 |
2020-08-29T03:54:24.000Z
|
2020-08-29T03:54:24.000Z
|
Input_Gui.py
|
Snrsunny143/Taking-Input-In-Python-GUI
|
b98be4e784a0a307c84f57492d5ccb26b8bbc24d
|
[
"MIT"
] | null | null | null |
Input_Gui.py
|
Snrsunny143/Taking-Input-In-Python-GUI
|
b98be4e784a0a307c84f57492d5ccb26b8bbc24d
|
[
"MIT"
] | null | null | null |
from tkinter import *
import sys
root = Tk()
root.title("Snr Tech And Tutorials")
input = Entry(root)  # note: this name shadows the built-in input()
input.pack()
def printout():
print("User Input :- "+input.get())
def quit():
print("User Input :- Bye. ")
print(" Meet You Later. ")
sys.exit()
button = Button(root, text =" PrintOut ", command=printout , fg = "pink" ,bg = "green")
button.pack()
quit_button = Button(root, text = " Quit ",command=quit , fg = "grey", bg = "pink")
quit_button.pack()
root.mainloop()
| 19.52 | 87 | 0.633197 |
d3a98a72dff4f9eb164b86ee39ad16b58c883edb
| 1,722 |
py
|
Python
|
python_experiments/run_vldbj_experiments/run_reads_dynamic_update.py
|
RapidsAtHKUST/SimRank
|
3a601b08f9a3c281e2b36b914e06aba3a3a36118
|
[
"MIT"
] | 8 |
2020-04-14T23:17:00.000Z
|
2021-06-21T12:34:04.000Z
|
python_experiments/run_vldbj_experiments/run_reads_dynamic_update.py
|
RapidsAtHKUST/SimRank
|
3a601b08f9a3c281e2b36b914e06aba3a3a36118
|
[
"MIT"
] | null | null | null |
python_experiments/run_vldbj_experiments/run_reads_dynamic_update.py
|
RapidsAtHKUST/SimRank
|
3a601b08f9a3c281e2b36b914e06aba3a3a36118
|
[
"MIT"
] | 1 |
2021-01-17T16:26:50.000Z
|
2021-01-17T16:26:50.000Z
|
from exec_utilities import time_out_util
from exec_utilities.exec_utils import *
def run_exp():
our_exec_path = '/homes/ywangby/workspace/yche/new-git-repos-yche/SimRank/SPS-Variants/READS/build'
data_set_lst = [
# 'ca-GrQc', 'ca-HepTh', 'p2p-Gnutella06', 'wiki-Vote',
# 'email-Enron', 'email-EuAll', 'web-NotreDame', 'web-Stanford',
# 'web-BerkStan', 'web-Google',
# 'cit-Patents', 'soc-LiveJournal1',
# 'wiki-Link',
'digg-friends',
'flickr-growth',
]
dynamic_exec_tag_lst = [
'reads-rq-dynamic-del',
'reads-rq-dynamic-exp',
# 'reads-d-dynamic-del',
# 'reads-d-dynamic-exp',
]
def one_round():
for exec_name in dynamic_exec_tag_lst:
for data_set_name in data_set_lst:
algorithm_path = our_exec_path + os.sep + exec_name
statistics_file_path = 'exp_results/' + exec_name + '_dynamic_update_time_' + str(
insert_edge_num) + '_0407.txt'
params_lst = map(str, [algorithm_path, data_set_name, '>>', statistics_file_path])
cmd = ' '.join(params_lst)
time_out = 72000
tle_flag, info, correct_info = time_out_util.run_with_timeout(cmd, timeout_sec=time_out)
write_split(statistics_file_path)
with open(statistics_file_path, 'a+') as ifs:
ifs.write(correct_info)
ifs.write(my_splitter + time.ctime() + my_splitter)
ifs.write('is_time_out:' + str(tle_flag))
ifs.write('\n\n\n\n')
insert_edge_num = 1000
one_round()
if __name__ == '__main__':
run_exp()
| 36.638298 | 104 | 0.583043 |
4ce484ecc38278ecea33be0cfb19f3c9f0983e12
| 18,650 |
py
|
Python
|
src/scripts/alchi-web.py
|
milahu/alchi
|
6484d4a877d47204e28cf1a32a5d9da8705aff25
|
[
"CC0-1.0"
] | 3 |
2020-08-12T16:57:23.000Z
|
2021-03-15T18:39:48.000Z
|
src/scripts/alchi-web.py
|
milahu/alchi
|
6484d4a877d47204e28cf1a32a5d9da8705aff25
|
[
"CC0-1.0"
] | 4 |
2020-09-22T19:25:43.000Z
|
2022-02-14T20:51:16.000Z
|
src/scripts/alchi-web.py
|
milahu/alchi
|
6484d4a877d47204e28cf1a32a5d9da8705aff25
|
[
"CC0-1.0"
] | 1 |
2021-04-06T11:18:17.000Z
|
2021-04-06T11:18:17.000Z
|
#!/usr/bin/python3
"""
the web of sixteen types
the nunu matrix
two power four
latin square of order four
"""
# definitions
names = [
None, 'F1', 'M2', 'M3', 'F4', # left = matriarchy
None, 'M1', 'F2', 'F3', 'M4', # right = patriarchy
]
names_long = [
None, # 0
'female fire', # 1
'male earth', # 2
'male air', # 3
'female water', # 4
None, # 5
'male fire', # 6
'female earth', # 7
'female air', # 8
'male water', # 9
]
# congruence of gender
gl = [1, 2, 3, 4] # left = matriarchy
gr = [6, 7, 8, 9] # right = patriarchy
# gender
gm = [2, 3, 6, 9] # male = penis
gf = [1, 4, 7, 8] # female = vagina
# tempo
tp = [1, 4, 6, 9] # psychotic = phlegmatic
tn = [2, 3, 7, 8] # neurotic = choleric
# mood
me = [1, 3, 6, 8] # extravert = sanguinic
mi = [2, 4, 7, 9] # introvert = melancholic
# class
cc = [1, 2, 6, 7] # classic
cr = [3, 4, 8, 9] # romantic
# bond signs: first, second, third, last
# convert to natlist later
#B = ['I', 'U', 'E', 'A'] # 1 2 3 4, self is dott
B = ['U', 'E', 'A', 'O'] # 2 3 4 5, first bond is to self = body
# helper functions
# string translation
def tl(s, a, b):
return s.translate(s.maketrans(a, b))
# list intersection
def lx(*a):
a = list(a) # convert tuple to list
z = a.pop(0)
for b in a:
z = [c for c in z if c in b]
if len(z) == 1:
return z[0]
return z
# print without newline
import sys
def _print(*a):
sys.stdout.write(*a)
# natural list in python
# first index is one
# slice stop is inclusive
# copy from https://stackoverflow.com/a/48873374/10440128
class natlist(dict):
def __init__(self, *items: any) -> None:
# two constructors:
# natlist(1, 2, 3, 4)
# natlist( [1, 2, 3, 4] ) # convert one list to natlist
if len(items) == 1 and type(items[0]) == list:
items = items[0]
self.__dict__ = dict(zip(
range(1, 1+len(items)),
items))
def __repr__(self) -> str:
return '{}({})'.format(
self.__class__.__name__,
repr(list(self.__dict__.values()))[1:-1])
def __contains__(self, item: any) -> bool:
return item in self.__dict__.values()
def __len__(self) -> int:
return len(self.__dict__.values())
# note: key.stop is inclusive
# so natlist(1, 2, 3, 4)[2:3] is [2, 3]
def __getitem__(self, key: any) -> any:
if type(key) == slice:
a = key.start and key.start or 1
b = key.stop and key.stop or None # inclusive stop
#b = key.stop and key.stop - 1 or None # exclusive stop
return list(self.__dict__.values())[a-1:b:key.step]
return self.__dict__[key]
def __setitem__(self, key: int, value: any) -> None:
self.__dict__[key] = value
def __delitem__(self, key: int) -> None:
del self.__dict__[key]
def __iter__(self) -> iter:
return iter(self.__dict__.values())
def items(self):
return self.__dict__.items()
# get slice as list of (key, value) tuples
# left slice: sliceitems(None, key_stop)
# right slice: sliceitems(key_start, None)
# note: key.stop is inclusive
def sliceitems(self, *key: slice) -> any:
key = slice(*key)
a = key.start and key.start or 1
b = key.stop and key.stop or len(self) # inclusive stop
#b = key.stop and key.stop - 1 or len(self) # exclusive stop
b = (b < 0) and len(self)+b
return dict(zip(
range(a, b+1),
list(self.__dict__.values())[a-1:b:key.step]
)).items()
L = natlist
B = L(B) # convert list to natlist
matrix = {}
"""
matrix['null'] = [
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
]
4 elements:
1. fire
2. earth
3. air
4. water
2 sides:
left. F1 F4 M3 M2. fem psych + male neur
right. M1 M4 F3 F2. male psych + fem neur
2 ages:
old. top half = matrix[0:1]
young. bottom half = matrix[2:3]
first order:
same class + other tempo + other sex.
"""
matrix['fire center + sanguinic center'] = [
[4, 3, 3, 4],
[2, 1, 1, 2],
[2, 1, 1, 2],
[4, 3, 3, 4],
]
matrix['web-rc-top-go-left'] = [
[2, 3, 4, 1],
[1, 4, 3, 2],
[3, 2, 1, 4],
[4, 1, 2, 3],
]
matrix['web-rc-left-go-top'] = [
[2, 1, 3, 4],
[3, 4, 2, 1],
[4, 3, 1, 2],
[1, 2, 4, 3],
]
gender = [
[None, 'F', 'M', 'M', 'F'], # left side
[None, 'F', 'M', 'M', 'F'], # left side
[None, 'M', 'F', 'F', 'M'], # right side
[None, 'M', 'F', 'F', 'M'], # right side
]
lines_x_16 = [
[' ', '─\u252C─', ' ', '─\u252C─'],
[' ', '─\u2534─', ' │ ', '─\u2534─'],
[' ', '─\u252C─', ' │ ', '─\u252C─'],
[' ', '─\u2534─', ' ', '─\u2534─'],
]
lines_y_16 = [
' Old \n',
' \u251C────\u252C────\u2524 \n',
'L \u253C R\n',
' \u251C────\u2534────\u2524 \n',
' Young '
]
lines_x_8 = [
[' ', '─\u252C─', ' ', '─\u252C─'],
[' ', '─\u2534─', ' ', '─\u2534─'],
]
lines_y_8 = [
'',
'L \u251C─────────\u2524 R\n',
'',
]
lines_x_4 = [
['', '─\u252C─'],
['', '─\u2534─'],
]
lines_y_4 = [
'',
' │\n',
'',
]
# box drawing characters
# --- = '─'
# | = '│'
# |- = '\u251C'
# -| = '\u2524'
# T = '\u252C'
# _|_ = '\u2534'
# + = '\u253C'
# serialize 16 elements
def str_16(matrix):
s = ''
for y in range(0, 4):
s += lines_y_16[y]
for x in range(0, 4):
e = matrix[y][x]
g = gender[x][e]
s += lines_x_16[y][x] + g + str(e)
s += '\n'
s += lines_y_16[4]
return s
# serialize 8 elements
def str_8(matrix):
s = ''
for y in range(0, 2):
s += lines_y_8[y]
for x in range(0, 4):
e = matrix[y][x]
g = gender[x][e]
s += lines_x_8[y][x] + g + str(e)
s += '\n'
s += lines_y_8[2]
return s
# serialize 4 elements
def str_4(matrix):
s = ''
for y in range(0, 2):
s += lines_y_4[y]
for x in range(0, 2):
e = matrix[y][x]
g = gender[x][e]
s += lines_x_4[y][x] + g + str(e)
s += '\n'
s += lines_y_4[2]
return s
# web of types
def web_16(matrix):
s = ''
L = [] # line buffer
for y in range(-1, 5):
y0 = y
y = y % 4
for x in range(-1, 5):
x = x % 4
L.append(str(matrix[y][x]))
if y0 in [-1, 4]:
s += '{0} {1} {2} {3} {4} {5}\n'.format(*L)
else:
s += '{0} │ {1} {2} {3} {4} │ {5}\n'.format(*L)
if y == 1:
s += ' │ │\n'
# with center cross:
#s += ' │ \u253C │\n'
if y0 == -1:
s += ' ┌' + '─'*11 + '┐\n'
if y0 == 3:
s += ' └' + '─'*11 + '┘\n'
L = []
return s
# verbose web of types
#todo move gender outside of matrix, to left or top
# left:
# M: ....
# F: ....
# M: ....
# F: ....
# top:
# M F M F
# ¨ ¨ ¨ ¨
# : : : :
# : : : :
def web_16_v(matrix):
s = ''
L = [] # line buffer
web_gender = ['M', 'F']
for y in range(-1, 5):
y0 = y
y = y % 4
g = web_gender[y % 2] # gender switch with every line
for x in range(-1, 5):
x = x % 4
L.append(g + str(matrix[y][x]))
if y0 in [-1, 4]:
s += '{0} {1} {2} {3} {4} {5}\n'.format(*L)
else:
s += '{0} │ {1} {2} {3} {4} │ {5}\n'.format(*L)
if y == 1:
s += ' │ │\n'
# with center cross:
#s += ' │ \u253C │\n'
if y0 in [0, 2]:
s += ' │ │ │ │ │ │\n'
if y0 == -1:
s += ' ┌' + '─'*15 + '┐\n'
if y0 == 3:
s += ' └' + '─'*15 + '┘\n'
L = []
return s
# web of 16 types, diagonal mirror
def web_16_2_v(matrix):
s = ''
L = [] # line buffer
web_gender = ['M', 'F']
for y in range(-1, 5):
y0 = y
y = y % 4
for x in range(-1, 5):
g = web_gender[x % 2] # gender switch with every delta x
x = x % 4
L.append(g + str(matrix[y][x]))
if y0 in [-1, 4]:
s += '{0} {1} {2} {3} {4} {5}\n'.format(*L)
else:
#s += '{0} │ {1} {2} {3} {4} │ {5}\n'.format(*L)
s += '{0} │ {1}─{2} {3}─{4} │ {5}\n'.format(*L)
if y == 1:
s += ' │ │\n'
# with center cross:
#s += ' │ \u253C │\n'
if y0 in [0, 2]:
#s += ' │ │ │ │ │ │\n'
s += ' │ │\n'
if y0 == -1:
s += ' ┌' + '─'*15 + '┐\n'
if y0 == 3:
s += ' └' + '─'*15 + '┘\n'
L = []
return s
#print(web_16(matrix['web-rc-top-go-left']))
#print(web_16_v(matrix['web-rc-top-go-left']))
##print(web_16_2(matrix['web-rc-left-go-top']))
print(web_16_2_v(matrix['web-rc-left-go-top']))
#for key in matrix:
if False:
print('%s:' % key)
print('16:')
print(str_16(matrix[key]))
print('8 top:')
print(str_8(matrix[key][0:2]))
print('8 bot:')
print(str_8(matrix[key][2:4]))
print('4 top left:')
print(str_4(matrix[key]))
"""
view = construct one of sixteen views
i = eye = top left number
v = vertical "last bond" [age, time]
True if "first bond" is horizontal
False if "first bond" is vertical
"""
import copy
def view(i, v=True):
m = L(
L(0, 0, 0, 0),
L(0, 0, 0, 0),
L(0, 0, 0, 0),
L(0, 0, 0, 0),
)
# it starts with eye
m[1][1] = i
# let us assume "v is True" for now,
# and later transform the matrix, according to h.
# so, for now, first bond is horizontal.
# first bond to you
m[1][2] = lx(
(i in gl) and gl or gr, # same congruence
(i in me) and mi or me, # other mood
(i in tn) and tn or tp # same tempo
)
# second bond to het
m[2][1] = lx(
(i in gl) and gl or gr, # same congruence
(i in me) and mi or me, # other mood
(i in tp) and tn or tp # other tempo
)
# opposite type is "het of you"
m[2][2] = lx(
(i in gl) and gl or gr, # same congruence
(i in mi) and mi or me, # same mood
(i in tp) and tn or tp # other tempo
)
# mirror to the right = other congruence
d = (i in gl) and +5 or -5
m[2][4] = m[1][1] + d
m[2][3] = m[1][2] + d
m[1][4] = m[2][1] + d
m[1][3] = m[2][2] + d
# mirror to the bottom = other age
m[3][1] = m[1][2]
m[4][1] = m[2][2]
m[3][2] = m[1][1]
m[4][2] = m[2][1]
m[3][3] = m[1][4]
m[4][3] = m[2][4]
m[3][4] = m[1][3]
m[4][4] = m[2][3]
# flip diagonal
if v == False:
t = copy.deepcopy(m)
for y in range(1, 5):
for x in range(1, 5):
m[y][x] = t[x][y]
return m
___lines_x_16_new = L(
L('', '', '', ' ', ' ', ' ', '', ''),
L('', '', '', ' ', ' ', ' ', '', ''),
L('', '', '', ' U ', ' ', ' U ', '', ''),
L('', '', '│', ' U ', ' │ ', ' U ', '│', '', ''),
L('', '', '│', ' U ', ' │ ', ' U ', '│', '', ''),
L('', '', '', ' U ', ' ', ' U ', '', ''),
L('', '', '', ' ', ' ', ' ', '', ''),
L('', '', '', ' ', ' ', ' ', '', ''),
)
___lines_y_16_new = L(
'',
'',
' ─────────\n',
' E───A───E \n',
' O\n',
' E───A───E \n',
' ─────────\n',
'',
'',
'',
)
__lines_x_16_new = L(
L('', ' ', ' ', ' ', ' ', ' ', ' ', ' '),
L('', ' ', ' ', ' ', ' ', ' ', ' ', ' '),
L('', ' ', ' │ ', ' I ', ' ', ' I ', ' │ ', ' '),
L('', ' ', ' │ ', ' I ', ' │ ', ' I ', ' │ ', ' '),
L('', ' ', ' │ ', ' I ', ' │ ', ' I ', ' │ ', ' '),
L('', ' ', ' │ ', ' I ', ' ', ' I ', ' │ ', ' '),
L('', ' ', ' ', ' ', ' ', ' ', ' ', ' '),
L('', ' ', ' ', ' ', ' ', ' ', ' ', ' '),
)
____lines_y_16_new = L(
'',
'\n',
' ┌───────────────┐\n',
' │ U───E───U │\n',
' │ A │\n',
' │ U───E───U │\n',
' └───────────────┘\n',
'\n',
'',
'',
'',
)
__lines_y_16_new = L(
'',
'\n',
' ╒═══════════════╕\n',
' │ U───E───U │\n',
' │ A │\n',
' │ U───E───U │\n',
' ╘═══════════════╛\n',
'\n',
'',
'',
'',
)
__lines_x_16_new = L(
L('', ' ', ' ', ' ', ' ', ' ', ' ', ' '),
L('', ' ', ' ', ' ', ' ', ' ', ' ', ' '),
L('', ' ', ' │ ', '───', ' │ ', '───', ' │ ', ' '),
L('', ' ', ' │ ', '───', ' │ ', '───', ' │ ', ' '),
L('', ' ', ' │ ', '───', ' │ ', '───', ' │ ', ' '),
L('', ' ', ' │ ', '───', ' │ ', '───', ' │ ', ' '),
L('', ' ', ' ', ' ', ' ', ' ', ' ', ' '),
L('', ' ', ' ', ' ', ' ', ' ', ' ', ' '),
)
__lines_y_16_new = L(
'',
'\n',
' ╒═══════╤═══════╕\n',
' │ U E U │\n',
' ╞═══════A═══════╡\n',
' │ U E U │\n',
' ╘═══════╧═══════╛\n',
'\n',
'',
'',
'',
)
lines_x_16_new = L(
L('', ' ', ' ', ' ', ' ', ' ', ' ', ' '),
L('', ' ', ' ', ' ', ' ', ' ', ' ', ' '),
L('', ' ', ' │ ', ' U ', ' │ ', ' U ', ' │ ', ' '),
L('', ' ', ' │ ', ' U ', ' │ ', ' U ', ' │ ', ' '),
L('', ' ', ' │ ', ' U ', ' │ ', ' U ', ' │ ', ' '),
L('', ' ', ' │ ', ' U ', ' │ ', ' U ', ' │ ', ' '),
L('', ' ', ' ', ' ', ' ', ' ', ' ', ' '),
L('', ' ', ' ', ' ', ' ', ' ', ' ', ' '),
)
lines_y_16_new = L(
'',
'\n',
' ╒═══════╤═══════╕\n',
' │ E A E │\n',
' ╞═══════O═══════╡\n',
' │ E A E │\n',
' ╘═══════╧═══════╛\n',
'\n',
'',
'',
'',
)
__lines_y_16_new_v0 = L(
'',
'\n',
' ┌───────────────┐\n',
' │ I U I I U I │\n',
' │ E───A───E │\n',
' │ I U I I U I │\n',
' └───────────────┘\n',
'\n',
'',
'',
'',
)
lines_x_16_new_v0 = L(
L('', ' ', ' ', ' ', ' ', ' ', ' ', ' '),
L('', ' ', ' ', ' ', ' ', ' ', ' ', ' '),
L('', ' ', ' ║ ', ' ', ' ║ ', ' ', ' ║ ', ' '),
L('', ' ', ' ║ ', ' ', ' ║ ', ' ', ' ║ ', ' '),
L('', ' ', ' ║ ', ' ', ' ║ ', ' ', ' ║ ', ' '),
L('', ' ', ' ║ ', ' ', ' ║ ', ' ', ' ║ ', ' '),
L('', ' ', ' ', ' ', ' ', ' ', ' ', ' '),
L('', ' ', ' ', ' ', ' ', ' ', ' ', ' '),
)
lines_y_16_new_v0 = L(
'',
'\n',
' ╓───────╥───────╖\n',
' ║ U E U ║ U E U ║\n',
' ╟───A───O───A───╢\n',
' ║ U E U ║ U E U ║\n',
' ╙───────╨───────╜\n',
'\n',
'',
'',
'',
)
# serialize 16 elements
# C: compact? print only 4x4 "core" of matrix, without near-field frame
#todo: generalize to accept "frame size", fs=0 --> no frame
# .... so we can produce larger fields, like fs=4 --> 1 core + 8 "next cores"
# --> show "minor lines" as dash- / dot-lines ┈┊ ┄┆ ╌╎
def str_16_new(m, C=False):
# detect the value of v
# v is True,
# if the "last bond" is vertical
# = the "first bond" is horizontal
i = m[1][1]
# first bond to you
v = (m[1][2] == lx(
(i in gl) and gl or gr, # same congruence
(i in me) and mi or me, # other mood
(i in tn) and tn or tp # same tempo
))
#print('v is '+str(v))
xy_min = C and 3 or 1
xy_max = C and 6 or 8
s = ''
#todo write this shorter --> move the (v == True)? branch into the loops
if v:
#for y in range(1, 5):
for y in range(xy_min, 1+xy_max):
t = lines_y_16_new[y]
if C:
t = t.strip() + '\n'
s += t
#for x in range(1, 5):
#for x in range(1, 9):
for x in range(xy_min, 1+xy_max):
#i = m[y][x]
#i = m[(y-2)%4][(x-2)%4]
i = m[(y-3)%4+1][(x-3)%4+1]
if y in [1, 2, 7, 8] and x in [1, 2, 7, 8]:
i = ' '
#g = (i in gm) and 'M' or 'F'
#s += lines_x_16_new[y][x] + g + str(i)
#s += lines_x_16_new[y][x] + str(i)
t = lines_x_16_new[y][x] + str(i)
if C and x == 3:
t = t.lstrip()
s += t
if C and x == 6:
s += lines_x_16_new[y][7]
s += '\n'
if C and y == 6:
s += lines_y_16_new[7].strip() + '\n'
#s += lines_y_16_new[5]
else:
#for y in range(1, 5):
#for y in [-1, 0, 1, 2, 3, 4, 5, 6]:
#for y in range(1, 9): # 8 lines
for y in range(xy_min, 1+xy_max):
t = lines_y_16_new_v0[y]
if C:
t = t.strip() + '\n'
s += t
#for x in range(1, 5):
#for x in range(1, 9):
for x in range(xy_min, 1+xy_max):
#i = m[y][x]
i = m[(y-3)%4+1][(x-3)%4+1]
if y in [1, 2, 7, 8] and x in [1, 2, 7, 8]:
i = ' '
#g = (i in gm) and 'M' or 'F'
#s += lines_x_16_new[y][x] + g + str(i)
#s += lines_x_16_new_v0[y][x] + str(i)
t = lines_x_16_new_v0[y][x] + str(i)
if C and x == 3:
t = t.lstrip()
s += t
if C and x == 6:
s += lines_x_16_new_v0[y][7]
s += '\n'
if C and y == 6:
s += lines_y_16_new_v0[7].strip() + '\n'
#s += lines_y_16_new_v0[5]
# rename bonds. UEAO --> IUEA
if v:
s = tl(s, 'UEAO', '─UEA')
else:
s = tl(s, 'UEAO', 'IUEA')
#replace('U', '─').replace('E', 'U').replace('
return s
def str_cross(m):
i = m[1][1]
# detect the value of v
v = (m[1][2] == lx(
(i in gl) and gl or gr, # same congruence
(i in me) and mi or me, # other mood
(i in tn) and tn or tp # same tempo
))
s = ''
if v:
t = (m[1][3], m[1][4], B[3], m[1][1], B[1], m[1][2], m[1][3])
s += ' %i\n' % m[3][1]
s += '\n'
s += ' %i\n' % m[4][1]
s += ' %s\n' % B[4]
s += '%i %i %s %i %s %i %i\n' % t
s += ' %s\n' % B[2]
s += ' %i\n' % m[2][1]
s += '\n'
s += ' %i\n' % m[3][1]
return s
def hide_gender(m):
for y in range(1, 5):
for x in range(1, 5):
i = m[y][x]
if i > 5:
m[y][x] = i - 5
return m
# main
#for x in [1, 2, 3, 4, 6, 7, 8, 9]:
for x in [1, 2, 3, 4]:
for i in [True, False]:
#for i in [True]:
si = i and 'I' or 'H'
m = view(x, i)
m2 = hide_gender(copy.deepcopy(m))
#print('m = '+repr(m))
#print(str_16_new(m))
#print(str_16_new(m2))
#print('iter = '+repr(m.__iter__))
# serialize matrix to string
sm = ''
for y in m:
sm += ''.join(map(str, y))
#sm += ' '.join(map(str, y)) + '\n'
sm2 = ''
for y in m2:
sm2 += ''.join(map(str, y))
sma = tl(sm, '12346789', 'ABCDFGHI')
sma2 = tl(sm2, '1234', 'ABCD')
#print('%s%i = %s = %s' % (si, x, sm, sm2))
#print('%s%i:\n%s' % (si, x, sm))
#print('%s%i:\n%s\n%s' % (si, x, sm, sm2))
#print('%s%i:\n%s\n%s\n%s\n%s' % (si, x, sm, sm2, sma, sma2))
#print('%s%i:\n%s\n%s\n%s\n%s' % (si, x, sm, sm2, sma, sma2))
print(si + str(x) + 'N4 = ' + sm2)
print(si + str(x) + 'L4 = ' + tl(sm2, '1234', 'ABCD'))
print(si + str(x) + 'N8F = ' + sm)
print(si + str(x) + 'L8F = ' + tl(sm, '12346789', 'ABCDFGHI'))
print(si + str(x) + 'N8R = ' + tl(sm, '12346789', '67891234'))
print(si + str(x) + 'L8R = ' + tl(sm, '12346789', 'FGHIABCD'))
# print compact matrix
if False:
sm3 = ''
for y in m:
sm3 += ' '.join(map(str, y)) + '\n'
print(si + str(x) + 'N8:\n' + sm3)
print(si + str(x) + 'L8:\n' + tl(sm3, '12346789', 'ABCDFGHI'))
sm4 = ''
for y in m2:
sm4 += ' '.join(map(str, y)) + '\n'
print(si + str(x) + 'N4:\n' + sm4)
print(si + str(x) + 'L4:\n' + tl(sm4, '1234', 'ABCD'))
#print('%s%i = %s' % (si, x, sm))
#print(sm)
#_print(sm + ' ')
#print(str_16_new(m))
#print(str_16_new(m2))
#print(str_16_new(m2, C=True))
print(tl(str_16_new(m2, C=True), '─IUEA', 'BBDHP').replace('BBBBBBB', '───────').replace('BBB', '───'))
#print(tl(str_16_new(m2, C=True), '─IUEA1234', 'BBDHPAAAA'))
# with gender, coded as number
#print(str_16_new(view(( x + 5 ), i), C=True))
#print(str_cross(m2))
print(tl(str_cross(m2), 'UEAO', 'BDHP'))
| 21.560694 | 105 | 0.418713 |
46d3db3255de71e377112203d843af0995fb6cf3
| 558 |
py
|
Python
|
Utils/py/exportEdgels/exportEdgels.py
|
tarsoly/NaoTH
|
dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
Utils/py/exportEdgels/exportEdgels.py
|
tarsoly/NaoTH
|
dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
Utils/py/exportEdgels/exportEdgels.py
|
tarsoly/NaoTH
|
dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
from naoth.LogReader import LogReader
from naoth.LogReader import Parser
def get_camera_matrix(frame):
cm_bottom = frame["CameraMatrix"]
cm_top = frame["CameraMatrixTop"]
return [frame.number, cm_bottom, cm_top]
def get_edgels(frame):
edgel_percepts = frame["ScanLineEdgelPercept"]
return [frame.number, edgel_percepts]
if __name__ == "__main__":
myParser = Parser()
myParser.register("CameraMatrixTop", "CameraMatrix")
for msg in LogReader("./game.log", myParser, get_edgels):
print(msg)
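# Note (added for illustration): get_camera_matrix() above is an alternative
# frame handler that __main__ does not use; presumably "ScanLineEdgelPercept"
# would also need to be registered with the parser for get_edgels() to see
# it in every frame.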
| 24.26087 | 61 | 0.71147 |
03abdc9a12f16464373de3fba3cbb3a4ed2bbe85
| 247 |
py
|
Python
|
todo/admin.py
|
arkarhtethan/simple-django-todo-list
|
1d91ec11deb4bdcf9902206ddf754205e3d96447
|
[
"MIT"
] | null | null | null |
todo/admin.py
|
arkarhtethan/simple-django-todo-list
|
1d91ec11deb4bdcf9902206ddf754205e3d96447
|
[
"MIT"
] | 9 |
2019-12-04T22:33:07.000Z
|
2022-02-10T08:25:39.000Z
|
todo/admin.py
|
arkarhtethan/simple-django-todo-list
|
1d91ec11deb4bdcf9902206ddf754205e3d96447
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Todo
class TodoAdmin(admin.ModelAdmin):
list_display = ("title","due_date","done")
list_filter = ("done","due_date",)
list_editable = ("due_date",)
admin.site.register(Todo, TodoAdmin)
| 17.642857 | 43 | 0.728745 |
03b9e54d2eb80c6a91a71d3b802ccc16b256d132
| 1,048 |
py
|
Python
|
cs/lambda_cs/03_data_structures/queue_and_stack/dll_queue.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | null | null | null |
cs/lambda_cs/03_data_structures/queue_and_stack/dll_queue.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | 8 |
2020-03-24T17:47:23.000Z
|
2022-03-12T00:33:21.000Z
|
cs/lambda_cs/03_data_structures/queue_and_stack/dll_queue.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | null | null | null |
"""
Data Structures :: Queue
"""
import sys
sys.path.append("../doubly_linked_list")
from doubly_linked_list import DoublyLinkedList
class Queue:
def __init__(self):
"""Implementation of a Queue (FIFO). A doubly-linked list is
used as the underlying data structure because the methods for
manipulating the two data structures are very similar."""
self.storage = DoublyLinkedList()
def enqueue(self, value):
"""Adds an item to the back of the queue.
:param value : Item to be added to the queue.
"""
self.storage.add_to_tail(value)
def dequeue(self):
"""Removes an item from the front of the queue.
:return value : Value of dequeued item or None.
"""
# Empty queue case is handled by DLL method
value = self.storage.remove_from_head()
return value
    def len(self):
        """Returns the number of items in the queue by delegating to
        `len()` on the underlying doubly linked list.
        :return length (int) : Length of queue.
        """
        return len(self.storage)
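# A minimal usage sketch (added; not part of the original file):
#   q = Queue()
#   q.enqueue('a'); q.enqueue('b')
#   q.dequeue()  # -> 'a' (FIFO)
#   q.len()      # -> 1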
| 26.2 | 69 | 0.614504 |
ff04392cc00e634cc57517c482b44f56af2fad62
| 4,292 |
py
|
Python
|
weeman-master/core/httpd.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
weeman-master/core/httpd.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
weeman-master/core/httpd.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
##
## httpd.py - the main httpd server
##
## Written by @Hypsurus
##
import SimpleHTTPServer
import SocketServer
import urllib2
import cgi
import os
from socket import error as socerr
from core.config import __version__
from core.config import __codename__
from core.misc import printt
from bs4 import BeautifulSoup as bs
class handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
## Set server version
server_version = "Weeman %s (%s)" %(__version__, __codename__)
def do_POST(self):
post_request = []
printt(3, "%s - sent POST request." %self.address_string())
form = cgi.FieldStorage(self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD':'POST',
'CONTENT_TYPE':self.headers['Content-Type'],})
try:
from core.shell import url
logger = open("%s.log" %url.replace("https://", "").replace("http://", "").split("/")[0], "w+")
logger.write("## Data for %s\n\n" %url)
for tag in form.list:
tmp = str(tag).split("(")[1]
key,value = tmp.replace(")", "").replace("\'", "").replace(",", "").split()
post_request.append("%s %s" %(key,value))
printt(2, "%s => %s" %(key,value))
logger.write("%s => %s\n" %(key,value))
logger.close()
from core.shell import action_url
create_post(url,action_url, post_request)
SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
except socerr as e:
printt(3, "Something wrong: (%s) igonring ..." %str(e))
except Exception as e:
printt(3, "Something wrong: (%s) igonring ..." %str(e))
def log_message(self, format, *args):
printt(3, "Connected : %s" %(self.address_string()))
arg = format%args
if arg.split()[1] == "/":
printt(3, "%s - sent GET request without parameters." %self.address_string())
else:
if arg.split()[1].startswith("/") and "&" in arg.split()[1]:
printt(3, "%s - sent GET request with parameters." %self.address_string())
printt(2, "%s" %arg.split()[1])
class weeman(object):
def __init__(self, url,port):
from core.shell import url
from core.shell import port
self.port = port
self.httpd = None
self.url = url
        self.form_url = None
def request(self,url):
from core.shell import user_agent
opener = urllib2.build_opener()
opener.addheaders = [('User-Agent', user_agent)]
return opener.open(self.url).read()
def clone(self):
printt(3, "Trying to get %s ..." %self.url)
printt(3, "Downloadng wepage ...")
data = self.request(self.url)
data = bs(data, "html.parser")
printt(3, "Modifying the HTML file ...")
for tag in data.find_all("form"):
tag['method'] = "post"
tag['action'] = "ref.html"
with open("index.html", "w") as index:
index.write(data.prettify().encode('utf-8'))
index.close()
printt(3, "the HTML page will redirect to ref.html ...")
def serve(self):
printt(3, "\033[01;35mStarting Weeman %s server on 0.0.0.0:%d\033[00m" %(__version__, self.port))
self.httpd = SocketServer.TCPServer(("", self.port),handler)
self.httpd.serve_forever()
def cleanup(self):
printt(3, "\n:: Running cleanup ...")
## In case weeman will not create ref.html,
## Remove each file in diffrent check.
if os.path.exists("index.html"):
os.remove("index.html")
if os.path.exists("ref.html"):
os.remove("ref.html")
def create_post(url,action_url, post_request):
printt(3, "Creating ref.html ...")
red = open("ref.html","w")
red.write("<body><form id=\"ff\" action=\"%s\" method=\"post\" >\n" %action_url)
for post in post_request:
key,value = post.split()
red.write("<input name=\"%s\" value=\"%s\" type=\"hidden\" >\n" %(key,value))
red.write("<input name=\"login\" type=\"hidden\">")
red.write("<script langauge=\"javascript\">document.forms[\"ff\"].submit();</script>")
red.close()
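# A minimal usage sketch (added for illustration; not part of the original
# file), mirroring how core/shell.py presumably drives this class:
#   w = weeman(url, port)
#   w.clone()      # fetch the page and rewrite its forms to post to ref.html
#   try:
#       w.serve()  # serve on 0.0.0.0:port until interrupted
#   finally:
#       w.cleanup()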
| 37.982301 | 107 | 0.567568 |
2075cfb93479cc556ec79896863590c0fffb612c
| 2,080 |
py
|
Python
|
whereis-master/whereis-backend/database/database.py
|
jaakaappi/archived-projects
|
be1f754eca7c1434f3a363b0ea8ebcd190a42436
|
[
"MIT"
] | null | null | null |
whereis-master/whereis-backend/database/database.py
|
jaakaappi/archived-projects
|
be1f754eca7c1434f3a363b0ea8ebcd190a42436
|
[
"MIT"
] | 3 |
2021-03-10T13:18:31.000Z
|
2021-05-11T09:20:11.000Z
|
whereis-master/whereis-backend/database/database.py
|
jaakaappi/archived-projects
|
be1f754eca7c1434f3a363b0ea8ebcd190a42436
|
[
"MIT"
] | null | null | null |
from pymongo import MongoClient
from bson.objectid import ObjectId
client = MongoClient('localhost', 27017)
db = client['whereis_database']
def is_empty():
return db.locations.count_documents({}) <= 0 or db.items.count_documents({}) <= 0
def fill_test_db():
location_ids = db.locations.insert_many([{
"name": "Garage",
"map": "map1.png"
},
{
"name": "Hangar",
"map": "map2.png"
}]).inserted_ids
db.items.insert_many([{
"name": "Hammer1",
"description": "Smashing",
"image": "hammer.jpg",
"location": location_ids[0]
},
{
"name": "Hammer2",
"description": "Smashing",
"image": "hammer.jpg",
"location": location_ids[1]
},
{
"name": "Knife1",
"description": "Sharp",
"image": "knife.png",
"location": location_ids[0]
},
{
"name": "Knife2",
"description": "Sharp",
"image": "knife.png",
"location": location_ids[1]
}])
print("== Filled test db ==")
def get_locations():
locations = []
for location in db.locations.find({}):
locations.append({
"id": str(location['_id']),
"name": location['name'],
"map": location['map']
})
return locations
def get_items(count=None, name=None, location=None):
items = []
internal_count = count
query = {}
if name:
query['name'] = name
    if location:
query['location'] = ObjectId(location)
for item in db.items.find(query):
items.append({
"id": str(item['_id']),
"name": item['name'],
"description": item['description'],
"image": item['image'],
"location": str(item['location'])
})
if count:
            if internal_count > 1:  # stop once 'count' items have been collected
internal_count -= 1
else:
break
return items
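# A minimal usage sketch (added; not part of the original file):
#   if is_empty():
#       fill_test_db()
#   locations = get_locations()
#   knives = get_items(name="Knife1", location=locations[0]["id"])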
| 24.470588 | 85 | 0.482212 |
20f4fa304b382cb667e05f6fcec36fafee489bc5
| 8,378 |
py
|
Python
|
official/cv/ADNet/src/trainers/RL_tools.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/ADNet/src/trainers/RL_tools.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/ADNet/src/trainers/RL_tools.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import cv2
from src.utils.get_video_infos import get_video_infos
from src.utils.do_action import do_action
from src.utils.overlap_ratio import overlap_ratio
from src.utils.augmentations import CropRegion
from mindspore import ops, nn
import mindspore
class TrackingPolicyLoss(nn.Cell):
def __init__(self):
super(TrackingPolicyLoss, self).__init__()
self.cast = ops.Cast()
# https://github.com/pytorch/examples/blob/master/reinforcement_learning/reinforce.py#L68
def construct(self, saved_log_probs, rewards):
rewards = self.cast(rewards, mindspore.dtype.float32)
saved_log_probs = self.cast(saved_log_probs, mindspore.dtype.float32)
policy_loss = ops.ReduceSum(False)(-saved_log_probs * rewards)
return policy_loss
# TrackingEnvironment for all of the videos in one epoch
# Number of steps can be set in opts['train']['RL_steps'] before initialize this environment
class TrackingEnvironment:
def __init__(self, train_videos, opts, transform, args):
self.videos = [] # list of clips dict
self.opts = opts
self.transform = transform
self.args = args
self.RL_steps = self.opts['train']['RL_steps'] # clip length
video_names = train_videos['video_names']
video_paths = train_videos['video_paths']
bench_names = train_videos['bench_names']
vid_idxs = np.random.permutation(len(video_names))
for vid_idx in vid_idxs:
# dict consist of set of clips in ONE video
clips = {
'img_path': [],
'frame_start': [],
'frame_end': [],
'init_bbox': [],
'end_bbox': [],
'vid_idx': [],
}
# Load current training video info
video_name = video_names[vid_idx]
video_path = video_paths[vid_idx]
bench_name = bench_names[vid_idx]
vid_info = get_video_infos(bench_name, video_path, video_name)
if self.RL_steps is None:
self.RL_steps = len(vid_info['gt'])-1
vid_clip_starts = [0]
vid_clip_ends = [len(vid_info['gt'])-1]
else:
vid_clip_starts = np.array(range(len(vid_info['gt']) - self.RL_steps))
vid_clip_starts = np.random.permutation(vid_clip_starts)
vid_clip_ends = vid_clip_starts + self.RL_steps
# number of clips in one video
num_train_clips = min(opts['train']['rl_num_batches'], len(vid_clip_starts))
print("num_train_clips of vid " + str(vid_idx) + ": ", str(num_train_clips))
for clipIdx in range(num_train_clips):
frameStart = vid_clip_starts[clipIdx]
frameEnd = vid_clip_ends[clipIdx]
clips['img_path'].append(vid_info['img_files'][frameStart:frameEnd])
clips['frame_start'].append(frameStart)
clips['frame_end'].append(frameEnd)
clips['init_bbox'].append(vid_info['gt'][frameStart])
clips['end_bbox'].append(vid_info['gt'][frameEnd])
clips['vid_idx'].append(vid_idx)
if num_train_clips > 0: # small hack
self.videos.append(clips)
self.clip_idx = -1 # hack for reset function
self.vid_idx = 0
self.state = None # current bbox
self.gt = None # end bbox
self.current_img = None # current image frame
self.current_patch = None # current patch (transformed)
self.current_img_idx = 0
self.reset()
# return state, reward, done, info. Also update the curr_patch based on the new bounding box
# state: next bounding box
# reward: the reward
# done: True if finishing one clip.
# info: a dictionary
def step(self, action):
info = {
'finish_epoch': False
}
# do action
self.state = do_action(self.state, self.opts, action, self.current_img.shape)
self.current_patch, _, _, _ = self.transform(self.current_img, self.state)
if action == self.opts['stop_action']:
reward, done, finish_epoch = self.go_to_next_frame()
info['finish_epoch'] = finish_epoch
else: # just go to the next patch (still same frame/current_img)
reward = 0
done = False
self.current_patch, _, _, _ = self.transform(self.current_img, self.state)
return self.state, reward, done, info
    # reset environment to a new clip.
    # Returns True once the whole epoch is finished (all videos consumed), False if clips remain
def reset(self):
while True:
self.clip_idx += 1
# if the clips in a video are finished... go to the next video
if self.clip_idx >= len(self.videos[self.vid_idx]['frame_start']):
self.vid_idx += 1
self.clip_idx = 0
if self.vid_idx >= len(self.videos):
self.vid_idx = 0
                    # one epoch finished... reinitialize the class to reshuffle and run again
return True
# initialize state, gt, current_img_idx, current_img, and current_patch with new clip
self.state = self.videos[self.vid_idx]['init_bbox'][self.clip_idx]
self.gt = self.videos[self.vid_idx]['end_bbox'][self.clip_idx]
# frameStart = self.videos[self.vid_idx]['frame_start'][self.clip_idx]
self.current_img_idx = 1 # self.current_img_idx = frameStart + 1
self.current_img = cv2.imread(self.videos[self.vid_idx]['img_path'][self.clip_idx][self.current_img_idx])
self.current_patch, _, _, _ = self.transform(self.current_img, np.array(self.state))
if self.gt != '': # small hack
break
return False
def get_current_patch(self):
return self.current_patch
def get_current_train_vid_idx(self):
return self.videos[self.vid_idx]['vid_idx'][0]
def get_current_patch_unprocessed(self):
crop = CropRegion()
state_int = [int(x) for x in self.state]
current_patch_unprocessed, _, _, _ = crop(self.current_img, state_int)
return current_patch_unprocessed.astype(np.uint8)
def get_state(self):
return self.state
def get_current_img(self):
return self.current_img
def go_to_next_frame(self):
self.current_img_idx += 1
finish_epoch = False
# if already in the end of a clip...
if self.current_img_idx >= len(self.videos[self.vid_idx]['img_path'][self.clip_idx]):
# calculate reward before reset
reward = reward_original(np.array(self.gt), np.array(self.state))
print("reward=" + str(reward))
# reset (reset state, gt, current_img_idx, current_img and current_img_patch)
finish_epoch = self.reset() # go to the next clip (or video)
done = True # done means one clip is finished
# just go to the next frame (means new patch and new image)
else:
reward = 0
done = False
# note: reset already read the current_img and current_img_patch
self.current_img = cv2.imread(self.videos[self.vid_idx]['img_path'][self.clip_idx][self.current_img_idx])
self.current_patch, _, _, _ = self.transform(self.current_img, self.state)
return reward, done, finish_epoch
def reward_original(gt, box):
iou = overlap_ratio(gt, box)
if iou > 0.7:
reward = 1
else:
reward = -1
return reward
| 37.738739 | 117 | 0.619241 |
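A minimal sketch of how the environment and loss above would fit into a REINFORCE loop. Only TrackingEnvironment and TrackingPolicyLoss come from the file above; train_videos, opts, transform, args and the policy network with its sample_action helper are assumed to be prepared elsewhere.

env = TrackingEnvironment(train_videos, opts, transform, args)
loss_fn = TrackingPolicyLoss()
log_probs, rewards = [], []
done = False
while not done:
    patch = env.get_current_patch()
    action, log_prob = sample_action(policy_net, patch)  # hypothetical helper
    _, reward, done, _ = env.step(action)
    log_probs.append(log_prob)
    rewards.append(reward)
# once a clip is done, the collected log-probs and rewards feed TrackingPolicyLoss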
45b1f8256ecf807b0c9b4c5a0d47c3013c46a614
| 2,389 |
py
|
Python
|
src/onegov/swissvotes/fields/policy_area.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/swissvotes/fields/policy_area.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/swissvotes/fields/policy_area.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from copy import deepcopy
from json import dumps
from onegov.swissvotes import _
from onegov.swissvotes.models import PolicyArea
from wtforms import SelectMultipleField
from wtforms.widgets import Select
class PolicyAreaWidget(Select):
""" The widget for the React Dropdown Tree. """
def __call__(self, field, **kwargs):
kwargs['class_'] = 'policy-selector'
kwargs['data-tree'] = dumps(field.tree)
kwargs['data-placehoder-text'] = field.gettext(
_("Select Some Options")
)
kwargs['data-no-matches-text'] = field.gettext(_("No results match"))
return super().__call__(field, **kwargs)
@classmethod
def render_option(cls, value, label, selected, **kwargs):
""" Adds a level specific class to each option.
This allows to see the hierarchie in case the client has disabled
javascript.
"""
kwargs['class'] = 'level-{}'.format(PolicyArea(value).level)
return super(PolicyAreaWidget, cls).render_option(
value, label, selected, **kwargs
)
class PolicyAreaField(SelectMultipleField):
""" A select field with React Dropdown Tree support. """
widget = PolicyAreaWidget(multiple=True)
def __init__(self, *args, **kwargs):
self.tree = kwargs.pop('tree', [])
super().__init__(*args, **kwargs)
@property
def tree(self):
""" Returns the tree data and automatically preselects the selected
select options.
"""
tree = deepcopy(self._tree)
def preselect(item):
checked = item['value'] in self.data
expanded = False
for child in item['children']:
expanded = True if preselect(child) else expanded
item['checked'] = checked
item['expanded'] = expanded
return expanded or checked
for item in tree:
preselect(item)
return tree
@tree.setter
def tree(self, value):
""" Sets the tree data and automatically populates the select's
choices.
"""
self._tree = value
def add_choices(item):
self.choices.append((item['value'], item['label']))
for child in item['children']:
add_choices(child)
self.choices = []
for item in value:
add_choices(item)
| 28.783133 | 77 | 0.604018 |
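A minimal sketch of wiring the field above into a form. The form class and the tree contents are assumptions; only the value/label/children structure is what the preselect and add_choices helpers expect.

from onegov.form import Form

class VoteForm(Form):
    policy_area = PolicyAreaField(tree=[
        {
            'value': '1',
            'label': 'Economy',
            'children': [
                {'value': '1.5', 'label': 'Trade', 'children': []},
            ],
        },
    ])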
b36234dcca716d2c05b5494e3faae0b458d9a979
| 3,265 |
py
|
Python
|
src/CalculateDistances.py
|
animucki/2mmn40
|
c54c0e4e9c801d63f048fbb5d9abd8fe9432cfdc
|
[
"Unlicense"
] | null | null | null |
src/CalculateDistances.py
|
animucki/2mmn40
|
c54c0e4e9c801d63f048fbb5d9abd8fe9432cfdc
|
[
"Unlicense"
] | null | null | null |
src/CalculateDistances.py
|
animucki/2mmn40
|
c54c0e4e9c801d63f048fbb5d9abd8fe9432cfdc
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
def ReadTrajectory(trajFile):
"""This will read the WHOLE trajectory into memory. This will not be possible
later very large trajectores. I'll let you worry about how to deal with that yourself..."""
trajectory=[]
with open(trajFile, "r") as tF:
line = tF.readline()
        while line != "":
#first line is number of atoms
N = int(line.strip())
tF.readline().strip() # second line is a comment that we throw away
q = []
for i in range(N):
line = tF.readline().strip().split(" ")
for c in line[1:]:
                    if c != "":
q.append(float(c))
trajectory.append(np.array(q))
line = tF.readline()
return trajectory, N
t, n = ReadTrajectory("Hydrogen.xyz")
print("trajectory contains {} atoms and {} steps".format(n, len(t)))
d_hists=[]
for s,q in enumerate(t):
print("Processing step {}".format(s))
    # First I will reshape q into an (n,3) matrix;
    # to begin with, q is a vector of length 3*n
q = q.reshape(n, 3) # compare this to your trajectory
# make sure you have the same values
# as in the trajectory file
# Now I will calculate all pair-wise distances:
# First I will do this the easy way
# r1 = np.zeros((n,n)) # I will store the pair-wise distances
# # in an n,n matrix
# print("Start slow loop:")
# for i in range(n):
# for j in range(n):
# r1[i,j] = np.linalg.norm(q[i]-q[j]) # what is 'norm' here?
# print("Done slow loop")
# Generally speaking, looping through a np.array
# as above is VERY slow (much the same as looping matlab matrices)
# So I will show you a trick that will calculate the same as above
# much faster...
print("start faster calculation:")
dr = q - q[:, np.newaxis] # dr is in R^(nxnx3)
# have a careful look at dr in
# spyder's variable explorer
# What do you think this tensor holds?
# what does the np.newaxis mean?
r2 = np.linalg.norm(dr, axis=2) # what does 'axis' mean here?
print("done faster calculation")
# r1 and r2 should be very similar (actually identical)
# m = np.abs(r1-r2) > 1e-3 # try these operations individually in spyder
# and understand what the output is
    # print(m.sum()) # what does this mean? Why is this sum interesting?
# hint: could it be some kind of norm?
# You should notice that the same calculation without looping is
# MUCH faster.
# You may comment out the slow loop for your calculations.
# don't forget to also comment out the equality check
# Now I will calculate the histograms of the distances
# and store it
d_hists.append(np.histogram(r2.reshape((n*n))))
# I'm going to plot one of the histograms, you will need more for
# your exercise
x = d_hists[10][1]
x = (x[1:] + x[:-1])*0.5  # midpoints of the histogram bin edges
y = d_hists[10][0]
plt.plot(x, y)
plt.xlabel('(nm)')
plt.show()
| 34.368421 | 91 | 0.579479 |
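The broadcasting trick used above is easiest to see on a tiny example; the three points below are made up.

import numpy as np
q = np.array([[0., 0., 0.],
              [1., 0., 0.],
              [0., 2., 0.]])
dr = q - q[:, np.newaxis]       # shape (3, 3, 3); dr[i, j] = q[j] - q[i]
r = np.linalg.norm(dr, axis=2)  # shape (3, 3); pairwise distances
print(r[0, 1], r[0, 2], r[1, 2])  # 1.0 2.0 2.2360679... (the last is sqrt(5))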
b3783ccaa2bfb2dfb0e0d47859ce56110f870aa4
| 10,014 |
py
|
Python
|
yhh_recommend_text.py
|
sambabypapapa/CralwerSet
|
a76e0660c42ce7aac20b8d07ccc454b6636a8a2a
|
[
"Apache-2.0"
] | 5 |
2020-08-17T08:37:16.000Z
|
2021-06-07T05:02:05.000Z
|
yhh_recommend_text.py
|
sambabypapapa/CralwerSet
|
a76e0660c42ce7aac20b8d07ccc454b6636a8a2a
|
[
"Apache-2.0"
] | null | null | null |
yhh_recommend_text.py
|
sambabypapapa/CralwerSet
|
a76e0660c42ce7aac20b8d07ccc454b6636a8a2a
|
[
"Apache-2.0"
] | 1 |
2021-06-07T05:02:10.000Z
|
2021-06-07T05:02:10.000Z
|
"""
Crawl Taobao "You Hao Huo" (good-item) recommended products and their categories
"""
import time
import datetime
import json
import requests
import random
import CralwerSet.connect_mysql as connect_mysql
import re
from urllib import parse
import easygui as g
import hashlib
from requests.packages import urllib3
import traceback
import pickle
import pymysql
urllib3.disable_warnings()
class youHH():
def __init__(self):
self.conn_T = connect_mysql.test()
self.cur_T = self.conn_T.cursor()
self.conn_W = connect_mysql.w_shark_erp()
self.cur_W = self.conn_W.cursor()
        # list of article ids already stored in the database
self.have_list = []
self.users_num = {}
self.headers = {
"Connection": "keep-alive",
"User-Agent": "Mozilla/5.0 (Linux; Android 5.1.1; xiaomi mix Build/LMY47I; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.100 Safari/537.36 AliApp(TB/9.1.0) TTID/600000@taobao_android_9.1.0 WindVane/8.5.0 900X1600 UT4Aplus/0.2.16",
"Cookie": "_m_h5_tk=36b5227cd1a1e340e4d56bcc93555f2f_1587526955005; _m_h5_tk_enc=7385708053b9b4519913b71659d347aa;"
}
def get_cookies(self, response):
_m_h5_tk = response.cookies._cookies['.taobao.com']['/']['_m_h5_tk'].value
_m_h5_tk_enc = response.cookies._cookies['.taobao.com']['/']['_m_h5_tk_enc'].value
cookies = f'_m_h5_tk={_m_h5_tk};_m_h5_tk_enc={_m_h5_tk_enc};'
self.headers['Cookie'] = cookies
return cookies
def get_sign(self, t, token, page):
"""
        Construct the sign parameter (md5 of token, timestamp, appKey and payload)
:param data:
:return:
"""
if page == 1:
data = token + "&" + str(
t) + "&12574478&{\"d\":\"{\\\"tce_sid\\\":\\\"1891397\\\",\\\"tce_vid\\\":\\\"0\\\",\\\"tid\\\":\\\"\\\",\\\"tab\\\":\\\"\\\",\\\"topic\\\":\\\"selected_new_0\\\",\\\"count\\\":\\\"\\\",\\\"env\\\":\\\"online\\\",\\\"pageNo\\\":\\\"1\\\",\\\"psId\\\":\\\"51817\\\",\\\"bizCode\\\":\\\"steins.goodItem\\\",\\\"type\\\":\\\"selected_v2\\\",\\\"page\\\":\\\"1\\\",\\\"pageSize\\\":\\\"20\\\",\\\"tabId\\\":\\\"0\\\",\\\"contentIds\\\":\\\"2500000222552474145%2C2500000213511536931\\\",\\\"viewTopicId\\\":\\\"\\\",\\\"line\\\":\\\"\\\",\\\"home_clickItemId\\\":\\\"590236412947\\\",\\\"src\\\":\\\"phone\\\"}\"}"
# {"d":"{\"tce_sid\":\"1891397\",\"tce_vid\":\"0\",\"tid\":\"\",\"tab\":\"\",\"topic\":\"selected_new_0\",\"count\":\"\",\"env\":\"online\",\"pageNo\":\"1\",\"psId\":\"51817\",\"bizCode\":\"steins.goodItem\",\"type\":\"selected_v2\",\"page\":\"1\",\"pageSize\":\"20\",\"tabId\":\"0\",\"contentIds\":\"2500000211461002193%2C2500000207797210655\",\"viewTopicId\":\"\",\"line\":\"\",\"home_clickItemId\":\"570216112026\",\"src\":\"phone\"}"}
else:
data = token + "&" + str(
t) + "&12574478&{\"d\":\"{\\\"tce_sid\\\":\\\"1891397\\\",\\\"tce_vid\\\":\\\"0\\\",\\\"tid\\\":\\\"\\\",\\\"tab\\\":\\\"\\\",\\\"topic\\\":\\\"selected_new\\\",\\\"count\\\":\\\"\\\",\\\"env\\\":\\\"online\\\",\\\"pageNo\\\":\\\"1\\\",\\\"psId\\\":\\\"51817\\\",\\\"bizCode\\\":\\\"steins.goodItem\\\",\\\"type\\\":\\\"selected_v2\\\",\\\"page\\\":\\\"%s\\\",\\\"pageSize\\\":\\\"20\\\",\\\"tabId\\\":\\\"0\\\",\\\"clickedIds\\\":\\\"\\\",\\\"src\\\":\\\"phone\\\"}\"}" % (
page)
m = hashlib.md5(data.encode())
sign = m.hexdigest()
return sign, parse.quote(data.split("&")[-1])
def get_content_sign(self, content_id, token, t):
data = token + "&" + str(
t) + "&12574478&{\"contentId\":\"%s\",\"source\":\"youhh_tuji\",\"type\":\"h5\",\"params\":\"\",\"business_spm\":\"\",\"track_params\":\"\"}" % (
content_id)
m = hashlib.md5(data.encode())
sign = m.hexdigest()
return sign, parse.quote(data.split("&")[-1])
def closeSql_T(self):
"""
        Close the T database connection
:return:
"""
self.cur_T.close()
self.conn_T.close()
def closeSql_W(self):
"""
        Close the W database connection
:return:
"""
self.cur_W.close()
self.conn_W.close()
def quchong(self, text, long, LIST):
for each in LIST:
if each[1] == long and text[0] == each[0][0] and text == each[0]:
return False
return True
def getContent(self, content_id):
"""
        :param content_id: article id
        :return: article text contents
"""
while True:
self.headers[
'Referer'] = f'https://market.m.taobao.com/apps/market/content/index.html?contentId={content_id}&source=youhh_tuji'
temp = int(time.time() * 1000)
_m_h5_tk = re.findall('_m_h5_tk=[^;]*', self.headers['Cookie'])[0].split('=')[1]
token = _m_h5_tk.split('_')[0]
sign, data = self.get_content_sign(content_id, token, temp)
url = f"https://h5api.m.taobao.com/h5/mtop.taobao.beehive.detail.contentservicenewv2/1.0/?jsv=2.5.1&appKey=12574478&t={temp}&sign={sign}&api=mtop.taobao.beehive.detail.contentservicenewv2&v=1.0&AntiCreep=true&AntiFlood=true&timeout=5000&type=jsonp&dataType=jsonp&callback=mtopjsonp2&data={data}"
response = requests.get(url, headers=self.headers, verify=False)
response_text = response.text[12:-1]
if "令牌过期" in response_text:
self.get_cookies(response)
else:
break
info = json.loads(response_text)
time.sleep(random.randint(0, 10))
text_list = []
if "photos" in info['data']['models']['content'].keys():
for photo in info['data']['models']['content']['photos']:
if 'desc' in photo.keys():
text_list.append([photo['desc'], len(photo['desc'].encode())])
uniqe_text = []
while len(text_list):
text, long = text_list.pop()
if self.quchong(text, long, text_list):
uniqe_text.append([text, long])
print(text, long)
return uniqe_text
def requestUrl(self, page):
"""
        Request one results page
        :param page:
        :return: the page's json data
"""
while True:
temp = int(time.time() * 1000)
_m_h5_tk, _m_h5_tk_enc = (re.findall('_m_h5_tk=[^;]*', self.headers['Cookie'])[0].split('=')[1],
re.findall('_m_h5_tk_enc=[^;]*', self.headers['Cookie'])[0].split('=')[1])
token = _m_h5_tk.split('_')[0]
sign, data = self.get_sign(temp, token, page)
url = f'https://h5api.m.taobao.com/h5/mtop.taobao.tceget.steins.gooditem.xget/1.0/?jsv=2.4.5&appKey=12574478&t={temp}&sign={sign}&AntiCreep=true&api=mtop.taobao.tceget.steins.gooditem.xget&v=1.0&dataType=jsonp&timeout=20000&H5Request=true&preventFallback=true&type=jsonp&callback=mtopjsonp2&data={data}'
response = requests.get(url, headers=self.headers, verify=False)
response_text = response.text[12:-1]
if "令牌过期" in response_text:
self.get_cookies(response)
else:
break
info = json.loads(response_text)
time.sleep(random.randint(5, 20))
return info
def findClass(self, title):
"""
        Determine the category of the article
        :param title:
        :return: the most likely category for the article
"""
sql = f"""SELECT t6.cat,t5.num FROM (select t4.MAIN_ID MAIN_ID,count(t4.MAIN_ID) num FROM (SELECT t2.CLASSIFY_ID CLASSIFY_ID FROM (select URL_ID, CONTENT from crawler_commodity_module_description where match(CONTENT) against('{title}') limit 100) t1, cm_commodity t2 where t1.URL_ID=t2.URL_ID ) t3, class_id t4 where t3.CLASSIFY_ID = t4.ID GROUP BY t4.MAIN_ID) t5, class_id t6 WHERE t6.ID=t5.MAIN_ID ORDER BY t5.num desc LIMIT 1;"""
while True:
try:
self.cur_W.execute(sql)
info = self.cur_W.fetchone()
break
except pymysql.err.OperationalError:
                print('Connection attempt failed: the peer did not answer in time or the host did not respond.')
self.conn_W.ping(True)
except pymysql.err.ProgrammingError:
print(sql)
                return '未知分类'  # "unknown category"
if info:
return info[0][0:-1]
else:
            return '未知分类'  # "unknown category"
def insertEssay(self, contentid, title, mainClass):
"""
        Write the essay record into the database
        :param contentid: article id
        :param title: article title
        :param mainClass: the most likely category of the article
:return:
"""
sql = f"""insert into yhh_essays(CONTENT_ID, TITLE, MAIN_CLASSIFY, UPDATE_TIME) values({contentid},'{title.replace("'", '')}','{mainClass}',now()); """
self.cur_T.execute(sql)
def insertText(self, text_list, contentId):
sql = "insert into cm_text_temp(TEXT,`LONG`) values(%s,%s);"
self.cur_W.executemany(sql, text_list)
self.cur_T.execute(f"update yhh_essays set GET_CONTENT=1 where ID={contentId};")
self.conn_T.commit()
def getHaveList(self):
"""
        Fetch the list of already-stored article ids
:return:
"""
sql = """select CONTENT_ID from yhh_essays;"""
self.cur_T.execute(sql)
result = self.cur_T.fetchall()
if result:
for each in result:
self.have_list.append(int(each[0]))
return self.have_list
def main(self, id, contentId):
try:
text_list = self.getContent(int(str(contentId)[7:]))
self.insertText(text_list, id)
except Exception as e:
traceback.print_exc()
return False
return True
def main():
yhh = youHH()
    # query records that have no copy text yet
havnt_text_essay_sql = "select ID, CONTENT_ID from yhh_essays where GET_CONTENT=0;"
yhh.cur_T.execute(havnt_text_essay_sql)
    # fetch the copy text record by record
for id, content_id in yhh.cur_T.fetchall():
result = yhh.main(id, content_id)
yhh.closeSql_T()
yhh.closeSql_W()
if __name__ == '__main__':
main()
| 43.350649 | 625 | 0.546036 |
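The sign in the requests above is just an md5 digest over the token, timestamp, app key and payload, joined with '&'. A standalone sketch; the token and payload values are made up.

import hashlib
import time

token = "36b5227cd1a1e340"      # hypothetical _m_h5_tk prefix from the cookie
t = int(time.time() * 1000)
data = '{"contentId":"12345"}'  # hypothetical request payload
sign = hashlib.md5(f"{token}&{t}&12574478&{data}".encode()).hexdigest()
print(sign)  # 32-character hex digest passed as the sign query parameter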
2b9fbdaff827a00d9a129888170532f40b7bc014
| 660 |
py
|
Python
|
tests/test_logik.py
|
fossabot/superstructure
|
f4ab5cac269fb3dedfbd3a54c441af23edf3840b
|
[
"MIT"
] | null | null | null |
tests/test_logik.py
|
fossabot/superstructure
|
f4ab5cac269fb3dedfbd3a54c441af23edf3840b
|
[
"MIT"
] | null | null | null |
tests/test_logik.py
|
fossabot/superstructure
|
f4ab5cac269fb3dedfbd3a54c441af23edf3840b
|
[
"MIT"
] | null | null | null |
import unittest
from superstructure.logik import Begriff
from superstructure.grundbegriffe import Einzelheit
from superstructure.geist import Bewusstsein
class TestLogik(unittest.TestCase):
def test_basic_logik(self):
b = Bewusstsein(name="TestBewusstsein")
i = Begriff(name="I")
j = Begriff(name="J", allgemeinheit_id=i.id)
i.einzelheit = j.id
b.learn(i)
b.learn(j)
# self.assertEqual(i.allgemeinheit, Begriff().id)
self.assertEqual(j.allgemeinheit, i.id)
self.assertTrue(b.relation_applies(Einzelheit, [b.get(i.einzelheit), j]))
if __name__ == "__main__":
unittest.main()
| 27.5 | 81 | 0.683333 |
2bcbdbfe08769f4e09303ed4939454168cf6737e
| 2,159 |
py
|
Python
|
similaritycalculation/generalSimilarity.py
|
Geosoft2/Geosoftware-II-AALLH
|
bdb61d9a1111b9082ec2b9f309998c5f2166975e
|
[
"MIT"
] | null | null | null |
similaritycalculation/generalSimilarity.py
|
Geosoft2/Geosoftware-II-AALLH
|
bdb61d9a1111b9082ec2b9f309998c5f2166975e
|
[
"MIT"
] | 38 |
2018-10-24T08:51:58.000Z
|
2021-12-13T19:54:39.000Z
|
similaritycalculation/generalSimilarity.py
|
Geosoft2/Geosoftware-II-AALLH
|
bdb61d9a1111b9082ec2b9f309998c5f2166975e
|
[
"MIT"
] | 12 |
2018-10-24T08:20:13.000Z
|
2019-08-12T08:10:14.000Z
|
import math
import logging
LOGGER = logging.getLogger(__name__)
def sameDatatype(fileEnding1, fileEnding2):
"""checks if two datatypes are the same or from same family
:param fileEnding1 String for instance ".tiff"
:param fileEnding2 String for instance ".geotiff"
:returns: 1 if they are the same, otherwise 0
"""
same1, same2 = -1,-1
same1 = fileEnding1.find(fileEnding2.replace(".", "")) # 2 in 1
same2 = fileEnding2.find(fileEnding1.replace(".", "")) # 1 in 2
LOGGER.info("Datatypes %s and %s" % (fileEnding1, fileEnding2))
return 1 if same1>=0 or same2>=0 else 0
def sameAuthor(author1, author2):
"""checks if two Authors are the same
:param author1 String with Author
:param author2 String with Author
:returns: 1 if they are the same, otherwise 0
"""
LOGGER.info("Authors %s and %s" % (author1, author2))
return 1 if author1 == author2 else 0
def similarTitle(title1, title2):
"""checks if two strings are similar in reference to an amount of same characters
:param title1 String the first title
:param title2 String the second title
:returns: number between 1 and 0
"""
LOGGER.info("Titles %s and %s" % (title1, title2))
countList = 0
if len(title1) >= len(title2):
        # searches for same characters in both strings
charList = []
for i in title2:
# check if char has already been found
if i not in charList:
charList.append(i)
# count how often the char appears in the other string
countList += title1.count(i)
LOGGER.info("List of same characters: " + str(charList))
percent = 0
if len(title1) != 0:
percent = countList/len(title1)
return percent
else:
charList = []
for i in title1:
if i not in charList:
charList.append(i)
countList += title2.count(i)
LOGGER.info("List of same characters: " + str(charList))
percent = 0
if len(title2) != 0:
percent = countList/len(title2)
return percent
| 34.269841 | 85 | 0.612784 |
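A quick sanity check of the two helpers above; the inputs are made up.

print(sameDatatype(".tiff", ".geotiff"))   # 1, since "tiff" occurs inside "geotiff"
print(similarTitle("river map", "river"))  # 5 character hits / 9 chars, roughly 0.56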
47079df10a341d54724572c4f55fd5e3ba482151
| 2,413 |
py
|
Python
|
research/cv/dcgan/src/generator.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/dcgan/src/generator.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/dcgan/src/generator.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""dcgan generator"""
from mindspore import nn
from src.cell import Normal
from src.config import dcgan_imagenet_cfg as cfg
def convt(in_channels, out_channels, kernel_size, stride=1, padding=0, pad_mode="pad"):
weight_init = Normal(mean=0, sigma=0.02)
return nn.Conv2dTranspose(in_channels, out_channels,
kernel_size=kernel_size, stride=stride, padding=padding,
weight_init=weight_init, has_bias=False, pad_mode=pad_mode)
def bm(num_features):
gamma_init = Normal(mean=1, sigma=0.02)
return nn.BatchNorm2d(num_features=num_features, gamma_init=gamma_init)
class Generator(nn.Cell):
"""
DCGAN Generator
"""
def __init__(self):
super(Generator, self).__init__()
self.generator = nn.SequentialCell()
# input is Z, going into a convolution
self.generator.append(convt(cfg.latent_size, cfg.feature_size * 8, 4, 1, 0))
self.generator.append(bm(cfg.feature_size * 8))
self.generator.append(nn.ReLU())
# state size. 512 x 4 x 4
self.generator.append(convt(cfg.feature_size * 8, cfg.feature_size * 4, 4, 2, 1))
self.generator.append(bm(cfg.feature_size * 4))
self.generator.append(nn.ReLU())
# state size. 256 x 8 x 8
self.generator.append(convt(cfg.feature_size * 4, cfg.feature_size * 2, 4, 2, 1))
self.generator.append(bm(cfg.feature_size * 2))
self.generator.append(nn.ReLU())
# state size. 128 x 16 x 16
self.generator.append(convt(cfg.feature_size * 2, cfg.channel_size, 4, 2, 1))
self.generator.append(nn.Tanh())
# state size. 3 x 32 x 32
def construct(self, x):
return self.generator(x)
| 39.557377 | 89 | 0.656444 |
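The strides and paddings above follow the transposed-convolution size rule out = (in - 1) * stride - 2 * padding + kernel, so the 1x1 latent grows 4 -> 8 -> 16 -> 32. A minimal usage sketch; the batch size is arbitrary.

import numpy as np
from mindspore import Tensor

net = Generator()
noise = Tensor(np.random.randn(16, cfg.latent_size, 1, 1).astype(np.float32))
fake = net(noise)  # expected shape: (16, cfg.channel_size, 32, 32)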
472b1fb39849210594f1d8132cbf1b5fce24375b
| 359 |
py
|
Python
|
exercises/pt/test_02_10_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/pt/test_02_10_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/pt/test_02_10_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
def test():
    assert (
        "doc1.similarity(doc2)" in __solution__
        or "doc2.similarity(doc1)" in __solution__
    ), "Você está comparando a similaridade entre os dois documentos?"
assert (
0 <= float(similarity) <= 1
), "O valor da similaridade deve ser um número de ponto flutuante. Você fez este cálculo corretamente?"
__msg__.good("Muito bem!")
| 39.888889 | 107 | 0.668524 |
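A minimal sketch of the solution the test above checks for; the model name and the two sentences are placeholders (any pipeline with word vectors works).

import spacy

nlp = spacy.load("pt_core_news_md")  # hypothetical model choice
doc1 = nlp("Gosto de comida rápida")
doc2 = nlp("Gosto de pizza")
similarity = doc1.similarity(doc2)   # float between 0 and 1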
1b61643193609c85bbaf600eb6fb729daf6a4097
| 1,928 |
py
|
Python
|
cs/lambda_cs/05_hash_tables_and_blockchain/Hash-Tables/notes/arrays_notes.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | null | null | null |
cs/lambda_cs/05_hash_tables_and_blockchain/Hash-Tables/notes/arrays_notes.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | 8 |
2020-03-24T17:47:23.000Z
|
2022-03-12T00:33:21.000Z
|
cs/lambda_cs/05_hash_tables_and_blockchain/Hash-Tables/notes/arrays_notes.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | null | null | null |
"""
Hash tables :: Day 1 Notes: Arrays
An array:
* Stores a sequence of elements
* Each element must be the same data type
* Occupies a contiguous block of memory
* Can access data in constant time with this equation:
`memory_address = starting_address + index * data_size`
"""
class DynamicArray:
def __init__(self, capacity=1):
self.count = 0 # Number of elements in the array
self.capacity = capacity # Total amount of storage in array
self.storage = [None] * capacity
def insert(self, index, value):
"""Inserts a value into list at index.
Complexity: O(n)"""
# Check if we have enough capacity
if self.count >= self.capacity:
# If not, make more room
self.resize()
# Shift every item after index to right by 1
for i in range(self.count, index, -1):
self.storage[i] = self.storage[i - 1]
# Add new value at the index
self.storage[index] = value
# Increment count
self.count += 1
def append(self, value):
"""Appends a value to the end of array.
Complexity: O(1)"""
# Check if array has enough capacity
if self.count >= self.capacity:
# If not, resize up
self.resize()
# Add value to the index of count
self.storage[self.count] = value
# Increment count
self.count += 1
def resize(self):
"""Doubles the capacity of array."""
self.capacity *= 2
# Allocate a new storage array with double capacity
new_storage = [None] * self.capacity
        # Copy all elements from old storage to new
for i in range(self.count):
new_storage[i] = self.storage[i]
self.storage = new_storage
a = DynamicArray(2)
a.insert(0, 19)
a.insert(0, 14)
print(a.storage)
a.append(9)
a.append(8)
print(a.storage)
a.append(7)
print(a.storage)
| 27.942029 | 68 | 0.605809 |
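The doubling in resize is what makes append amortized O(1). A quick check of how capacity grows, continuing with the class above:

b = DynamicArray(1)
for v in range(5):
    b.append(v)
    print(b.count, b.capacity)  # prints 1 1, 2 2, 3 4, 4 4, 5 8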
940fa25f5d2e638547e03c9cd9e16acdbea8cf89
| 9,547 |
py
|
Python
|
.github/tools/metrics/benchmark_capture.py
|
devinrsmith/deephaven-core
|
3a6930046faf1cd556f62a914ce1cfd7860147b9
|
[
"MIT"
] | 55 |
2021-05-11T16:01:59.000Z
|
2022-03-30T14:30:33.000Z
|
.github/tools/metrics/benchmark_capture.py
|
devinrsmith/deephaven-core
|
3a6930046faf1cd556f62a914ce1cfd7860147b9
|
[
"MIT"
] | 943 |
2021-05-10T14:00:02.000Z
|
2022-03-31T21:28:15.000Z
|
.github/tools/metrics/benchmark_capture.py
|
devinrsmith/deephaven-core
|
3a6930046faf1cd556f62a914ce1cfd7860147b9
|
[
"MIT"
] | 29 |
2021-05-10T11:33:16.000Z
|
2022-03-30T21:01:54.000Z
|
#!/usr/bin/env python3
import time
import click
import os
import csv
import requests
import json
import logging
from pprint import pprint
from datetime import timedelta
from jinja2 import Environment, FileSystemLoader
from google.api_core.exceptions import NotFound, InternalServerError
from google.cloud import monitoring_v3
from google.cloud import monitoring_dashboard_v1
# Label limit of 10 is a hard API limit
GCP_LABEL_LIMIT = 10
# Dashboard limits, 10 is an organic floor, 40 is a hard API limit
DASHBOARD_METRIC_FLOOR = 10
DASHBOARD_METRIC_CEILING = 40
# Setup logger
logger = logging.getLogger("benchmark_capture")
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
# Capture time for metrics
now = time.time()
# Setup GCP clients
monitoring_client = monitoring_v3.MetricServiceClient()
dashboard_client = monitoring_dashboard_v1.DashboardsServiceClient()
def record_metric(project_name, metric):
"""Create TS entry for captured metric"""
global logger
global now
global monitoring_client
series = monitoring_v3.TimeSeries()
series.resource.type = "global"
series.metric.type = f"custom.googleapis.com/{metric['Benchmark']}"
# Required to maintain uniqueness of each permutation
series.metric.labels["Run"] = metric["Run"]
series.metric.labels["Iteration"] = metric["Iteration"]
# Populate dynamic labels, GCP limit is 10 labels per descriptor
for key in list(metric.keys()):
if key not in ["Benchmark", "Run", "Iteration", "Score"]:
series.metric.labels[key] = metric[key]
if len(series.metric.labels) > GCP_LABEL_LIMIT:
            logger.warning(
f"Exiting metric label loop, limit of {GCP_LABEL_LIMIT} labels."
)
break # Break out, we have hit limit on labels
seconds = int(now)
nanos = int((now - seconds) * 10 ** 9)
interval = monitoring_v3.TimeInterval(
{"end_time": {"seconds": seconds, "nanos": nanos}}
)
point = monitoring_v3.Point(
{"interval": interval, "value": {"double_value": float(metric["Score"])}}
)
series.points = [point]
logger.info(
f"Publishing {series.resource.type}/{series.metric.type}: {metric['Score']}"
)
try:
monitoring_client.create_time_series(name=project_name, time_series=[series])
except InternalServerError:
logger.error(
f"Failed to publish metric {series.metric.type}, this may be because the metric descriptor has been recently created. Will retry on the next run."
)
def get_dashboard(project_name, title, index=1):
"""Attempt to retrieve a dashboard and return the JSON"""
global logger
dashboard_request = monitoring_dashboard_v1.types.GetDashboardRequest(
name=f"{project_name}/dashboards/{title}-{index}"
)
try:
dashboard = dashboard_client.get_dashboard(request=dashboard_request)
logger.info(f"Found dashboard {project_name}/dashboards/{title}-{index}.")
return dashboard
except NotFound:
logger.info(
f"Dashboard {project_name}/dashboards/{title}-{index} does not exist."
)
return None
def generate_dashboard(
template_path, project_name, title, metrics, filter_keys, index=1
):
"""Generate JSON template and return Python object representation of template for later processing."""
global logger
logger.info(
f"Generating dashboard template {project_name}/dashboards/{title}-{index} with {len(metrics)} metrics."
)
file_loader = FileSystemLoader(template_path)
env = Environment(loader=file_loader)
template = env.get_template("benchmark_dashboard.json.j2")
dashboard_template = json.loads(
template.render(
dashboard_path=f"{project_name}/dashboards/{title}-{index}",
title=f"{title} ({index})",
metrics=metrics,
filter_keys=filter_keys,
group_by_keys=["Iteration"],
)
)
return dashboard_template
def publish_dashboards(project_name, title, dashboard_templates):
"""Populate JSON dashboard template and use it to create/update a GCP Dashboard in project."""
global logger
for idx, dashboard_template in enumerate(dashboard_templates):
# Create Dashboard PB
dashboard = monitoring_dashboard_v1.Dashboard(dashboard_template)
# Fetch dashboard to see if we need to create or update in place
existing_dashboard = get_dashboard(project_name, title, idx + 1)
if existing_dashboard is None: # Create new dashboard
dashboard_request = monitoring_dashboard_v1.types.CreateDashboardRequest(
parent=project_name, dashboard=dashboard
)
logger.info(
f"Publishing new dashboard {project_name}/dashboards/{title}-{idx + 1}."
)
dashboard_client.create_dashboard(dashboard_request)
else: # Update existing dashboard
# Ensure we target returned version of the dashboard
dashboard.etag = existing_dashboard.etag # <-- Broke everything :(
dashboard_request = monitoring_dashboard_v1.types.UpdateDashboardRequest(
dashboard=dashboard
)
logger.info(
f"Updating dashboard {project_name}/dashboards/{title}-{idx + 1}."
)
dashboard_client.update_dashboard(dashboard_request)
def get_metadata(path):
"""Get GCP metadata object for requested path"""
global logger
logger.debug(f"Querying {path} from instance metadata service.")
url = f"http://metadata.google.internal/{path}"
headers = {"Metadata-Flavor": "Google"}
r = requests.get(url, headers=headers)
if r.status_code != 404:
return r.text
else:
return None
def get_project_id():
"""Retrieve GCP project from the instance metadata"""
global logger
logger.info("Attempting to query project ID from instance metadata service.")
return get_metadata("/computeMetadata/v1/project/numeric-project-id")
@click.command(
help="Read in provided glob of FILES and generate custom metrics for historical benchmark data capture."
)
@click.option(
"--project-id", envvar="GCP_PROJECT_ID", default=None, help="Numeric GCP project ID"
)
@click.option(
"--metrics-per-dashboard",
default=40,
help="Maximum number of metrics per dashboard",
)
@click.option("--template-path", default="templates", help="Root of template path")
@click.argument("files", nargs=-1)
def main(project_id, metrics_per_dashboard, template_path, files):
"""Read in CSV and push custom metrics to project"""
global logger
logger.info("Starting metrics capture and dashboard creation.")
if project_id is None:
project_id = get_project_id()
project_name = f"projects/{project_id}"
logger.info(f'Targeting GCP project "{project_name}"')
for f in files:
metrics = []
metric_keys = []
with open(f, "r") as data:
logger.info(f"Reading {f}...")
for metric in csv.DictReader(data):
# We only need a single iteration of each run to aggregate keys for widgets
if metric["Iteration"] == "1":
# Append keys to listing of keys to be used later in dashboard creation
metric_keys += metric.keys()
# Again don't need every iteration to create the widgets
metrics.append(metric)
# Commit the metric timeseries to GCP services
record_metric(project_name, metric)
# Extract Dashboard name from filename
dashboard_title = os.path.basename(f).split("-")[0]
# Squash key list
metric_keys = set(metric_keys)
        # Remove keys that will NOT be used for creating the metric filter in the dashboard generation
        filter_keys = list(
            metric_keys - set(["Benchmark", "Run", "Iteration", "Score"])
        )[:9]  # limit to the first 9 keys
if metrics_per_dashboard > DASHBOARD_METRIC_CEILING:
logger.warning(
"Metrics per dashboard can not exceed 40 per GCP API limitations. Reset value to 40."
)
metrics_per_dashboard = DASHBOARD_METRIC_CEILING
elif metrics_per_dashboard < DASHBOARD_METRIC_FLOOR:
logger.warning("Metrics per dashboard below 10. Reset value to 10.")
metrics_per_dashboard = DASHBOARD_METRIC_FLOOR
# Generate dashboard templates
dashboard_templates = []
windows_size = len(metrics) + (metrics_per_dashboard - 1)
windows = windows_size // metrics_per_dashboard
for i in range(windows):
metrics_slice = metrics[
(i * metrics_per_dashboard) : (i + 1) * metrics_per_dashboard
]
dashboard_templates.append(
generate_dashboard(
template_path,
project_name,
dashboard_title,
metrics_slice,
filter_keys,
index=i + 1,
)
)
# Publish dashboards to GCP
publish_dashboards(project_name, dashboard_title, dashboard_templates)
logger.info("Completed metrics capture and dashboard creation.")
if __name__ == "__main__":
main()
| 30.89644 | 158 | 0.655808 |
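The windowing arithmetic in main() is plain ceiling division: adding metrics_per_dashboard - 1 before the floor division rounds up. A standalone sketch with made-up sizes:

metrics = list(range(95))                  # hypothetical: 95 metrics, 40 per dashboard
per = 40
windows = (len(metrics) + per - 1) // per  # ceil(95 / 40) = 3
chunks = [metrics[i * per:(i + 1) * per] for i in range(windows)]
print([len(c) for c in chunks])            # [40, 40, 15]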
ca2df16482429ebd5ccb3367c7ec22c43f589b3f
| 2,311 |
py
|
Python
|
deeplearning EX/Ex1.py
|
deliciousYSH/Misc.Code
|
5e957ca16b2a7b2a1dff5f0a1dec563dd4757d18
|
[
"MIT"
] | null | null | null |
deeplearning EX/Ex1.py
|
deliciousYSH/Misc.Code
|
5e957ca16b2a7b2a1dff5f0a1dec563dd4757d18
|
[
"MIT"
] | null | null | null |
deeplearning EX/Ex1.py
|
deliciousYSH/Misc.Code
|
5e957ca16b2a7b2a1dff5f0a1dec563dd4757d18
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
import matplotlib.pyplot as plt
from torch.autograd import Variable
x_train = np.array([1.3, 2.4, 3.1, 4.32, 5.13, 6.15, 7.25, 8.92, 8.92, 3.12, 4.32, 8.52], dtype=np.float32)
x_train = x_train.reshape(-1, 1)
y_train = np.array([2.3, 1.13, 5.4, 6.1, 6.2, 6.85, 7.03, 8.18, 7.25, 6.82, 1.75, 6.2], dtype=np.float32)
y_train = y_train.reshape(-1, 1)
class linearRegression(torch.nn.Module):
def __init__(self, inputSize, outputSize):
super(linearRegression, self).__init__()
self.linear = torch.nn.Linear(inputSize, outputSize)
def forward(self, x):
out = self.linear(x)
return out
inputDim = 1 # takes variable 'x'
outputDim = 1 # takes variable 'y'
learningRate = 0.01
epochs = 100
model = linearRegression(inputDim, outputDim)
##### For GPU #######
if torch.cuda.is_available():
    model.cuda()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learningRate)
for epoch in range(epochs):
# Converting inputs and labels to Variable
if torch.cuda.is_available():
inputs = Variable(torch.from_numpy(x_train).cuda())
labels = Variable(torch.from_numpy(y_train).cuda())
else:
inputs = Variable(torch.from_numpy(x_train))
labels = Variable(torch.from_numpy(y_train))
# Clear gradient buffers because we don't want any gradient from previous epoch to carry forward, dont want to cummulate gradients
optimizer.zero_grad()
# get output from the model, given the inputs
outputs = model(inputs)
# get loss for the predicted output
loss = criterion(outputs, labels)
print(loss)
# get gradients w.r.t to parameters
loss.backward()
# update parameters
optimizer.step()
print('epoch {}, loss {}'.format(epoch, loss.item()))
with torch.no_grad(): # we don't need gradients in the testing phase
if torch.cuda.is_available():
predicted = model(Variable(torch.from_numpy(x_train).cuda())).cpu().data.numpy()
else:
predicted = model(Variable(torch.from_numpy(x_train))).data.numpy()
print(predicted)
plt.clf()
plt.plot(x_train, y_train, 'go', label='True data', alpha=0.5)
plt.plot(x_train, predicted, '--', label='Predictions', alpha=0.5)
plt.legend(loc='best')
plt.show()
| 30.407895 | 134 | 0.672003 |
b6f0a22ecc404ebcdc051115a6062d013933a00a
| 518 |
py
|
Python
|
Algorithms/Strings/Weighted_Uniform_Strings.py
|
vinayvinu500/Hackerrank
|
e185ae9d3c7dc5cd661761142e436f5df6a3f0f1
|
[
"MIT"
] | null | null | null |
Algorithms/Strings/Weighted_Uniform_Strings.py
|
vinayvinu500/Hackerrank
|
e185ae9d3c7dc5cd661761142e436f5df6a3f0f1
|
[
"MIT"
] | null | null | null |
Algorithms/Strings/Weighted_Uniform_Strings.py
|
vinayvinu500/Hackerrank
|
e185ae9d3c7dc5cd661761142e436f5df6a3f0f1
|
[
"MIT"
] | null | null | null |
# https://www.hackerrank.com/challenges/weighted-uniform-string/problem?h_r=internal-search
import string
from collections import Counter
l = string.ascii_lowercase
s = 'abccddde'
d = {}
for i,j in enumerate(l,1):
d[j] = i
e = Counter(s)
w_c = []
w = []
for i,j in e.items():
for k in range(1,j+1):
w_c.append(i*k)
w.append(d[i]*k)
print(d)
print(l)
print(s)
print(e)
print(w)
print(w_c)
q = [1, 3, 12, 5, 9, 10]
for i in q:
if i in w:
print('Yes')
else:
print('No')
| 16.709677 | 91 | 0.596525 |
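The same result can be computed in one pass by tracking the length of the current run of equal characters and collecting every prefix weight in a set, so each query becomes an O(1) lookup. A sketch using the same inputs as above:

s = 'abccddde'
weights = set()
prev, run = '', 0
for ch in s:
    run = run + 1 if ch == prev else 1
    prev = ch
    weights.add((ord(ch) - ord('a') + 1) * run)
print(['Yes' if q in weights else 'No' for q in [1, 3, 12, 5, 9, 10]])
# ['Yes', 'Yes', 'Yes', 'Yes', 'No', 'No']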
eda54ba1313b5f9760eb835e348d48e8549774b2
| 2,675 |
py
|
Python
|
src/onegov/town/views/settings.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/town/views/settings.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/town/views/settings.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
""" The settings view, defining things like the logo or color of the org. """
from onegov.core.security import Secret
from onegov.form import Form, merge_forms, move_fields
from onegov.org import _
from onegov.org.forms import HomepageSettingsForm
from onegov.org.models import Organisation
from onegov.org.views.settings import handle_homepage_settings
from onegov.town.app import TownApp
from wtforms import BooleanField, StringField, IntegerField
def get_custom_settings_form(model, request, homepage_settings_form=None):
class CustomFieldsForm(Form):
online_counter_label = StringField(
label=_("Online Counter Label"),
description=_("Forms and applications"))
reservations_label = StringField(
label=_("Reservations Label"),
description=_("Daypasses and rooms"))
daypass_label = StringField(
label=_("SBB Daypass Label"),
description=_("Generalabonnement for Towns"))
publications_label = StringField(
label=_("Publications Label"),
description=_("Official Documents"))
e_move_label = StringField(
label=_('E-Move Label'),
description=_('E-Move')
)
e_move_url = StringField(
label=_('E-Move Url'),
description=_('E-Move')
)
hide_publications = BooleanField(
label=_("Hide Publications on Homepage"))
event_limit_homepage = IntegerField(
label=_("Maximum number of events displayed on homepage")
)
news_limit_homepage = IntegerField(
label=_("Maximum number of news entries on homepage")
)
return move_fields(
form_class=merge_forms(
homepage_settings_form or HomepageSettingsForm, CustomFieldsForm),
fields=(
'online_counter_label',
'reservations_label',
'daypass_label',
'publications_label',
'e_move_label',
'e_move_url',
'hide_publications',
'event_limit_homepage',
'news_limit_homepage'
),
after='homepage_image_6'
)
@TownApp.form(model=Organisation, name='homepage-settings', template='form.pt',
permission=Secret, form=get_custom_settings_form,
setting=_("Homepage"), icon='fa-home', order=-900)
def custom_handle_settings(self, request, form):
form.delete_field('homepage_cover')
form.delete_field('homepage_structure')
form.delete_field('redirect_homepage_to')
form.delete_field('redirect_path')
return handle_homepage_settings(self, request, form)
| 32.621951 | 79 | 0.650841 |