seq_id (string, len 7–11) | text (string, len 156–1.7M) | repo_name (string, len 7–125) | sub_path (string, len 4–132) | file_name (string, len 4–77) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|
42335237588
|
# -*- coding:utf-8 -*-
def FindNumsAppearOnce(array):
    xorValue = AdjacentXor(array)  # O(n)
    index = getFirstBit(xorValue)
    temp1 = 0
    temp2 = 0
    for i in array:  # O(k*n), where k is the number of bits
if isBit1(i, index):
temp1 = temp1^i
else:
temp2 = temp2^i
return temp1, temp2
# XOR all elements together (paired elements cancel out)
def AdjacentXor(array):
if len(array)==0:
return 0
else:
for i in range(len(array)):
if i==0:
temp = array[i]
else:
temp = temp^array[i]
return temp
# Get the (negative) string index of the lowest set bit; e.g. for 0111 it returns -1
# bin(5) = '0b101'
def getFirstBit(num):
    binstr = bin(num)
    lenstr = len(binstr)
    for i in range(-1, -1 * (lenstr + 1), -1):
        if binstr[i] == '1':
            return i
def isBit1(num, index):
    binstr = bin(num)
    return binstr[index] == '1'
# Build a 32-element list where element j is the count of 1s in bit j across
# all elements of array (element 0 corresponds to the least significant bit).
# bin(0)='0b0', bin(-1)='-0b1'
# If the 1s at a given bit position sum to a multiple of 3, the corresponding
# bit of x is 0; otherwise that bit of x is 1.
# The returned nums1bit determines the value of the integer that appears only once.
def get1BitNum(array):
    nums1bit = [0 for i in range(32)]
    for num in array:
        if num < 0:
            strlen = len(bin(num))
            for j, bit in enumerate(bin(num)[-1:-strlen + 2:-1]):
                if bit == '1':
                    nums1bit[j] += 1
        else:
            strlen = len(bin(num))
            for j, bit in enumerate(bin(num)[-1:-strlen + 1:-1]):
                if bit == '1':
                    nums1bit[j] += 1
    return nums1bit
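# Illustrative sanity check (not part of the original file): with pairs plus
# the two singletons 3 and 5, the XOR-partition trick should recover both.
if __name__ == '__main__':
    print(FindNumsAppearOnce([1, 1, 3, 5, 2, 2]))  # -> (3, 5)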
|
TangAL0203/code_to_offer
|
python/FindNumsAppearOnce.py
|
FindNumsAppearOnce.py
|
py
| 1,868 |
python
|
en
|
code
| 1 |
github-code
|
6
|
29916696641
|
import unittest
from livecli import Livecli
from livecli.plugins.rtmp import RTMPPlugin
from livecli.stream import RTMPStream
class TestPluginRTMPPlugin(unittest.TestCase):
def setUp(self):
self.session = Livecli()
def assertDictHas(self, a, b):
for key, value in a.items():
self.assertEqual(b[key], value)
def test_can_handle_url(self):
should_match = [
"rtmp://https://example.com/",
"rtmpe://https://example.com/",
"rtmps://https://example.com/",
"rtmpt://https://example.com/",
"rtmpte://https://example.com/",
]
for url in should_match:
self.assertTrue(RTMPPlugin.can_handle_url(url))
should_not_match = [
"https://example.com/index.html",
]
for url in should_not_match:
self.assertFalse(RTMPPlugin.can_handle_url(url))
def _test_rtmp(self, surl, url, params):
plugin = self.session.resolve_url(surl)
streams = plugin.streams()
self.assertTrue("live" in streams)
stream = streams["live"]
self.assertTrue(isinstance(stream, RTMPStream))
self.assertEqual(stream.params["rtmp"], url)
self.assertDictHas(params, stream.params)
def test_plugin_rtmp(self):
self._test_rtmp("rtmp://hostname.se/stream",
"rtmp://hostname.se/stream", dict())
self._test_rtmp("rtmp://hostname.se/stream live=1 qarg='a \\'string' noq=test",
"rtmp://hostname.se/stream", dict(live=True, qarg='a \'string', noq="test"))
self._test_rtmp("rtmp://hostname.se/stream live=1 num=47",
"rtmp://hostname.se/stream", dict(live=True, num=47))
self._test_rtmp("rtmp://hostname.se/stream conn=['B:1','S:authMe','O:1','NN:code:1.23','NS:flag:ok','O:0']",
"rtmp://hostname.se/stream",
dict(conn=['B:1', 'S:authMe', 'O:1', 'NN:code:1.23', 'NS:flag:ok', 'O:0']))
|
ariesw/livecli
|
tests/test_plugin_rtmp.py
|
test_plugin_rtmp.py
|
py
| 2,035 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34711863736
|
from fastapi import FastAPI
from fastapi import HTTPException
import models
app = FastAPI()
coffeeDescriptions = [
"A latte is a coffee drink made with espresso and steamed milk. It is a single shot of espresso served in a tall glass, with a layer of steamed milk on top, and a layer of microfoam on top of that.",
"A cappuccino is an espresso-based coffee drink that originated in Italy, and is traditionally prepared with steamed milk foam.",
"An espresso is a coffee drink that is prepared by forcing a small amount of boiling water under pressure through finely ground coffee beans. Espresso is generally thicker than coffee brewed by other methods, and has cream on top.",
"Your average cup of joe made by putting boiled water through some freshly ground coffee beans, nothing special."
]
coffeePrices = [2.5, 3.5, 4.5, 1.5]
orders = []
@app.get("/")
async def root():
"""
Returns the menu for the coffee shop
"""
return {"menu": {1: "latte", 2: "cappuccino", 3: "espresso", 4:"normal"}}
@app.get("/coffee/{coffee_id}")
async def describeCoffee(coffee_id: int):
"""
Args:
coffee_id (int): The id of the coffee you want to know more about
Raises:
HTTPException: If the coffee_id is not between 1 and 4
Returns:
The description of the coffee
"""
if coffee_id > 4 or coffee_id < 1:
raise HTTPException(status_code=404, detail="Item not found, please choose a number between 1 and 4")
return {"description": coffeeDescriptions[coffee_id-1]}
@app.get("/coffee/{coffee_id}/price")
async def priceCoffee(coffee_id: int):
"""
gets the price of the coffee including tax in USD
Args:
coffee_id (int): The id of the coffee
Raises:
HTTPException: If the coffee_id is not between 1 and 4
Returns:
The price of the coffee including tax in USD
"""
if coffee_id > 4 or coffee_id < 1:
raise HTTPException(status_code=404, detail="Item not found, please choose a number between 1 and 4")
return {"price": coffeePrices[coffee_id-1], "currency": "USD", "tax": 0.1, "total": coffeePrices[coffee_id-1]*1.1,}
@app.post("/coffee/{coffee_id}/order")
async def orderCoffee(coffee_id: int, quantity: int = 1, payed: bool = True):
"""
Orders the coffee
Args:
coffee_id (int): The id of the coffee
quantity (int, optional): The quantity of the coffee. Defaults to 1.
        payed (bool, optional): Whether the coffee has been paid for. Defaults to True.
Raises:
HTTPException: If the coffee_id is not between 1 and 4
Returns:
A message saying that the coffee was ordered
"""
if coffee_id > 4 or coffee_id < 1:
raise HTTPException(status_code=404, detail="Item not found, please choose a number between 1 and 4")
if not payed:
        raise HTTPException(status_code=402, detail="You have not paid for your coffee")
orders.append(coffee_id)
return {"message": "Your coffee has been ordered"}
@app.get("/orders")
async def getOrders():
"""
Gets all the orders
Returns:
A list of all the orders
"""
return {"orders": orders}
@app.delete("/orders/{order_number}")
async def deleteOrders(order_number: int, token: models.Token):
"""
Deletes an order
Args:
        order_number (int): The order number
        token (models.Token): Token whose id must match the server secret
Raises:
HTTPException: If the order_id is not in the list of orders
Returns:
A message saying that the order was deleted
"""
if token.id != "secret":
raise HTTPException(status_code=403, detail="You do not have permission to delete orders")
if order_number > len(orders) or order_number < 1:
raise HTTPException(status_code=404, detail="Order not found")
orders.pop(order_number-1)
return {"message": "Your order has been deleted"}
if __name__ == "__main__":
import uvicorn
# launch the server on port 8000
uvicorn.run(app, host="localhost", port=8000)
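# Hypothetical sketch (an assumption; models.py is not shown in this snippet):
# the imported models.Token only needs an `id` field for the check in
# deleteOrders above, e.g.:
#
#     from pydantic import BaseModel
#
#     class Token(BaseModel):
#         id: str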
|
ByteOfKathy/RESTAPI-example
|
backend.py
|
backend.py
|
py
| 3,994 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37169098035
|
__author__ = "Moath Maharmeh"
__license__ = "GNU General Public License v2.0"
__version__ = "1.1"
__email__ = "[email protected]"
__created__ = "13/Dec/2018"
__modified__ = "5/Apr/2019"
__project_page__ = "https://github.com/iomoath/file_watchtower"
import sqlite3
import os
import csv
DEFAULT_PATH = os.path.join(os.path.dirname(__file__), 'database.sqlite3')
def get_db_path():
global DEFAULT_PATH
return DEFAULT_PATH
def db_connect(db_path=DEFAULT_PATH):
con = sqlite3.connect(db_path)
return con
def create_tables():
file_record_query = """
CREATE TABLE IF NOT EXISTS file_record (
id INTEGER PRIMARY KEY AUTOINCREMENT,
file_path TEXT NOT NULL UNIQUE,
hash TEXT NOT NULL,
file_size TEXT NOT NULL,
exists_on_disk varchar(6) NOT NULL,
datetime_last_check TEXT NOT NULL)"""
email_msg_query = """
CREATE TABLE IF NOT EXISTS email_msg (
id INTEGER PRIMARY KEY,
subject TEXT NOT NULL,
body TEXT NOT NULL,
attachment TEXT,
is_sent VARCHAR(6) DEFAULT 'False')"""
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute(file_record_query)
cursor.execute(email_msg_query)
    except:
        # CREATE TABLE uses IF NOT EXISTS, so failures here are unexpected;
        # they are swallowed so that importing this module never raises.
        pass
finally:
conn.commit()
conn.close()
def insert_file_record(file_record_dict):
conn = db_connect()
try:
cursor = conn.cursor()
query = """
INSERT INTO file_record (file_path, hash, file_size, exists_on_disk, datetime_last_check)
VALUES (?, ?, ?, ?, ?)"""
cursor.execute(query,
(file_record_dict["path"], file_record_dict["hash"], file_record_dict["file_size"],
file_record_dict["exists_on_disk"], file_record_dict["datetime_last_check"]))
return cursor.lastrowid
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def get_exists_on_disk_value(file_path):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT exists_on_disk FROM file_record WHERE file_path=? LIMIT 1", (file_path,))
rows = cursor.fetchall()
return rows[0][0]
except IndexError:
return None
finally:
conn.close()
def get_exists_on_disk_value_by_hash(file_hash):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT exists_on_disk FROM file_record WHERE hash=? LIMIT 1", (file_hash,))
rows = cursor.fetchall()
return rows[0][0]
except IndexError:
return None
finally:
conn.close()
def update_exists_on_disk_value(file_path, new_value):
conn = db_connect()
try:
cursor = conn.cursor()
query = """UPDATE file_record SET exists_on_disk =? WHERE file_path =?"""
cursor.execute(query, (new_value, file_path,))
return cursor.rowcount
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def update_exists_on_disk_value_by_hash(file_hash, new_value):
conn = db_connect()
try:
cursor = conn.cursor()
query = """UPDATE file_record SET exists_on_disk =? WHERE hash =?"""
cursor.execute(query, (new_value, file_hash,))
return cursor.rowcount
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def update_file_last_check(file_path, new_datetime_check):
conn = db_connect()
try:
cursor = conn.cursor()
query = """UPDATE file_record SET datetime_last_check =? WHERE file_path =?"""
cursor.execute(query, (new_datetime_check, file_path,))
return cursor.rowcount
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def update_file_path(file_hash, old_path, new_path):
conn = db_connect()
try:
cursor = conn.cursor()
query = """UPDATE file_record SET file_path =? WHERE hash =? and file_path=?"""
cursor.execute(query, (new_path, file_hash, old_path))
return cursor.rowcount
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def get_file_records(file_path):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT * FROM file_record WHERE file_path=?", (file_path,))
rows = cursor.fetchall()
return rows
except IndexError:
return None
finally:
conn.commit()
conn.close()
def get_file_records_by_hash(file_hash):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT * FROM file_record WHERE hash=?", (file_hash,))
rows = cursor.fetchall()
return rows
except Exception:
conn.rollback()
raise
finally:
conn.close()
def get_all_file_paths():
# returns all files paths
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT file_path FROM file_record")
rows = cursor.fetchall()
path_list = []
for row in rows:
path_list.append(row[0])
return path_list
except:
conn.rollback()
finally:
conn.close()
def get_file_hash(file_path):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT hash FROM file_record WHERE file_path=? LIMIT 1", (file_path,))
rows = cursor.fetchall()
return rows[0][0]
except IndexError:
return None
finally:
conn.close()
def get_file_path_by_hash(file_hash):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT file_path FROM file_record WHERE hash=? LIMIT 1", (file_hash,))
rows = cursor.fetchall()
return rows[0][0]
except IndexError:
return None
finally:
conn.close()
def is_file_has_record_by_path(file_path):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT id FROM file_record WHERE file_path=? LIMIT 1", (file_path,))
rows = cursor.fetchall()
return len(rows) > 0
except:
conn.rollback()
return False
finally:
conn.close()
def is_file_has_record_by_hash(file_hash):
    conn = db_connect()
    try:
        cursor = conn.cursor()
        cursor.execute("SELECT id FROM file_record WHERE hash=? LIMIT 1", (file_hash,))
        rows = cursor.fetchall()
        return len(rows) > 0
    except:
        conn.rollback()
        return False
finally:
conn.close()
def get_file_size(file_path):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT file_size FROM file_record WHERE file_path=? LIMIT 1", (file_path,))
rows = cursor.fetchall()
return rows[0][0]
except IndexError:
return None
finally:
conn.close()
def get_file_size_by_hash(file_hash):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT file_size FROM file_record WHERE hash=? LIMIT 1", (file_hash,))
rows = cursor.fetchall()
return rows[0][0]
except IndexError:
return None
finally:
conn.close()
def update_file_hash(file_path, new_hash):
conn = db_connect()
try:
cursor = conn.cursor()
query = """UPDATE file_record SET hash =? WHERE file_path =?"""
cursor.execute(query, (new_hash, file_path,))
return cursor.rowcount
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def delete_file_record(file_path):
conn = db_connect()
try:
cursor = conn.cursor()
query = """DELETE FROM file_record WHERE file_path=?"""
cursor.execute(query, (file_path,))
return cursor.rowcount
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def insert_email_msg(email_msg_dict):
conn = db_connect()
try:
cursor = conn.cursor()
query = """
INSERT INTO email_msg (subject, body, attachment)
VALUES (?, ?, ?)"""
cursor.execute(query,
(
email_msg_dict["subject"],
email_msg_dict["body"],
email_msg_dict["attachment"]))
return cursor.lastrowid
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def delete_msg(msg_id):
conn = db_connect()
try:
cursor = conn.cursor()
query = """DELETE FROM email_msg WHERE id=?"""
cursor.execute(query, (msg_id,))
return cursor.rowcount
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def get_unsent_messages():
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT * FROM email_msg WHERE is_sent='False'")
rows = cursor.fetchall()
list_messages = []
for row in rows:
msg = {
"id": row[0],
"subject": row[1],
"body": row[2],
"attachments": row[3],
"is_sent": row[4]
}
list_messages.append(msg)
return list_messages
except:
conn.rollback()
raise
finally:
conn.close()
def delete_sent_messages():
conn = db_connect()
try:
cursor = conn.cursor()
query = """DELETE FROM email_msg WHERE is_sent=?"""
cursor.execute(query, ("True",))
return cursor.rowcount
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def dump_file_records_to_csv(export_path):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute('SELECT * FROM file_record')
with open(export_path, 'w') as out_csv_file:
csv_out = csv.writer(out_csv_file)
# write header
csv_out.writerow([d[0] for d in cursor.description])
# write data
for result in cursor:
csv_out.writerow(result)
except:
conn.rollback()
raise
finally:
conn.close()
def delete_all_data():
conn = db_connect()
try:
cursor = conn.cursor()
query1 = """DELETE FROM email_msg"""
query2 = """DELETE FROM file_record"""
cursor.execute(query1, )
cursor.execute(query2, )
return cursor.rowcount
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
# init the database, if no db file or tables, it will be created here
create_tables()
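# Illustrative usage (not part of the original module); the dict keys follow
# what insert_file_record() above expects:
#
#     record = {
#         "path": "/tmp/example.txt",
#         "hash": "d41d8cd98f00b204e9800998ecf8427e",
#         "file_size": "0",
#         "exists_on_disk": "True",
#         "datetime_last_check": "2019-04-05 12:00:00",
#     }
#     insert_file_record(record)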
|
iomoath/file_watchtower
|
db.py
|
db.py
|
py
| 10,875 |
python
|
en
|
code
| 30 |
github-code
|
6
|
73739274748
|
#!/usr/bin/env python3
""""
This module provides the interface to manage the state of configured workers.
It allows to setup the virtual environment, install dependencies into it and
then to execute BuildBot worker commands.
"""
import sys
import os.path
import argparse
import getpass
import socket
import paramiko
import logging
sys.path.append(os.path.abspath("{}/../../master/".format(__file__)))
import maxscale.config.workers as workers
def determineHost(host, domain):
possibleHosts = [
host,
"{}.{}".format(host, domain)
]
for checkHost in possibleHosts:
try:
socket.gethostbyname(checkHost)
except BaseException:
continue
return checkHost
return None
def determineHosts(arguments):
hosts = {}
for hostConfiguration in workers.WORKER_CREDENTIALS:
if arguments.host is not None and hostConfiguration["host"] != arguments.host:
continue
host = determineHost(hostConfiguration["host"], arguments.domain)
if host is None:
continue
if host in hosts:
hosts[host].append(hostConfiguration)
else:
hosts[host] = [hostConfiguration]
return hosts
def runCommand(sshClient, command):
logging.debug("Calling command '{}'".format(command))
stdin, stdout, stderr = sshClient.exec_command(command)
stdin.close()
stdoutContents = stdout.readlines()
stderrContents = stderr.readlines()
stdoutText = "".join(stdoutContents).strip()
stderrText = "".join(stderrContents).strip()
logging.debug("Stdout:\n{}".format(stdoutText))
logging.debug("Stderr:\n{}".format(stderrText))
return [stdoutText, stderrText]
def isDirectoryAbsent(sshClient, directory):
_, stderr = runCommand(sshClient, "ls -ld {}".format(directory))
if stderr:
return True
else:
return False
PYTHON_VENV = "~/buildbot-virtual-env"
WORKERS_DIR = "~/buildbot-workers"
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
def executeActionOnHost(hosts, user, description, action):
"""Execute an action for every host"""
client = paramiko.SSHClient()
client.load_system_host_keys()
for hostIp in hosts:
logging.info(description.format(hostIp=hostIp))
client.connect(hostIp, username=user)
action(client, hosts[hostIp])
client.close()
def setupVirtualEnv(sshClient):
if isDirectoryAbsent(sshClient, PYTHON_VENV):
logging.info("Creating python virtual environment in {}".format(PYTHON_VENV))
runCommand(sshClient, "python3 -m virtualenv -p /usr/bin/python3 {}".format(PYTHON_VENV))
logging.info("Installing latest version of requirements")
absolutePythonEnvDir, _ = runCommand(sshClient, "cd {}; pwd".format(PYTHON_VENV))
sftClient = sshClient.open_sftp()
sftClient.put("{}/requirements-worker.txt".format(CURRENT_DIR), "{}/requirements.txt".format(absolutePythonEnvDir))
workerWrapper = "{}/bin/run-worker.py".format(absolutePythonEnvDir)
sftClient.put("{}/run-worker.py".format(CURRENT_DIR), workerWrapper)
sftClient.chmod(workerWrapper, 0o755)
runCommand(sshClient, "{}/bin/pip3 install -U -r {}/requirements.txt".format(PYTHON_VENV, PYTHON_VENV))
def configureVirtualEnvironment(hosts, arguments):
def performAction(client, _):
setupVirtualEnv(client)
executeActionOnHost(hosts, arguments.user, "Configuring virtual environment on host '{hostIp}'", performAction)
def createWorkerConfig(sshClient, config, masterHost):
logging.info("Creating configuration for worker '{}'.".format(config["name"]))
runCommand(sshClient, "mkdir -p {}".format(WORKERS_DIR))
runCommand(sshClient, "rm -rf {dir}/{name}".format(dir=WORKERS_DIR, **config))
runCommand(sshClient, "{venv}/bin/run-worker.py create-worker --umask=0o002 {dir}/{name} {server} {name} {password}".format(
venv=PYTHON_VENV, dir=WORKERS_DIR, server=masterHost, **config))
runCommand(sshClient, "echo '{host}' > {dir}/{name}/info/host".format(dir=WORKERS_DIR, **config))
def installWorkers(hosts, arguments):
def performAction(client, host):
setupVirtualEnv(client)
for worker in host:
createWorkerConfig(client, worker, arguments.master)
stopWorkers(hosts, arguments)
executeActionOnHost(hosts, arguments.user, "Configuring host '{hostIp}'", performAction)
def callBuildbotAction(action, hosts, arguments):
def performAction(client, host):
for worker in host:
if isDirectoryAbsent(client, "{dir}/{name}".format(dir=WORKERS_DIR, **worker)):
logging.error("Worker '{name}' configuration does not exist, doing nothing".format(**worker))
continue
runCommand(client, "{venv}/bin/run-worker.py {action} {dir}/{name}".format(
venv=PYTHON_VENV, dir=WORKERS_DIR, action=action, **worker))
logging.info("Executing action '{}'".format(action))
executeActionOnHost(hosts, arguments.user, "Executing command on host '{hostIp}", performAction)
def restartWorkers(hosts, arguments):
callBuildbotAction("restart", hosts, arguments)
def stopWorkers(hosts, arguments):
callBuildbotAction("stop", hosts, arguments)
def startWorkers(hosts, arguments):
callBuildbotAction("start", hosts, arguments)
AVAILABLE_ACTIONS = {
"install": installWorkers,
"configureVenv": configureVirtualEnvironment,
"restart": restartWorkers,
"stop": stopWorkers,
"start": startWorkers
}
def parseArguments():
parser = argparse.ArgumentParser(description="A tool to install, restart the BuildBot worker instances.")
parser.add_argument("action", help="Action to perform, install for example.", choices=AVAILABLE_ACTIONS.keys())
parser.add_argument("--host", help="Host to manage.")
parser.add_argument("--user", help="User to use during the SSH connection to host.", default=getpass.getuser())
parser.add_argument("--domain", help="Default domain for hosts", default="mariadb.com")
parser.add_argument("--master", help="Domain name of the master to configure on workers",
default="maxscale-jenkins.mariadb.com")
parser.add_argument("--debug", help="Show debug output", dest="debug", action="store_true")
parser.set_defaults(debug=False)
return parser.parse_args()
def main():
arguments = parseArguments()
if arguments.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
action = AVAILABLE_ACTIONS.get(arguments.action)
if action is None:
logging.error("Unknown action '{}'.".format(arguments.action))
exit(1)
hosts = determineHosts(arguments)
action(hosts, arguments)
if __name__ == "__main__":
main()
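# Example invocations (actions and options as declared in parseArguments();
# the hostnames below are placeholders):
#   ./manage.py install --host host1 --user buildbot --master master.example.com
#   ./manage.py restart --debug
#   ./manage.py configureVenv --domain example.com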
|
dA505819/maxscale-buildbot
|
worker-management/manage.py
|
manage.py
|
py
| 6,833 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70488670907
|
# accepted on coderun
import sys
deltas = ((1, 1), (-1, 1), (-1, -1), (1, -1))
def cut_down():
n, m, w, b, whites, blacks, turn = get_pars()
checkers_board = [[' ' for i in range(n)] for j in range(m)]
for j_, i_ in whites:
checkers_board[j_ - 1][i_ - 1] = 'w'
for j_, i_ in blacks:
checkers_board[j_ - 1][i_ - 1] = 'b'
for j_, i_ in (whites if turn == 'white' else blacks):
for dj, di in deltas:
if 0 <= j_ + dj - 1 < m and 0 <= i_ + di - 1 < n:
                if checkers_board[j_ + dj - 1][i_ + di - 1] == ('b' if turn == 'white' else 'w'):
if 0 <= j_ + 2 * dj - 1 < m and 0 <= i_ + 2 * di - 1 < n:
if checkers_board[j_ + 2 * dj - 1][i_ + 2 * di - 1] == ' ':
return 'Yes'
return 'No'
def get_pars() -> tuple[int, int, int, int, list[list[int]], list[list[int]], str]:
n, m = [int(_) for _ in input().split(' ')]
w = int(input())
whites = [[int(_) for _ in input().split(' ')] for _ in range(w)]
b = int(input())
blacks = [[int(_) for _ in input().split(' ')] for _ in range(b)]
turn = input()
return n, m, w, b, whites, blacks, turn
print(f'{cut_down()}')
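# Illustrative stdin for cut_down() (format inferred from get_pars() above):
#   3 3      <- n m
#   1        <- number of white checkers
#   1 1
#   1        <- number of black checkers
#   2 2
#   white    <- whose turn it is
# Expected output: Yes (white at (1,1) can capture the black checker at (2,2)).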
|
LocusLontrime/Python
|
Yandex_fast_recruit_days/Easy/Checkers.py
|
Checkers.py
|
py
| 1,242 |
python
|
en
|
code
| 1 |
github-code
|
6
|
12100232146
|
import random
from heaps import heapsort
def mergesort(l, low, high):
def merge(l, low, middle, high):
left_runner = low
        right_runner = middle + 1
# While there are elements in right run:
sorted_l = []
while left_runner <= middle and right_runner <= high:
if l[left_runner] <= l[right_runner]:
sorted_l.append(l[left_runner])
left_runner += 1
else:
sorted_l.append(l[right_runner])
right_runner += 1
while left_runner <= middle:
sorted_l.append(l[left_runner])
left_runner += 1
while right_runner <= high:
sorted_l.append(l[right_runner])
right_runner += 1
for i in range(low, high + 1):
l[i] = sorted_l[i - low]
# This is the mergesort that uses merge
if low < high:
middle = int((low + high) / 2)
mergesort(l, low, middle)
mergesort(l, middle+1, high)
return merge(l, low, middle, high)
def insertion_sort(l):
# No need to sort empty list
if l == []:
return l
new_l = [l[0]]
new_l_len = 0
for elem in l[1:]:
for i in range(new_l_len, -1, -1):
if elem >= new_l[i]:
new_l.insert(i+1, elem)
new_l_len += 1
break
if i == 0:
new_l.insert(0, elem)
new_l_len += 1
return new_l
def quicksort(l):
if len(l) <= 1:
return l
pivot_index = random.randint(0, len(l)-1)
pivot = l.pop(pivot_index)
lte_pivot = []
gt_pivot = []
for elem in l:
if elem <= pivot:
lte_pivot.append(elem)
else:
gt_pivot.append(elem)
return quicksort(lte_pivot) + [pivot] + quicksort(gt_pivot)
def quicksort_inplace(l, left=None, right=None):
def partition(l, left, right, pivot_index):
'''
After running the list will have the pivot in its place for sorting, only greater elements to its right.
left is the index of the leftmost element of the subarray
right is the index of the rightmost element of the subarray (inclusive)
number of elements in subarray = right-left+1
'''
pivot_value = l[pivot_index]
l[pivot_index], l[right] = l[right], l[pivot_index] # move pivot to end
store_index = left
for i in range(left, right):
if l[i] <= pivot_value:
l[i], l[store_index] = l[store_index], l[i]
store_index += 1
l[right], l[store_index] = l[store_index], l[right] # move pivot to its final place
return store_index
# in-place quicksort
if left is None:
left = 0
right = len(l)-1
if left < right:
pivot_index = random.randint(left, right-1)
pivot_new_index = partition(l, left, right, pivot_index)
quicksort_inplace(l, left, pivot_new_index-1)
quicksort_inplace(l, pivot_new_index+1, right)
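# Quick self-check (illustrative, not part of the original file): all four
# sorts should agree with Python's built-in sorted().
if __name__ == '__main__':
    data = [random.randint(0, 99) for _ in range(50)]
    a = list(data)
    mergesort(a, 0, len(a) - 1)
    b = quicksort(list(data))  # quicksort pops the pivot, so pass a copy
    c = list(data)
    quicksort_inplace(c)
    assert a == b == c == insertion_sort(list(data)) == sorted(data)
    print("all sorts agree")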
|
Shaywei/MyDevTools
|
Python/BasicDataStructures/sorts.py
|
sorts.py
|
py
| 3,070 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21509653092
|
from fastapi import APIRouter, Depends, Request, Response
from sqlalchemy.orm import Session
from typing import List
from uuid import UUID
from api.models.node_threat import NodeThreatCreate, NodeThreatRead, NodeThreatUpdate
from api.routes import helpers
from db import crud
from db.database import get_db
from db.schemas.node_threat import NodeThreat
from db.schemas.node_threat_type import NodeThreatType
router = APIRouter(
prefix="/node/threat",
tags=["Node Threat"],
)
#
# CREATE
#
def create_node_threat(
node_threat: NodeThreatCreate,
request: Request,
response: Response,
db: Session = Depends(get_db),
):
# Make sure that all the threat types that were given actually exist
db_threat_types = crud.read_by_values(values=node_threat.types, db_table=NodeThreatType, db=db)
# Create the new node threat
new_threat = NodeThreat(**node_threat.dict())
# Set the threat types on the new node threat
new_threat.types = db_threat_types
# Save the new node threat to the database
db.add(new_threat)
crud.commit(db)
response.headers["Content-Location"] = request.url_for("get_node_threat", uuid=new_threat.uuid)
helpers.api_route_create(router, create_node_threat)
#
# READ
#
def get_all_node_threats(db: Session = Depends(get_db)):
return crud.read_all(db_table=NodeThreat, db=db)
def get_node_threat(uuid: UUID, db: Session = Depends(get_db)):
return crud.read(uuid=uuid, db_table=NodeThreat, db=db)
helpers.api_route_read_all(router, get_all_node_threats, List[NodeThreatRead])
helpers.api_route_read(router, get_node_threat, NodeThreatRead)
#
# UPDATE
#
def update_node_threat(
uuid: UUID,
node_threat: NodeThreatUpdate,
request: Request,
response: Response,
db: Session = Depends(get_db),
):
# Read the current node threat from the database
db_node_threat: NodeThreat = crud.read(uuid=uuid, db_table=NodeThreat, db=db)
# Get the data that was given in the request and use it to update the database object
update_data = node_threat.dict(exclude_unset=True)
if "description" in update_data:
db_node_threat.description = update_data["description"]
if "value" in update_data:
db_node_threat.value = update_data["value"]
if "types" in update_data:
db_node_threat.types = crud.read_by_values(
values=update_data["types"], db_table=NodeThreatType, db=db
)
crud.commit(db)
response.headers["Content-Location"] = request.url_for("get_node_threat", uuid=uuid)
helpers.api_route_update(router, update_node_threat)
#
# DELETE
#
def delete_node_threat(uuid: UUID, db: Session = Depends(get_db)):
crud.delete(uuid=uuid, db_table=NodeThreat, db=db)
helpers.api_route_delete(router, delete_node_threat)
|
hollyfoxx/ace2-gui
|
backend/app/api/routes/node_threat.py
|
node_threat.py
|
py
| 2,805 |
python
|
en
|
code
| 1 |
github-code
|
6
|
20802484372
|
class Graph:
    def __init__(self):
        self.edges = []
        self.visited = []
        self.adjacent = 0
def from_matrix(self, matrix):
self.visited = [False for i in range(len(matrix))]
for i in matrix:
self.edges.append([])
for i in range(len(matrix)):
for u in range(len(matrix[i])):
if matrix[i][u] == 1:
self.edges[i].append(u)
def dfs_util(self, node,visited):
visited[node] = True
self.adjacent += 1
for i in self.edges[node]:
if visited[i] == False:
self.dfs_util(i, visited)
num_e = int(input())  # assumption: the edge count is read from stdin (num_e was undefined here)
edges = []
for i in range(num_e):
    u, v = [int(i) for i in input().split()]
    edges.append([u, v])
    edges.append([v, u])
def util(node, edges, visited):
visited[node] = True
for edge in edges:
if edge[0]-1 == node and visited[edge[1]-1]==False:
util(edge[1]-1, edges, visited)
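# Illustrative usage of the Graph class above (kept as comments, since this
# module also reads edges from stdin when executed):
#   g = Graph()
#   g.from_matrix([[0, 1, 0],
#                  [1, 0, 1],
#                  [0, 1, 0]])
#   g.dfs_util(0, g.visited)
#   g.adjacent  # -> 3, all three vertices are reachable from node 0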
|
michbogos/olymp
|
utils/graph.py
|
graph.py
|
py
| 964 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36213639675
|
#!/usr/bin/env python
# coding: utf-8
import os
import math
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from PIL import Image
import time
import os,glob
import matplotlib.pyplot as plt
from random import choice
VGG_MEAN=[103.939,116.779,123.68]
class VGGNet():
def __init__(self,data_dict):
self.data_dict=data_dict
def get_conv_filter(self,name):
return tf.constant(self.data_dict[name][0],name='conv')
def get_fc_weight(self,name):
return tf.constant(self.data_dict[name][0],name='fc')
def get_bias(self,name):
return tf.constant(self.data_dict[name][1],name='bias')
def conv_layer(self,x,name):
with tf.name_scope(name):
conv_w=self.get_conv_filter(name)
conv_b=self.get_bias(name)
h=tf.nn.conv2d(x,conv_w,strides=[1,1,1,1],padding="SAME")
h=tf.nn.bias_add(h,conv_b)
h=tf.nn.relu(h)
return h
def pooling_layer(self,x,name):
return tf.nn.max_pool(x,ksize=[1,2,2,1],
strides=[1,2,2,1],
padding="SAME",name=name)
def fc_layer(self,x,name,activation=tf.nn.relu):
        with tf.name_scope(name):
fc_w=self.get_fc_weight(name)
fc_b=self.get_bias(name)
h=tf.matmul(x,fc_w)
h=tf.nn.bias_add(h,fc_b)
if activation is None:
return h
else:
return activation(h)
def flatten_layer(self,x,name):
with tf.name_scope(name):
x_shape=x.get_shape().as_list()
dim=1
for d in x_shape[1:]:
dim*=d
x=tf.reshape(x,[-1,dim])
return x
def build(self,x_rgb):
start_time=time.time()
print("Modeling Start...")
r,g,b=tf.split(x_rgb,[1,1,1],axis=3)
x_bgr=tf.concat([b-VGG_MEAN[0],g-VGG_MEAN[1],r-VGG_MEAN[2]],axis=3)
        # Build the VGG16 convolutional stack:
        # block 1: 2 conv layers + 1 pooling layer
        # block 2: 2 conv layers + 1 pooling layer
        # block 3: 3 conv layers + 1 pooling layer
        # block 4: 3 conv layers + 1 pooling layer
        # block 5: 3 conv layers + 1 pooling layer
        # layers 6-8: fully connected
self.conv1_1=self.conv_layer(x_bgr,'conv1_1')
self.conv1_2=self.conv_layer(self.conv1_1,'conv1_2')
self.pool1=self.pooling_layer(self.conv1_2,'pool1')
self.conv2_1 = self.conv_layer(self.pool1, 'conv2_1')
self.conv2_2 = self.conv_layer(self.conv2_1, 'conv2_2')
self.pool2 = self.pooling_layer(self.conv2_2, 'pool2')
self.conv3_1 = self.conv_layer(self.pool2, 'conv3_1')
self.conv3_2 = self.conv_layer(self.conv3_1, 'conv3_2')
self.conv3_3 = self.conv_layer(self.conv3_2, 'conv3_3')
self.pool3 = self.pooling_layer(self.conv3_3, 'pool3')
self.conv4_1 = self.conv_layer(self.pool3, 'conv4_1')
self.conv4_2 = self.conv_layer(self.conv4_1, 'conv4_2')
self.conv4_3 = self.conv_layer(self.conv4_2, 'conv4_3')
self.pool4 = self.pooling_layer(self.conv4_3, 'pool4')
self.conv5_1 = self.conv_layer(self.pool4, 'conv5_1')
self.conv5_2 = self.conv_layer(self.conv5_1, 'conv5_2')
self.conv5_3 = self.conv_layer(self.conv5_2, 'conv5_3')
self.pool5 = self.pooling_layer(self.conv5_3, 'pool5')
        ''' The fully connected head is skipped: style transfer only needs conv-layer features.
self.flatten5 = self.flatten_layer(self.pool5, 'flatten')
self.fc6 = self.fc_layer(self.flatten5, 'fc6')
self.fc7 = self.fc_layer(self.fc6, 'fc7')
self.fc8 = self.fc_layer(self.fc7, 'fc8', activation = None)
self.prob = tf.nn.softmax(self.fc8, name = 'prob')
'''
print('Modeling Finished...:%f ms' % ((time.time() - start_time)*1000))
def initial_result(shape,mean,stddev):
initial=tf.truncated_normal(shape,mean=mean,stddev=stddev)
return tf.Variable(initial)
def read_img(img_name):
img=Image.open(img_name)
img=img.convert('RGB')
img = img.resize((224, 224))
np_img=np.array(img)
np_img=np.asarray([np_img],dtype=np.int32)
return np_img
def gram_matrix(x):
b,w,h,ch=x.get_shape().as_list()
features=tf.reshape(x,[b,h*w,ch])
gram=tf.matmul(features,features,adjoint_a=True)/tf.constant(ch*w*h,tf.float32)
return gram
vgg16_npy_path="./vgg_model/vgg16.npy"
image_pattern = "./images/content/video_dlzm*jpg"
output_dir="./images/results"
style_img_path="./images/style/Vincent_Willem_van_Gogh_085.jpg"
image_paths = glob.glob(image_pattern)
image_paths.sort()
num_step=100
learning_rate=10
lambda_c=0.1
lambda_s=50
for n,p in enumerate(image_paths):
print(n)
content_img_path = p
result=initial_result((1,224,224,3),127.5,20)
content_val=read_img(content_img_path)
style_val=read_img(style_img_path)
content=tf.placeholder(tf.float32,shape=[1,224,224,3])
style=tf.placeholder(tf.float32,shape=[1,224,224,3])
data_dict=np.load(vgg16_npy_path,encoding="latin1",allow_pickle=True).item()
vgg_for_content=VGGNet(data_dict)
vgg_for_style=VGGNet(data_dict)
vgg_for_result=VGGNet(data_dict)
vgg_for_content.build(content)
vgg_for_style.build(style)
vgg_for_result.build(result)
    # Choose which layers to extract features from.
    # Note: the layers used for the content features must match those used for
    # the result's content features; likewise the style layers must match the
    # result's style layers.
content_features=[vgg_for_content.conv3_2,]
result_content_features=[vgg_for_result.conv3_2,]
style_features=[vgg_for_style.conv4_1,
vgg_for_style.conv5_1,]
style_gram=[gram_matrix(feature) for feature in style_features]
result_style_features=[vgg_for_result.conv4_1,
vgg_for_result.conv5_1,]
result_style_gram=[gram_matrix(feature) for feature in result_style_features]
content_loss=tf.zeros(1,tf.float32)
for c,c_ in zip(content_features,result_content_features):
content_loss+=tf.reduce_mean((c-c_)**2,axis=[1,2,3])
style_loss=tf.zeros(1,tf.float32)
for s,s_ in zip(style_gram,result_style_gram):
style_loss+=0.2*tf.reduce_mean((s-s_)**2,[1,2])
loss=content_loss*lambda_c+style_loss*lambda_s
train_op=tf.train.AdamOptimizer(learning_rate).minimize(loss)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
for step in range(num_step):
loss_value,content_loss_value,style_loss_value,_= sess.run([loss,content_loss,style_loss,train_op],
feed_dict={
content:content_val,
style:style_val
})
# print('step: %d, loss_value: %8.4f, content_loss: %8.4f, style_loss: %8.4f' % (step+1,
# loss_value[0],
# content_loss_value[0],
# style_loss_value[0]))
if step+1 == num_step:
result_img_path=os.path.join(output_dir,'result_%03d_%05d.jpg'%(n,step+1))
result_val=result.eval(sess)[0]
result_val=np.clip(result_val,0,255)
img_arr=np.asarray(result_val,np.uint8)
img=Image.fromarray(img_arr)
img.save(result_img_path)
|
castleKing1997/Style_Transfer
|
StyleTransfer.py
|
StyleTransfer.py
|
py
| 7,795 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34616102356
|
from fenics import *
import numpy as np
from Stretch_Mesh import stretch_mesh
def solver_para(nx,ny,tau):
# Create mesh and define function space
mesh = stretch_mesh(nx=nx,ny=ny)
V = FunctionSpace(mesh, "P", 1)
# Define boundary condition
tol = 1E-14
u_D = Expression('near(x[0], 1, tol) ? pow(1-x[1],4)*exp(-t*tau):0', degree=4, tol=tol,tau=tau, t=0)
def boundary(x, on_boundary):
return on_boundary
bc = DirichletBC(V, u_D, boundary)
# Define initial value
u_n = project(u_D, V)
# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
f = Constant(0.0)
return V, u, v, u_n, f, u_D, bc, mesh
def solver_run(alpha, V, u, v, u_n, f, u_D, bc, mesh, epsilon, num_steps):
dt = 0.01 # time step size
w = Expression(('exp(alpha)*2*x[1]*(1-x[0]*x[0])', 'exp(alpha)*-2*x[0]*(1-x[1]*x[1])'),alpha = alpha,degree=3) #define the wind
F = u * v * dx + epsilon * dt * dot(grad(u), grad(v)) * dx + dt * dot(w, grad(u)) * v * dx - (u_n + dt * f) * v * dx
a, L = lhs(F), rhs(F)
# Time-stepping
t = 0
# Create VTK file for saving solution
# namefile = str(alpha)+"-alpha.pvd"
# vtkfile = File("Alixcode/alixswork/"+namefile)
# list to store t and u
u = Function(V)
u_list = []
for n in range(num_steps):
# Update current time
t += dt
u_D.t = t #update the time in the boundary condition
# Compute solution
solve(a == L, u, bc)
# # # Plot solution
# plot(u, cmap='jet', scalarbar='h', text=__doc__)
# Save to file and plot solution
# vtkfile << (u, t)
# Compute u at the vertices and add them to the list
u_approx = u.compute_vertex_values(mesh)
u_list.append(u_approx.copy())
# Update previous solution
u_n.assign(u)
return u_list
if __name__ == "__main__":
from vedo.dolfin import plot, Latex, clear, histogram
import matplotlib.pyplot as plt
def main():
# Coarse.
tau = 1/10
epsilon = 1/200
num_steps = 100
nx = 32
ny = 32
alpha = 0
V, u, v, u_n, f, u_D, bc, mesh = solver_para(nx,ny,tau)
return solver_run(alpha, V, u, v, u_n, f, u_D, bc, mesh, epsilon, num_steps), mesh
u_list, mesh = main()
# mesh_fine = refine(mesh)
# V_fine = FunctionSpace(mesh_fine, "P", 1)
# u_fine = project(u_list[0], V_fine)
# u_approx_fine = u_fine.compute_vertex_values(mesh_fine)
# u_approx = u_list[0]
# print(np.shape(u_approx), np.shape(u_approx_fine))
|
alixsleroy/PhD-project2
|
Solver Package/Interpolate_Solver.py
|
Interpolate_Solver.py
|
py
| 2,554 |
python
|
en
|
code
| 1 |
github-code
|
6
|
20473378944
|
import json
import numpy as np
class calculte():
def __init__(self, data, n_x, n_y, t_s, morning_time, afternoon_time):
self.data = data
self.n_x = n_x
self.n_y = n_y
self.t_s = t_s
self.morning = morning_time
self.afternoon_time = afternoon_time
def _process_data_(self, num):
list_patientID = np.array(self.data['就诊号'])[:]
list_doctID = np.array(self.data['医生'])[:]
list_sleepy = np.array(self.data['麻醉方式'])[:]
list_operation = np.array(self.data['time'])[:]
list_clean = np.array(self.data['手术级别'])[:]
        list_operation = (np.ceil(list_operation / 5) * 5).astype(int)
list_sleepy.reshape((num, 1))
for i in range(num):
b = list_sleepy[i]
if (b == '全身麻醉' or b == '全身麻醉(喉罩)'):
tb = 60
else:
tb = 0
list_sleepy[i] = tb
list_clean.reshape((num, 1))
for i in range(num):
a = list_clean[i]
if a == '1.0':
tp = 10
elif a == '2.0' or a == '3.0':
tp = 20
else:
tp = 30
list_clean[i] = tp
c = np.vstack((list_doctID, list_patientID, list_operation, list_sleepy, list_clean))
key = [i + 1 for i in range(num)]
        e = []  # list holding all the records; each element is a dict
for i in range(num):
f = dict()
d = c[:, i]
f[key[i]] = d
e.append(f)
return list_doctID, list_patientID, list_operation, list_sleepy, list_clean, e
def _best_result_(self,best_paixu,Num,list_doctID,list_sleepy,list_operation,list_clean):
return list_1,list_2,list_3
def _get_list_(self,a):
key = []
dic = {}
key_2 = ['time_of_operation', 'time_of_sleep', 'time_of_clean']
for i in range(self.n_x):
c = a[i]
key.append('手术室{}'.format(i+1))
x = []
for j in range(int(len(c) / 3)):
e = 3 * j
d = c[e:e + 3]
f = dict(zip(key_2, d))
x.append(f)
dic[key[i]] = x
return dic
def _output_date_(self,output_1):
f = open('output.json', 'w', encoding='utf-8')
json.dump(output_1, f, ensure_ascii=False, indent=4)
f.close()
|
Jkcert/deecamp-frontend
|
src/ors_backend/model/schedule/calculation.py
|
calculation.py
|
py
| 2,473 |
python
|
en
|
code
| 1 |
github-code
|
6
|
30109795593
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 7 10:30:52 2021
@author: X
"""
import json
import lz4.frame, lz4.block
import os
import copy
# The full path to the Firefox folders is:
# C:\Users\USERNAME\AppData\Roaming\Mozilla\Firefox\Profiles
# Each profile gets its own folder and from there, the bookmark files are saved
# in "bookmarksbackups".
def bookmarkbackups():
USERS=r"C:\Users"
users=os.listdir(USERS)
MOZAPPDATA=r"AppData\Roaming\Mozilla\Firefox\Profiles"
REMOVE=['All Users', 'Default', 'Default User', 'desktop.ini', 'Public']
rv=[]
for each in REMOVE:
users.remove(each)
for user in users:
for profile_folder in os.listdir(os.path.join(USERS,user,MOZAPPDATA)):
for bookmark_file in os.listdir(os.path.join(USERS,user,MOZAPPDATA,
profile_folder,"bookmarkbackups")):
rv.append(os.path.join(USERS,user,MOZAPPDATA,profile_folder,
"bookmarkbackups",bookmark_file))
return rv
def readfile(fn):
with open(fn,'rb') as fh:
return fh.read()
# The backup files are lz4 compressed and start with "mozLz40"
def readbookmarkfile(fn):
file_content=readfile(fn)
if file_content[0:8]==bytes("mozLz40\x00".encode('ascii')):
file_content = lz4.block.decompress(file_content[8:])
return json.loads(file_content)
def count_links(j,count=0):
if type(j)==dict:
if "children" in j:
for e in j["children"]:
count+=count_links(e)
return count
else:#if no children then it's a link
return 1
assert False
def count_and_validate_flatv(v):
count=0
for j in v:
if "children" in j:
for e in j["children"]:
if e["type"]!="text/x-moz-place": return False, count
count+=1
else:
assert False
return True,count
def grab_all_links(j,depth=0):
rv=[]
if "children" in j:
for e in j["children"]:
if e["type"]=="text/x-moz-place":
rv.append(e)
elif e["type"]=="text/x-moz-place-container":
rv.extend(grab_all_links(e,depth+1))
else:
assert False
return rv
def printkeys(j):
for k,v in j.items():
if k!="children":
print(k,"=",v,sep="")
else:
print(len(v),"children")
print()
def write_pretty(j,fn):
with open(fn, "w") as write_file:
json.dump(j, write_file, indent=4)
# I had a bug where if every item didn't have its own unique id it would fail
# to load in Firefox. I created this dictionary making function to discover
# duplicate ids. In the end I just change all the ids in the big data structure
# rather than trying to keep track during the process of merging.
def id_dict(n,d):
id = n["id"]
if n["type"]=="text/x-moz-place":
if id in d:
d[id]+=1
else:
d[id]=1
elif n["type"]=="text/x-moz-place-container":
if id in d:
d[id]+=1
else:
d[id]=1
if "children" in n:
for sub in n["children"]:
id_dict(sub,d)
else:
assert False
def return_id_dict(n):
d={}
id_dict(n,d)
return d
def fix_all_ids(n,id=100):
n["id"]=id
id+=1
if "children" in n:
for sub in n["children"]:
id=fix_all_ids(sub,id)
return id
def remove_children(j):
rv={}
for k,v in j.items():
if k=="children": continue
rv[k]=v
return rv
def link_anywhere_in_rv(j,rv):
for folder in rv:
for link in folder["children"]:
if j["uri"]==link["uri"]:
return True
return False
# There are a few contradictory ideas here. It is possible to comment out
# if link_anywhere_in_rv() to only search folders with the same name
# first it searches if the link exists anywhere, leave that in to not have dupe
# then it looks for a place for the link to go
# it looks for a matching folder name
# then compares all links. If the folder name matches then it first checks the
# uris for a match. If already in folder skips
# but if not then it returns the destination folder
# if the uri is unique then it returns False signaling to create a place for it
def already_in_rv(link,title,rv):
if link_anywhere_in_rv(link,rv):
#print(link["title"])
return True
for i,j in enumerate(rv):
dest=None
if j["title"]==title:
dest = i
if "children" in j:
for sub in j["children"]:
if sub["uri"]==link["uri"]:
return True
if dest!=None:
return rv[dest]
return False
def merge_link_folder(link,folder,rv,idd):
assert link["type"]=="text/x-moz-place"
assert "children" not in link
assert folder["type"]=="text/x-moz-place-container"
assert type(rv)==list
b = already_in_rv(link,folder["title"],rv)
if b==False:
rv.append(remove_children(folder))
rv[-1]["children"]=[link]
elif type(b)==dict:
if "children" not in b:
b["children"]=[]
b["children"].append(link)
else:
assert b==True
def merge_link_folder_all(folder,rv,idd):
assert folder["type"]=="text/x-moz-place-container"
if "children" not in folder: return
for sub in folder["children"]:
if sub["type"]=="text/x-moz-place":
merge_link_folder(sub,folder,rv,idd)
elif sub["type"]=="text/x-moz-place-container":
merge_link_folder_all(sub,rv,idd)
else:
assert False
# mut is a name for the template structure that has a "menu" "unfiled" and
# "toolbar" folder. I actually later include "mobile" as well.
# This stucture is the empty structure that I merge all the links into since I
# don't want links to fall into those orignal folders and instead to fall into
# alternate ones that are under the root menu folder
def build_mut():
mut=readbookmarkfile("empty_pretty.json")
for each in mut["children"][0]["children"]:
each["children"]=[]
return mut["children"][0]["children"]
def process_alts(first=None):
if first==None:
files=[]
else:
files=[first]
files.extend(bookmarkbackups())
rv=build_mut()
idd={}
for fn in files:
j=readbookmarkfile(fn)
if count_links(j)<10000:
merge_link_folder_all(j,rv,idd)
else:
print(fn)
return rv
def create_merged_json(first=None):
v=process_alts(first)
merged=readbookmarkfile("empty_pretty.json")
merged["children"][0]["children"]=v
print("count =",count_links(merged))
fix_all_ids(merged)
write_pretty(merged,"merged.json")
return merged
merged=create_merged_json(input("Primary bookmark file: "))
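# Running this module prompts for a primary bookmark file, merges it with every
# bookmark backup discovered under the Firefox profile folders, and writes the
# result to merged.json (see create_merged_json() above).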
|
AndrewWigginCout/bookmarks
|
bookmarks.py
|
bookmarks.py
|
py
| 7,127 |
python
|
en
|
code
| 1 |
github-code
|
6
|
15932158711
|
from ..exceptions import HydraError, ResourceNotFoundError
from . import scenario, network
from .. import db
from ..db.model import ResourceGroup, ResourceGroupItem, Node, Link
from .scenario import _get_scenario
from sqlalchemy.orm.exc import NoResultFound
import logging
log = logging.getLogger(__name__)
def _get_group(group_id):
try:
return db.DBSession.query(ResourceGroup).filter(ResourceGroup.id==group_id).one()
except NoResultFound:
raise ResourceNotFoundError("ResourceGroup %s not found"%(group_id,))
def _get_item(item_id):
try:
item = db.DBSession.query(ResourceGroupItem).filter(ResourceGroupItem.id==item_id).one()
return item
except NoResultFound:
raise ResourceNotFoundError("ResourceGroupItem %s not found"%(item_id,))
def add_resourcegroup(group, network_id,**kwargs):
"""
Add a new group to a network.
"""
group_i = ResourceGroup()
group_i.name = group.name
group_i.description = group.description
group_i.status = group.status
group_i.network_id = network_id
db.DBSession.add(group_i)
db.DBSession.flush()
return group_i
def delete_resourcegroup(group_id, purge_data='N', **kwargs):
"""
    Delete a group, purging any datasets unique to it if purge_data == 'Y'.
"""
group_i = _get_group(group_id)
if purge_data == 'Y':
network._purge_datasets_unique_to_resource('GROUP', group_id)
#This should cascaded to delete all the group items.
db.DBSession.delete(group_i)
db.DBSession.flush()
return 'OK'
def update_resourcegroup(group,**kwargs):
"""
    Update an existing group's name, description and status.
"""
group_i = _get_group(group.id)
group_i.name = group.name
group_i.description = group.description
group_i.status = group.status
db.DBSession.flush()
return group_i
def add_resourcegroupitem(group_item, scenario_id,**kwargs):
_get_scenario(scenario_id, kwargs['user_id'], check_can_edit=True)
#Check whether the ref_id is correct.
if group_item.ref_key == 'NODE':
try:
db.DBSession.query(Node).filter(Node.id==group_item.ref_id).one()
except NoResultFound:
raise HydraError("Invalid ref ID %s for a Node group item!"%(group_item.ref_id))
elif group_item.ref_key == 'LINK':
try:
db.DBSession.query(Link).filter(Link.id==group_item.ref_id).one()
except NoResultFound:
raise HydraError("Invalid ref ID %s for a Link group item!"%(group_item.ref_id))
elif group_item.ref_key == 'GROUP':
try:
db.DBSession.query(ResourceGroup).filter(ResourceGroup.id==group_item.ref_id).one()
except NoResultFound:
raise HydraError("Invalid ref ID %s for a Group group item!"%(group_item.ref_id))
else:
raise HydraError("Invalid ref key: %s"%(group_item.ref_key))
group_item_i = ResourceGroupItem()
group_item_i.scenario_id = scenario_id
group_item_i.group_id = group_item.group_id
group_item_i.ref_key = group_item.ref_key
if group_item.ref_key == 'NODE':
group_item_i.node_id = group_item.ref_id
elif group_item.ref_key == 'LINK':
group_item_i.link_id = group_item.ref_id
elif group_item.ref_key == 'GROUP':
group_item_i.subgroup_id = group_item.ref_id
db.DBSession.add(group_item_i)
db.DBSession.flush()
return group_item_i
def delete_resourcegroupitem(item_id,**kwargs):
group_item_i = _get_item(item_id)
_get_scenario(group_item_i.scenario_id, kwargs['user_id'], check_can_edit=True)
db.DBSession.delete(group_item_i)
db.DBSession.flush()
return 'OK'
|
hydraplatform/hydra-base
|
hydra_base/lib/groups.py
|
groups.py
|
py
| 3,757 |
python
|
en
|
code
| 8 |
github-code
|
6
|
34886436598
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the dayOfProgrammer function below.
def dayOfProgrammer(year):
if year == 1918:
return '26.09.1918'
date = 256 - 243
    # 1917 and earlier (Julian calendar): a leap year only needs year % 4 == 0
    is_leap_year_under_1917 = (year <= 1917 and year % 4 == 0)
    # After 1918 (Gregorian calendar) the leap-year rule changed
    is_leap_year = (year > 1918 and (year % 400 == 0 or (year % 4 == 0 and year % 100 != 0)))
# if year <= 1917 and year > 1918:
# if is_leap_year or year <= 1800:
# date = 256 - 244
if is_leap_year_under_1917 or is_leap_year:
date = 256 - 244
day_of_programmer = "%s.09.%d" % (date, year)
return day_of_programmer
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
year = int(input().strip())
result = dayOfProgrammer(year)
fptr.write(result + '\n')
fptr.close()
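# Illustrative checks (not part of the original file):
#   dayOfProgrammer(2017)  # -> '13.09.2017' (Gregorian, not a leap year)
#   dayOfProgrammer(2016)  # -> '12.09.2016' (Gregorian leap year)
#   dayOfProgrammer(1918)  # -> '26.09.1918' (calendar transition year)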
|
nipeharefa/hackkerrank-problemsolving-practice
|
day-of-the-programmer.py
|
day-of-the-programmer.py
|
py
| 930 |
python
|
en
|
code
| 1 |
github-code
|
6
|
9695092385
|
#!/bin/python3
import datetime
import json
import threading
import time
import turtle
import sys
from urllib import request
from collections import namedtuple
class ISS():
def __init__(self):
self.is_instance = True
self._astronauts_url = 'http://api.open-notify.org/astros.json'
self._location_url = 'http://api.open-notify.org/iss-now.json'
self._location_tuple = namedtuple(
'Location', ['latitude', 'longitude'])
self._location()
def __enter__(self):
return self
def __exit__(self, exctype, excinst, exctb):
self.is_instance = False
def __repr__(self):
return (f'{self.__class__.__name__}:\n\tTimestamp:{self._update_timestamp}\n\tLocation:{self.location}\n\tPeople: {self.people_in_space}')
def _get_page(self, url):
response = request.urlopen(url)
result = json.loads(response.read())
return result
def _location(self):
result = self._get_page(self._location_url)
self.location = self._location_tuple(result['iss_position']['latitude'],
result['iss_position']['longitude'])
self._update_timestamp = result['timestamp']
@property
def people_in_space(self):
result = self._get_page(self._astronauts_url)
return [people['name'] for people in result['people']]
class Tracker(ISS):
def __init__(self):
super().__init__()
self._bgpic = 'images/map.gif'
self._shape = 'images/iss2.gif'
self._screen = turtle.Screen()
self._screen.title('Python ISS Tracker')
self._screen.setup(width=720, height=360)
self._screen.setworldcoordinates(-180, -90, 180, 90)
self._screen.bgpic(self._bgpic)
self._screen.register_shape(self._shape)
self._screen.onscreenclick(self.update_turtle_location, btn=1)
self._tracker = turtle.Turtle()
self._tracker.shape(self._shape)
self._tracker.setheading(90)
def update_turtle_location(self, *args):
self._location()
self._tracker.penup()
        # turtle x/y correspond to (longitude, latitude) in the world
        # coordinates set above, so longitude must come first
        self._tracker.goto(float(self.location.longitude), float(self.location.latitude))
# Debug
print(self.__repr__())
if __name__ == '__main__':
try:
with Tracker() as iss:
iss.update_turtle_location()
turtle.mainloop()
except KeyboardInterrupt:
sys.exit(0)
# # http://open-notify.org/Open-Notify-API/
# url = 'http://api.open-notify.org/astros.json'
# response = urllib.request.urlopen(url)
# result = json.loads(response.read())
# print('People in Space: ', result['number'])
# people = result['people']
# for p in people:
# print(p['name'], ' in ', p['craft'])
# url = 'http://api.open-notify.org/iss-now.json'
# response = urllib.request.urlopen(url)
# result = json.loads(response.read())
# location = result['iss_position']
# lat = float(location['latitude'])
# lon = float(location['longitude'])
# print('Latitude: ', lat)
# print('Longitude: ', lon)
# screen = turtle.Screen()
# screen.setup(720, 360)
# screen.setworldcoordinates(-180, -90, 180, 90)
# screen.bgpic('map.gif')
# screen = turtle.Screen()
# screen.setup(720, 360)
# screen.setworldcoordinates(-180, -90, 180, 90)
# # image source:
# # map.jpg: http://visibleearth.nasa.gov/view.php?id=57752 Credit: NASA
# screen.bgpic('map.gif')
# screen.register_shape('iss2.gif')
# iss = turtle.Turtle()
# iss.shape('iss2.gif')
# iss.setheading(90)
# iss.penup()
# iss.goto(lon, lat)
# # When Does ISS next pass over me?
# #london
# #lat = 51.5072
# #lon = 0.1275
# # Tokyo
# #lat = 35.689487
# #lon = 139.691706
# # Space Center, Houston
# lat = 29.5502
# lon = -95.097
# location = turtle.Turtle()
# location.penup()
# location.color('yellow')
# location.goto(lon, lat)
# location.dot(5)
# location.hideturtle()
# url = 'http://api.open-notify.org/iss-pass.json?lat=' + \
# str(lat) + '&lon=' + str(lon)
# response = urllib.request.urlopen(url)
# result = json.loads(response.read())
# #print result
# over = result['response'][1]['risetime']
# location.write(time.ctime(over))
|
mattbhenley/ISS_Locator
|
locator.py
|
locator.py
|
py
| 4,144 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33851082611
|
def parse(line):
f = line.split(" ")
return f if len(f) == 1 else (f[0], int(f[1]))
def parse_items(items_l):
f = items_l.split(":")
return [int(i) for i in f[1].split(",")]
def parse_operation_function(operation_l):
f = operation_l.split("=")
g = f[1].split(" ")
if g[-1].isdigit():
if g[-2] == '+':
return lambda x: (x + int(g[-1])) // 3
else:
return lambda x: (x * int(g[-1])) // 3
else:
if g[-2] == '+':
return lambda x: (x + x) // 3
else:
return lambda x: (x * x) // 3
def parse_test_function(test_cond, true_cond, false_cond):
cond_num = int(test_cond.split(" ")[-1])
true_cond_num = int(true_cond.split(" ")[-1])
false_cond_num = int(false_cond.split(" ")[-1])
return lambda x: true_cond_num if x % cond_num == 0 else false_cond_num
def exec():
with open("input2.txt") as fp:
lines = [l.strip() for l in fp if l.strip()]
# Parse input
items = []
test = []
op = []
for start_line in range(0, len(lines), 6):
items.append(parse_items(lines[start_line + 1]))
op.append(parse_operation_function(lines[start_line + 2]))
test.append(parse_test_function(lines[start_line + 3], lines[start_line + 4], lines[start_line + 5]))
num_monkeys = len(items)
# Process input
items_processed = [0 for i in range(0, num_monkeys)]
for rnd in range(0, 20):
for m in range(0, num_monkeys):
items_processed[m] = items_processed[m] + len(items[m])
for i in items[m]:
worry_lvl = op[m](i)
dest_monkey = test[m](worry_lvl)
items[dest_monkey].append(worry_lvl)
items[m] = []
# print(f'Round {rnd + 1}: {items}')
print(items_processed)
s = sorted(items_processed)
print(s[-1] * s[-2])
if __name__ == '__main__':
exec()
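# Expected input2.txt layout (the usual Advent of Code 2022 day 11 format,
# six lines per monkey, matching the offsets used in exec() above):
#   Monkey 0:
#     Starting items: 79, 98
#     Operation: new = old * 19
#     Test: divisible by 23
#       If true: throw to monkey 2
#       If false: throw to monkey 3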
|
akepa/advent-of-code-2022
|
day11/day11.py
|
day11.py
|
py
| 1,934 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25968516319
|
"""added san and is_my_move to Move
Revision ID: f39051a2ca9b
Revises: c9b0d072e5e4
Create Date: 2020-12-16 13:05:46.434429
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f39051a2ca9b'
down_revision = 'c9b0d072e5e4'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('moves', schema=None) as batch_op:
batch_op.add_column(sa.Column('is_my_move', sa.Boolean(), nullable=True))
batch_op.add_column(sa.Column('san', sa.String(length=8), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('moves', schema=None) as batch_op:
batch_op.drop_column('san')
batch_op.drop_column('is_my_move')
# ### end Alembic commands ###
|
joshua-stauffer/opening-book-api
|
migrations/versions/f39051a2ca9b_added_san_and_is_my_move_to_move.py
|
f39051a2ca9b_added_san_and_is_my_move_to_move.py
|
py
| 929 |
python
|
en
|
code
| 2 |
github-code
|
6
|
70827770107
|
from selenium import webdriver
from datetime import datetime
import boto3
import os
import time
now = datetime.now()
folder_name = now.strftime("%Y%m%d")
image_name = "traffic_" + now.strftime("%Y%m%d") + "-" + now.strftime("%H-%M") + ".png"
Bucket_name = "googletrafficmap"
prefix = folder_name + "/"
#Get map snapshot
driver = webdriver.PhantomJS(service_log_path=os.path.devnull)
driver.set_window_size(1920, 1080) # set the window size that you need
driver.get('http://googletrafficmap.s3-website.ca-central-1.amazonaws.com')
# driver.save_screenshot(folder_name + "/" + image_name)
screenshotPNG = driver.get_screenshot_as_png() #Get screenshot in binary data
# Create a low-level S3 client connection
client = boto3.client('s3')
#Uploading image to s3 bucket and creating folder structure at the same time
client.put_object(
Bucket = Bucket_name,
Body = screenshotPNG,
Key = folder_name + "/" + image_name
)
time.sleep(60)
driver.close()
driver.quit()
|
nathan36/GoogleTrafficMap-GIF
|
GoogleTrafficMap-GIF/saveImage.py
|
saveImage.py
|
py
| 993 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44632720863
|
#!/usr/bin/env python
import sys
from typing import Optional
import typer
from rich import print
from rich.columns import Columns
from rich.console import Console
from rich.traceback import install
# fmt: off
# Mapping from topics to colors
TOPICS = {
"TIMR": "#9a9a99",
"VOTE": "#67a0b2",
"LEAD": "#d0b343",
"TERM": "#70c43f",
"LOG1": "#4878bc",
"LOG2": "#398280",
"CMIT": "#98719f",
"PERS": "#d08341",
"SNAP": "#FD971F",
"DROP": "#ff615c",
"CLNT": "#00813c",
"TEST": "#fe2c79",
"INFO": "#ffffff",
"WARN": "#d08341",
"ERRO": "#fe2626",
"TRCE": "#fe2626",
}
# fmt: on
def list_topics(value: Optional[str]):
if value is None:
return value
topics = value.split(",")
for topic in topics:
if topic not in TOPICS:
raise typer.BadParameter(f"topic {topic} not recognized")
return topics
def main(
file: typer.FileText = typer.Argument(None, help="File to read, stdin otherwise"),
colorize: bool = typer.Option(True, "--no-color"),
n_columns: Optional[int] = typer.Option(None, "--columns", "-c"),
ignore: Optional[str] = typer.Option(None, "--ignore", "-i", callback=list_topics),
just: Optional[str] = typer.Option(None, "--just", "-j", callback=list_topics),
):
topics = list(TOPICS)
# We can take input from a stdin (pipes) or from a file
input_ = file if file else sys.stdin
# Print just some topics or exclude some topics (good for avoiding verbose ones)
if just:
topics = just
if ignore:
topics = [lvl for lvl in topics if lvl not in set(ignore)]
topics = set(topics)
console = Console()
width = console.size.width
panic = False
for line in input_:
try:
time, topic, *msg = line.strip().split(" ")
# To ignore some topics
if topic not in topics:
continue
msg = " ".join(msg)
# Debug calls from the test suite aren't associated with
# any particular peer. Otherwise we can treat second column
# as peer id
if topic != "TEST":
i = int(msg[1])
# Colorize output by using rich syntax when needed
if colorize and topic in TOPICS:
color = TOPICS[topic]
msg = f"[{color}]{msg}[/{color}]"
# Single column printing. Always the case for debug stmts in tests
if n_columns is None or topic == "TEST":
print(time, msg)
# Multi column printing, timing is dropped to maximize horizontal
# space. Heavylifting is done through rich.column.Columns object
else:
cols = ["" for _ in range(n_columns)]
msg = "" + msg
cols[i] = msg
col_width = int(width / n_columns)
cols = Columns(cols, width=col_width - 1, equal=True, expand=True)
print(cols)
        except Exception:
# Code from tests or panics does not follow format
# so we print it as is
if line.startswith("panic"):
panic = True
# Output from tests is usually important so add a
# horizontal line with hashes to make it more obvious
if not panic:
print("#" * console.width)
print(line, end="")
if __name__ == "__main__":
typer.run(main)
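# Hedged usage sketch (assumption: a Go test suite that emits lines shaped
# like "<time> <TOPIC> S<peer> <message>", e.g. the MIT 6.824 labs):
#   VERBOSE=1 go test -run TestBackup2B | python dslogs.py -c 3 -i TIMR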
|
fansehep/Raft_Key-Value
|
RAFT_6_824/src/raft/dslogs.py
|
dslogs.py
|
py
| 3,483 |
python
|
en
|
code
| 4 |
github-code
|
6
|
40428996054
|
# python3
class Node:
def __init__(self, key=None, next=None):
self.key = key
self.next = next
class LinkedList:
def __init__(self):
self.head = None
def insert(self, key):
self.head = Node(key, self.head)
def check(self):
if not self.head:
print()
else:
node = self.head
while node:
print(node.key, end=' ')
node = node.next
print()
def delete(self, key):
if self.head:
if self.head.key == key:
self.head = self.head.next
else:
node = self.head
while node and node.next:
if node.next.key == key:
node.next = node.next.next
break
node = node.next
def find(self, key):
node = self.head
while node:
if node.key == key:
return 'yes'
node = node.next
return 'no'
def polyhash(S, p, x):
hash = 0
for i in range(len(S)-1, -1, -1):
hash = ((hash*x + ord(S[i])) % p)
return hash
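# Worked example: polyhash evaluates hash(S) = sum(ord(S[i]) * x**i) mod p
# via Horner's rule, starting from the last character. For S="ab", p=7, x=3:
#   step 1: (0*3 + ord('b')) % 7 = 98 % 7 = 0
#   step 2: (0*3 + ord('a')) % 7 = 97 % 7 = 6
# which matches (97*3**0 + 98*3**1) % 7 = 391 % 7 = 6.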
if __name__ == "__main__":
m = int(input())
n = int(input())
queries = [input() for i in range(n)]
hash_map = [LinkedList() for i in range(m)]
for i in queries:
q = i.split()
if q[0] == 'add':
index = polyhash(q[1], 1000000007, 263) % m
if hash_map[index].find(q[1]) == 'no':
hash_map[index].insert(q[1])
elif q[0] == 'del':
hash_map[polyhash(q[1], 1000000007, 263) % m].delete(q[1])
elif q[0] == 'find':
print(hash_map[polyhash(q[1], 1000000007, 263) % m].find(q[1]))
else:
hash_map[int(q[1])].check()
|
probhakarroy/Algorithms-Data-Structures
|
Data Structures/week4/hashing_chaining.py
|
hashing_chaining.py
|
py
| 1,831 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3035731078
|
i = 1
f = 1.2
b = True
l = [1, 2, 3]
d = {'a': 1, 'b': 2}
s = {1, 2, 3}
t = (1, 2, 3)
def greeting():
print('Hello world')
def func(something):
return something
# Appending a function to a list
l.append(greeting)
# Adding a function to dictionary
d.update({'greeting': greeting})
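# Calling the stored functions back out of the containers:
l[-1]()           # prints 'Hello world' via the list reference
d['greeting']()   # prints 'Hello world' via the dict reference
print(func(42))   # func simply returns its argument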
|
Archana-SK/python_tutorials
|
Decorators/_functions_as_normal_objects.py
|
_functions_as_normal_objects.py
|
py
| 301 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18529816397
|
# Visualize and analyze students' basic information: gender, grade, class, address,
# head teacher, etc. together form the student's profile tags.
# Run descriptive statistics on each student's academic performance at the individual level.
# Aggregate exam results and summarize the historical score trend of every subject,
# clarifying the current distribution of subject scores and the expected future trend,
# as input for academic intervention.
# Metrics include the raw score, score rate, standardized score (Z and T), grade-wide
# rank, class rank, deviation from the mean, etc.
# Spending profile: analyze students' spending from campus smart-card transaction data;
# support statistical analysis of the spending distribution,
# e.g. spending-trend comparison and spending alerts, to understand students' lifestyles
# (especially for students in financial need) and intervene in time; support querying
# itemized transactions.
# Attendance profile, e.g. attendance statistics: ratios and actual day counts of
# absence, lateness, leave, and presence.
class Student:
    def __init__(self, name='', stuId=0, sex='', grade=0, cla=0, addr='', headTeacher='', scores=None):
self.stuId = stuId
self.name = name
self.sex = sex
self.grade = grade
self.cla = cla
self.addr = addr
self.headTeacher = headTeacher
        self.scores = scores if scores is not None else []  # avoid a shared mutable default
class Teacher:
"""
1_teacher.csv:包含了近五年各班各学科的教师信息
term:学期
cla_id:班级ID
cla_Name:班级名
gra_Name:年级名
sub_id:学科ID
sub_Name:学科名
bas_id:教师id
bas_Name:教师名
"""
def __init__(self,term, claID, claName, graName, subID, subName, basID, basName):
self.term = term
self.claID = claID
self.claName = claName
self.graName = graName
self.subID = subID
self.subName = subName
self.basID = basID
self.basName = basName
class Score:
"""
mes_TestID,考试id
exam_number,考试编码
exam_numname,考试编码名称
mes_sub_id,考试学科id
mes_sub_name,考试学科名
exam_term,考试学期
exam_type,考试类型(对应考试类型表)
exam_sdate,考试开始时间
mes_StudentID,学生id
mes_Score,考试成绩(-1为作弊,-2为缺考,-3为免考)
mes_Z_Score,换算成Z-score
mes_T_Score,换算成T-score
mes_dengdi:换算成等第
"""
def __init__(self,TestID, exam_number, exam_numname, sub_id,
sub_name, exam_term, exam_type, exam_sdate, StudentID,
score, Z_score, T_score, dengdi):
self.TestID = TestID
self.exam_number = exam_number
self.exam_numname = exam_numname
self.sub_id =sub_id
self.sub_name = sub_name
self.exam_term = exam_term
self.exam_type = exam_type
self.exam_sdate = exam_sdate
self.studentID = StudentID
self.score = score
self.T_score = T_score
self.Z_score = Z_score
self.dengdi = dengdi
# Class
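# Hedged usage sketch (values are illustrative, not taken from any dataset):
# s = Student(name="Zhang San", stuId=1001, sex="M", grade=7, cla=3,
#             addr="Sample Street 1", headTeacher="Li Si")
# s.scores.append(Score(1, "E001", "Midterm", 10, "Math", "2017-1", 1,
#                       "2017-01-10", 1001, 95, 1.2, 62, "A"))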
|
kjp96/tianchi
|
ClassDef.py
|
ClassDef.py
|
py
| 2,928 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
35168238376
|
from serpent.game_agent import GameAgent
from serpent.input_controller import KeyboardKey
import offshoot
class SerpentSuperHexagonGameAgent(GameAgent):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.frame_handlers["PLAY"] = self.handle_play
self.frame_handler_setups["PLAY"] = self.setup_play
self.analytics_client = None
        # Context classifier setup
plugin_path = offshoot.config["file_paths"]["plugins"]
context_classifier_path = "datasets/context_classifier.model"
from serpent.machine_learning.context_classification.context_classifiers.cnn_inception_v3_context_classifier import \
CNNInceptionV3ContextClassifier
context_classifier = CNNInceptionV3ContextClassifier(
input_shape=(240, 384, 3)) # Replace with the shape (rows, cols, channels) of your captured context frames
context_classifier.prepare_generators()
context_classifier.load_classifier(context_classifier_path)
self.machine_learning_models["context_classifier"] = context_classifier
def setup_play(self):
# self.input_controller.tap_key(KeyboardKey.KEY_SPACE)
pass
def handle_play(self, game_frame):
# for i, game_frame in enumerate(self.game_frame_buffer.frames):
# self.visual_debugger.store_image_data(
# game_frame.frame,
# game_frame.frame.shape,
# str(i)
# )
# self.input_controller.tap_key(KeyboardKey.KEY_RIGHT)
context = self.machine_learning_models["context_classifier"].predict(game_frame.frame)
print("Context:", context)
|
cameron-j-knight/General-AI
|
plugins/SerpentSuperHexagonGameAgentPlugin/files/serpent_SuperHexagon_game_agent.py
|
serpent_SuperHexagon_game_agent.py
|
py
| 1,675 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74637081787
|
import socket
import time
from PyQt5.QtCore import QTimer, QThread
import queue
import logging
import pyaudio
import threading
logging.basicConfig(format="%(message)s", level=logging.INFO)
class AudioRec(QThread):
def __init__(self, threadChat):
super().__init__()
self.threadChat = threadChat
self.host_name = socket.gethostname()
self.host_ip = socket.gethostbyname(self.host_name)
# self.host_ip = '127.0.0.1'
self.port = 9634
self.socket_address = (self.host_ip, self.port)
# a maxsize 100 will be ideal but lags with video at the moment
# must send frames from server VideoGen and make sync in client
# using audio and frame timestamps
self.q = queue.Queue(maxsize=5)
self.BUFF_SIZE = 65536
self.audio_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.audio_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.BUFF_SIZE)
self.audio_socket.bind(self.socket_address)
self.p = pyaudio.PyAudio()
self.CHUNK = 1024
self.stream = self.p.open(format=self.p.get_format_from_width(2),
channels=2,
rate=44100,
output=True,
frames_per_buffer=self.CHUNK)
self.timer = QTimer()
self.timer.timeout.connect(self.play_audio)
        self.timer.start(int(1000 * 0.8 * self.CHUNK / 44100))  # interval in ms must be an int
t1 = threading.Thread(target=self.get_audio_data, args=())
t1.start()
print('Listening for audio...')
def get_audio_data(self):
        # Wait until the chat thread has received a nickname; sleep briefly so
        # this loop does not busy-wait and peg a CPU core.
        while self.threadChat.nickname == "":
            time.sleep(0.1)
while True:
try:
self.frame, _ = self.audio_socket.recvfrom(self.BUFF_SIZE)
self.q.put(self.frame)
except BlockingIOError:
pass
except Exception as e:
logging.error(e)
def play_audio(self):
if not self.q.empty():
frame = self.q.get()
self.stream.write(frame)
|
shully899509/OpenParty
|
app/client/ClientAudio.py
|
ClientAudio.py
|
py
| 2,191 |
python
|
en
|
code
| 0 |
github-code
|
6
|
92459438
|
print("Mustafa kapasi")
# a = int(input("Enter your name:"))
# print(a)
# b = [1 , 3, 4, 5]
# print(b)
# print(b[1])
# b[0] = 35
# print(b)
# c = (45 , 32, 43, 12)
# print(c)
# c[0] = 24
# print(c)
# calculating orders
print("Welcome to Our Restaurant")
a1 = 0
a2 = 0
b1 = 0
b2 = 0
print("Enter 1 for punjabi menu and 2 for gujarati menu : ")
ch = int(input())
if (ch==1) :
print("Panner bhurji 100Rs\nPanner toofani 130Rs")
print("Now enter 1 for panner bhurji and 2 for panner toofani : ")
a = int(input())
if(a==1) :
a1 = a1 + 1
elif (a==2) :
a2 = a2 + 1
else :
print("enter number 1 or 2")
elif (ch==2) :
print("Sev tameta 100Rs\nFlower bataka 140Rs")
print("Now enter 1 for Sev tameta and 2 for flower bateka : ")
b = int(input())
if(b==1) :
b1 = b1 + 1
elif (b==2) :
b2 = b2 + 1
else :
print("enter number 1 or 2")
else :
print("enter number 1 or 2")
|
Mustu19/Python-Programs
|
revision1.py
|
revision1.py
|
py
| 970 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7945180135
|
def merge_files(filenames, output_filename):
with open(output_filename, 'a') as output_file:
for filename in filenames:
with open(filename, 'r') as input_file:
output_file.write(input_file.read())
output_file.write('\n') # Add a new line between each file's content
# List to store input filenames
input_files = []
# Accept eight text file paths from the user
for i in range(1, 9):
file_path = input(f"Enter path of text file {i}: ")
input_files.append(file_path)
# Output filename
output_file = 'all.txt'
# Merge the files
merge_files(input_files, output_file)
print("Merging completed! The content of all the files has been appended into 'all.txt'.")
|
eilishbaby/spam_toolss
|
compilation.py
|
compilation.py
|
py
| 723 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41843820390
|
import numpy as np
from sklearn.metrics import f1_score
from sklearn.naive_bayes import BernoulliNB
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
class NaiveBayes:
"""
Naive Bayes implementation based on Multi-variate Bernoulli using python
"""
def __init__(self):
"""
        self.class_probability --> Class prior of shape (output_label, ). It is the probability of a label appearing
                                without seeing the input data.
        self.phi --> Probability of an input feature given an output label, P(x|y). Shape: (output_label, input_feature)
        self.output_label --> Number of output classes
        self.input_feature --> Number of input features
"""
self.class_probability = None
self.phi = None
self.output_label = None
def fit(self, x_train, y_train):
"""
Train the model
:param x_train: Input training example of shape (number of training data, input feature)
:param y_train: Output training example of shape (number of training data, )
:return:
"""
m = x_train.shape[0] # Number of training example
# Flatten the training set
x_train = x_train.reshape(m, -1)
input_features = x_train.shape[1]
self.output_label = len(np.unique(y_train.reshape(-1)))
# Initialize everything with zero
self.class_probability = np.zeros(self.output_label)
self.phi = np.zeros((self.output_label, input_features))
# Calculate class probability and phi
for label in range(self.output_label):
            # Extract the training data for an individual label
current_label_data = x_train[y_train == label]
            # Number of occurrences of this particular label in the training set
current_label_occur = current_label_data.shape[0]
            # Class prior for this label (with Laplace smoothing)
self.class_probability[label] = (current_label_occur + 1) / (m + self.output_label)
# Calculate phi for an individual label
            # How many times each input feature appeared for this label
            # One is added for Laplace smoothing
input_feature_occur = np.sum(current_label_data, axis=0) + 1
# Fix the denominator according to the laplace smoothing
curr_label_laplace_smoothing = current_label_occur + self.output_label
# Calculate phi
self.phi[label, :] = input_feature_occur / curr_label_laplace_smoothing
def predict(self, x_test):
"""
Make prediction
:param x_test: data to predict of shape (number of prediction, input feature)
:return:
"""
# Number of prediction
num_of_test = x_test.shape[0]
# Probability of each of the class.
# Initially each of the label will have zero probability
probabilities = np.zeros((num_of_test, self.output_label))
# Calculate for all test
for test_index in range(num_of_test):
# Count probabilities for each of the classes
for label in range(self.output_label):
# First get all the words present in this test example
words_for_this_example = x_test[test_index] == 1
                # Get the calculated probabilities for this label and these present words
words_probabilities = self.phi[label][words_for_this_example]
# Multiply all these probability
words_probability_multiply = np.prod(words_probabilities)
                # Multiply this by the class prior to get the overall
                # (unnormalized) probability of this example
probabilities[test_index, label] = words_probability_multiply * self.class_probability[label]
# Normalize the probabilities
probabilities[test_index] /= np.sum(probabilities[test_index])
# return the maximum probability index
return np.argmax(probabilities, axis=1)
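    # Hedged note on the math: a full multi-variate Bernoulli NB scores
    #   P(y|x) ∝ P(y) * prod_j P(x_j|y)^(x_j) * (1 - P(x_j|y))^(1 - x_j)
    # predict() above keeps only the factors for features that are present
    # (x_j = 1), a simplification relative to the full model.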
def get_f1_score(self, x_test, y_test):
"""
Calculate the f1 score of our model
:param x_test:
:param y_test:
:return:
"""
return f1_score(y_test, self.predict(x_test))
if __name__ == '__main__':
    # Test our model with the Amazon cells review dataset,
    # where the model predicts whether a review is positive or negative
# Read the dataset
with open("amazon_cells_labelled.txt", "r") as file:
# This will contain all the sentences
sentences = []
# This will contain all the output label(0, 1)
labels = []
for line in file.readlines():
            # The label and sentence are separated by a tab
line_arr = line.strip().split("\t")
            # Keep the raw sentence (stop words are removed later by CountVectorizer)
sentences.append(line_arr[0])
labels.append(int(line_arr[1]))
# Vectorize the training sentences
vectorizer = CountVectorizer(analyzer="word", lowercase=True, stop_words="english", max_features=4500)
data = vectorizer.fit_transform(sentences).toarray()
# Split data
x_train, x_test, y_train, y_test = train_test_split(data, np.array(labels))
# Fit to our model
naive_bayes = NaiveBayes()
naive_bayes.fit(x_train, y_train)
model_f1_score = naive_bayes.get_f1_score(x_test, y_test)
print("F1 score of test set of our model is : ", str(model_f1_score))
# Compare with scikit-learn model
sci_naive_bayes = BernoulliNB()
sci_naive_bayes.fit(x_train, y_train)
sk_prediction = sci_naive_bayes.predict(x_test)
print("F1 score of the test set of scikit learn model is : ", str(f1_score(y_test, sk_prediction)))
|
gmortuza/machine-learning-scratch
|
machine_learning/bayesian/naive_bayes/naive_bayes.py
|
naive_bayes.py
|
py
| 5,821 |
python
|
en
|
code
| 6 |
github-code
|
6
|
30791686316
|
from numba import *
from numba import error
#@autojit
def func():
if x:
print("hello")
else:
print("world")
def compile_func1():
try:
jit(void())(func)
except error.NumbaError as e:
print("exception: %s" % e)
__doc__ = """
>>> compile_func1()
--------------------- Numba Encountered Errors or Warnings ---------------------
if x:
-------^
Error 6:7: No global named 'x'
--------------------------------------------------------------------------------
exception: 6:7: No global named 'x'
"""
#@autojit
def func2():
print(10[20])
def compile_func2():
try:
jit(void())(func2)
except error.NumbaError as e:
print("exception: %s" % e)
__doc__ += """>>> compile_func2()
--------------------- Numba Encountered Errors or Warnings ---------------------
print(10[20])
----------^
Error 29:10: object of type int cannot be indexed
--------------------------------------------------------------------------------
exception: 29:10: object of type int cannot be indexed
"""
@autojit # this often messes up line numbers
def func_decorated():
print(10[20])
def compile_func3():
try:
func_decorated()
except error.NumbaError as e:
print("exception: %s" % e)
__doc__ += """
>>> compile_func3()
--------------------- Numba Encountered Errors or Warnings ---------------------
print(10[20])
----------^
Error 48:10: object of type int cannot be indexed
--------------------------------------------------------------------------------
exception: 48:10: object of type int cannot be indexed
"""
if __name__ == '__main__':
import numba
numba.testmod()
|
garrison/numba
|
numba/tests/test_reporting.py
|
test_reporting.py
|
py
| 1,664 |
python
|
en
|
code
| null |
github-code
|
6
|
30162086505
|
from tkinter import *
fenetre = Tk()
import debug as de
import sauvegarde as sauvegarde
import plateau as plateau
import pions as pions
import gestionnaire_evenements as g_evenements
import menu as menu
import gestionnaire_images as g_images
import ast
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 13 18:48:54 2017
@author: Pierre Monrocq
"""
# Display settings
fenetre.title('Latroncules')
FENETRE_LARGEUR = "1000"
FENETRE_HAUTEUR = "580"
fenetre.geometry(FENETRE_LARGEUR+"x"+FENETRE_HAUTEUR)
fenetre.resizable(0, 0)
can = Canvas(fenetre,height=FENETRE_HAUTEUR,width=FENETRE_LARGEUR,bg="#d5d5d5")
#Menu
imgmenu = can.create_image(500,290,image=g_images.menu)
bm = Button(can, text="NOUVELLE PARTIE",fg="#f5f5f5",bg="#262625",highlightthickness = 0,bd=0,borderwidth=5,font = ('Helvetica',12))
bm2 = Button(can, text="CONTINUER PARTIE",fg="#f5f5f5",bg="#262625",highlightthickness = 0,bd=0,borderwidth=5,font = ('Helvetica',12))
bm4 = Button(can, text="QUITTER",fg="#f5f5f5",bg="#262625",highlightthickness = 0,bd=0,borderwidth=5,font = ('Helvetica',12))
bm3 = Button(can, text="AIDE",fg="#f5f5f5",bg="#262625",highlightthickness = 0,bd=0,borderwidth=5,font = ('Helvetica',12))
bm.configure(width=20,height=1,activebackground = "#b9b8b6", relief = FLAT, command= lambda: lancer_jeu(can))
bm2.configure(width=20,height=1,activebackground = "#b9b8b6", relief = FLAT, command= lambda: lancer_jeu(can,True),state=DISABLED)
bm3.configure(width=20,height=1,activebackground = "#b9b8b6", relief = FLAT,command= lambda: print("Merci de consulter le mode d’emploi, situé dans le dossier rapport."))
bm4.configure(width=20,height=1,activebackground = "#b9b8b6", relief = FLAT, command=fenetre.destroy)
bm.place(relx=0.5,rely=0.48,anchor=CENTER)
bm2.place(relx=0.5,rely=0.38,anchor=CENTER)
bm4.place(relx=0.5,rely=0.68,anchor=CENTER)
bm3.place(relx=0.5,rely=0.58,anchor=CENTER)
can.pack()
if sauvegarde.fichier_existant():  # If a saved game already exists
bm2.configure(state=NORMAL)
can.pack()
def lancer_jeu(can,*continuer):
"""
    Launch the game and call the other functions
    :param can: Tkinter canvas
    :type can: Tkinter object
    :param continuer: if True, the game loads an existing save (default False) (optional)
    :type continuer: boolean
"""
    # Start timing execution
    deb = de.afficher_temps_execution_debut()
    # Remove the menu
global bm,bm2,bm3,bm4,imgmenu
bm2.configure(state=NORMAL)
menu.supprimer_boutons_menu(bm,bm2,bm3,bm4)
menu.supprimer_fond_menu(can,imgmenu)
    # Board creation functions
    plateau.generer_plateaux(can,g_images.case_noire_iso,g_images.case_blanche_iso,g_images.case_noire_plat,g_images.case_blanche_plat)
    if True in continuer:  # Load an existing game
grille = ast.literal_eval(sauvegarde.charger_grille_partie())
pions.tour = int(sauvegarde.charger_tour_partie())
pions.afficher_pions_captures(can,grille,"C","c",g_images.cavalier_blanc_plat,g_images.cavalier_noir_plat)
    else:  # otherwise create a new game
grille = pions.ajouter_pions_grille(8)
pions.afficher_pions_plateau_plat(grille,can)
pions.afficher_pions_plateau_iso(grille,can)
g_evenements.afficher_tour(can,pions.tour,g_images.tourn,g_images.tourb)
de.afficher_temps_execution_fin(deb, "Lancement en")
    # In-game buttons
but = Button(can, text="CHANGER DE VUE", command= lambda: g_evenements.changement_vue(can),fg="#f5f5f5",bg="#8c8c8b",highlightthickness = 0,bd=0,borderwidth=5)
but.configure(width = 14, activebackground = "#b9b8b6", relief = FLAT)
but.place(relx=0.1,rely=0.05,anchor=CENTER)
b2 = Button(can, text="NOUVELLE PARTIE",fg="#f5f5f5",bg="#8c8c8b",highlightthickness = 0,bd=0,borderwidth=5)
b2.configure(width = 14, activebackground = "#b9b8b6", relief = FLAT,command=lambda: g_evenements.partie_termine(fenetre,can))
b2.place(relx=0.9,rely=0.05,anchor=CENTER)
    # Events
can.bind("<Button-1>",lambda event: pions.selectionner_pion(event,grille,can))
can.bind("<Button-3>",lambda event: pions.deplacer_pion(event,fenetre,grille,can))
fenetre.protocol("WM_DELETE_WINDOW",lambda: g_evenements.confirmer_quitter(fenetre,grille,pions.tour))
    # Run the Tkinter main loop
fenetre.mainloop()
|
PierreMonrocq/L1-Latroncules-game
|
Main.py
|
Main.py
|
py
| 4,373 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10702615749
|
# pylint: disable=E0401
import js
import functools
from pyodide.ffi import create_once_callable, create_proxy
from enum import Enum
from typing import Callable, TypedDict, Sequence, MutableMapping, Union
State = MutableMapping[str, Union[str, int, dict, list, None]]
Actions = dict[str, Callable]
Attributes = dict[str, Union[str, int, tuple[str]]]
class VDom(TypedDict):
node_name: str
attributes: Attributes
children: Sequence[Union[str, "VDom"]]
def p(
node_name: str, attributes: Attributes, children: Sequence[Union[str, "VDom"]]
) -> VDom:
if not isinstance(children, Sequence):
return {
"node_name": node_name,
"attributes": attributes,
"children": [children],
}
return {"node_name": node_name, "attributes": attributes, "children": children}
class App:
def __init__(
self,
selector: str,
state: State,
view: Callable[[State, Actions], VDom],
actions: Actions,
):
def dispatch_action(action, state, data):
action(state, data)
self.resolve_node()
self.view = view
self.state = state
self.actions = {
name: functools.partial(dispatch_action, action)
for name, action in actions.items()
}
self.skip_render = False
self.new_node = None
self.current_node = None
self.dom_manager = DomManager(selector)
self.resolve_node()
def resolve_node(self):
self.new_node = self.view(self.state, self.actions)
self.schedule_render()
def render(self, _):
self.dom_manager.render(self.new_node)
self.skip_render = False
def schedule_render(self):
if not self.skip_render:
self.skip_render = True
js.requestAnimationFrame(create_once_callable(self.render))
class DomManager:
def __init__(self, selector: str) -> None:
self.element = js.document.querySelector(selector)
self.element.innerHTML = ""
self.v_current_node = None
class ChangeType(Enum):
NONE = 1
TYPE = 2
TEXT = 3
NODE = 4
VALUE = 5
ATTR = 6
def render(self, v_new_node):
if self.v_current_node:
self.update_element(self.element, self.v_current_node, v_new_node)
else:
self.element.appendChild(self.create_element(v_new_node))
self.v_current_node = v_new_node
def create_element(self, v_node):
if not self.is_v_node(v_node):
return js.document.createTextNode(str(v_node))
element = js.document.createElement(v_node["node_name"])
self.set_attributes(element, v_node["attributes"])
for child in v_node["children"]:
element.appendChild(self.create_element(child))
return element
def set_attributes(self, element, attributes):
for attr, value in attributes.items():
if self.is_event_attr(attr):
element.addEventListener(attr[2:].lower(), create_proxy(value))
else:
element.setAttribute(str(attr), value)
def update_element(
self, parent_node, v_current_node, v_new_node, current_node_index=0
):
if not v_current_node:
parent_node.appendChild(self.create_element(v_new_node))
return
current_node = (
parent_node.childNodes[current_node_index]
if len(parent_node.childNodes) > current_node_index
else parent_node.childNodes[-1]
)
if not v_new_node:
parent_node.removeChild(current_node)
return
change_type = self.change_type(v_current_node, v_new_node)
if change_type in [
self.ChangeType.TYPE,
self.ChangeType.TEXT,
self.ChangeType.NODE,
]:
parent_node.replaceChild(self.create_element(v_new_node), current_node)
if change_type == self.ChangeType.VALUE:
current_node.value = v_new_node["attributes"].get("value")
if change_type == self.ChangeType.ATTR:
self.update_attributes(
current_node, v_current_node["attributes"], v_new_node["attributes"]
)
if not self.is_v_node(v_current_node) or not self.is_v_node(v_new_node):
return
for i in range(
max([len(v_current_node["children"]), len(v_new_node["children"])])
):
v_current_node_child = (
v_current_node["children"][i]
if i < len(v_current_node["children"])
else None
)
v_new_node_child = (
v_new_node["children"][i] if i < len(v_new_node["children"]) else None
)
self.update_element(current_node, v_current_node_child, v_new_node_child, i)
def update_attributes(self, target_node, current_attributes, new_attributes):
for attr in list(set(current_attributes.keys()) - set(new_attributes)):
if self.is_event_attr(str(attr)):
continue
target_node.removeAttribute(str(attr))
for attr, value in new_attributes.items():
if (
self.is_event_attr(str(attr))
or current_attributes.get(str(attr)) == value
):
continue
target_node.setAttribute(str(attr), value)
def change_type(self, a, b):
if a.__class__.__name__ != b.__class__.__name__:
return self.ChangeType.TYPE
if not self.is_v_node(a) and a != b:
return self.ChangeType.TEXT
if self.is_v_node(a) and self.is_v_node(b):
if a["node_name"] != b["node_name"]:
return self.ChangeType.NODE
if a["attributes"].get("value") != b["attributes"].get("value"):
return self.ChangeType.VALUE
if a["attributes"] != b["attributes"]:
return self.ChangeType.ATTR
return self.ChangeType.NONE
def is_v_node(self, node):
return isinstance(node, dict)
def is_event_attr(self, attr: str):
return attr.startswith("on")
|
harehare/python-wasm-vdom
|
vdom.py
|
vdom.py
|
py
| 6,230 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42755033612
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
from datetime import datetime
import re
import importlib
import inspect
import logging
import os
import sys
import sphinx
import megengine
# -- Project information -----------------------------------------------------
project = 'MegEngine'
copyright = f'2020-{datetime.now().year}, The MegEngine Open Source Team'
author = 'The MegEngine Open Source Team'
version = megengine.__version__
release = version
# -- General configuration ---------------------------------------------------
extensions = [
'nbsphinx',
'recommonmark',
'sphinx.ext.napoleon',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.graphviz',
'sphinxcontrib.mermaid',
'sphinx_autodoc_typehints',
'sphinx_copybutton'
]
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
}
source_encoding = "utf-8"
master_doc = 'index'
templates_path = ['_templates']
exclude_patterns = [
'_build',
'build',
'examples',
'**/includes/**',
'**.ipynb_checkpoints'
]
# -- Options for internationalization ----------------------------------------
language = 'zh_CN'
# By default, the document `functional/loss.rst` ends up in the `functional` text domain.
# With this option set to False, it is `functional/loss`.
gettext_compact = False
# -- Options for Extensions -------------------------------------------------
# Setting for sphinx.ext.autosummary to auto-generate single html pages
# Please make sure all API pages are stored in the `/reference/api/` directory
autosummary_generate = True
# Setting for sphinx.ext.auotdoc
autodoc_default_options = {
    'member-order': 'bysource', # Requires developers to organize the source accordingly
    'show-inheritance': True, # But it can not refer to the short module path
}
autoclass_content = 'class'
autodoc_typehints = 'description'
autodoc_docstring_signature = True
add_function_parentheses = False
add_module_names = False
# Setting for sphinx.ext.mathjax
# The path to the JavaScript file to include in the HTML files in order to load MathJax.
mathjax_path = 'https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js'
mathjax_config = {
'extensions': ['tex2jax.js'],
'jax': ['input/TeX', 'output/HTML-CSS'],
}
# Setting for sphinxcontrib-mermaid
mermaid_version = 'latest' # from CDN unpkg.com
# Setting for sphinx.ext.intersphinx
# Useful for referencing other projects, eg. :py:class:`zipfile.ZipFile`
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://numpy.org/doc/stable/', None),
}
# Setting for sphinx.ext.extlinks
# Can use the alias name as a new role, e.g. :issue:`123`
extlinks = {
'src': ('https://github.com/MegEngine/MegEngine/blob/master/%s', ''),
'docs': ('https://github.com/MegEngine/Documentation/blob/master/%s', ''),
'issue': ('https://github.com/MegEngine/MegEngine/issues/%s', 'Issue #'),
    'pull': ('https://github.com/MegEngine/MegEngine/pull/%s', 'Pull Request #'),
'duref': ('http://docutils.sourceforge.net/docs/ref/rst/'
'restructuredtext.html#%s', ''),
}
# Setting for sphinx.ext.nbsphinx
# nbsphinx do not use requirejs (breaks bootstrap)
nbsphinx_requirejs_path = ""
logger = logging.getLogger(__name__)
try:
import nbconvert
except ImportError:
logger.warning("nbconvert not installed. Skipping notebooks.")
exclude_patterns.append("**/*.ipynb")
else:
try:
nbconvert.utils.pandoc.get_pandoc_version()
except nbconvert.utils.pandoc.PandocMissing:
logger.warning("Pandoc not installed. Skipping notebooks.")
exclude_patterns.append("**/*.ipynb")
# -- Options for HTML output -------------------------------------------------
html_theme = 'pydata_sphinx_theme'
html_theme_path = ['_themes']
html_theme_options = {
'search_bar_text': '输入搜索文本...',
'search_bar_position': 'navbar',
'github_url': 'https://github.com/MegEngine/MegEngine',
'external_links': [
{ 'name': '论坛', 'url': 'https://discuss.megengine.org.cn/'},
{ 'name': '官网', 'url': 'https://megengine.org.cn/'}
],
'use_edit_page_button': False,
'navigation_with_keys': False,
'show_prev_next': False,
'use_version_switch': True,
'version_switch_json_url': '/doc/version.json',
'version_switch_enable_locale': True,
'version_switch_locates': ['zh', 'en']
}
html_sidebars = {
'**': ['sidebar-search-bs.html', 'sidebar-nav-bs.html'],
'index': ['sidebar-search-bs.html', 'homepage-sidebar.html']
}
html_static_path = ['_static']
html_logo = "logo.png"
html_favicon = "favicon.ico"
html_css_files = [
'css/custom.css'
]
html_js_files = [
'js/custom.js'
]
html_search_language = 'zh'
|
tpoisonooo/Documentation
|
source/conf.py
|
conf.py
|
py
| 5,214 |
python
|
en
|
code
| null |
github-code
|
6
|
71477050428
|
import sys
input = sys.stdin.readline
n, k = map(int, input().split())
data = list(map(int, input().split()))
ans = 0
start = 0
end = 20*n
while start <= end:
    # Can the array be split into k or more groups, each with sum at least mid?
mid = (start + end) // 2
my_sum = 0
count = 0
for i in range(n):
my_sum += data[i]
if my_sum >= mid:
count += 1
my_sum = 0
if count >= k:
start = mid + 1
ans = mid
else:
end = mid - 1
print(ans)
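# Hedged worked trace (hypothetical input): for n=5, k=2, data=[1, 2, 3, 4, 5],
# the best split into two groups maximizing the minimum sum is [1,2,3 | 4,5]
# with minimum 6. The greedy check accepts mid=6 (groups close at sums 6 and 9,
# so count=2) and rejects mid=7 (only one group reaches 7), so the binary
# search converges on 6.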
|
YOONJAHYUN/Python
|
BOJ/17951.py
|
17951.py
|
py
| 490 |
python
|
en
|
code
| 2 |
github-code
|
6
|
22912396559
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from flask import Flask, request, jsonify, make_response, abort
import requests
from stations import stations
app = Flask(__name__)
name = [
'station_train_code',
'from_station_name',
'to_station_name',
'lishi',
'start_time',
'arrive_time',
'swz_num',
'tz_num',
'zy_num',
'ze_num',
'gr_num',
'rw_num',
'yw_num',
'rz_num',
'yz_num',
'wz_num',
'qt_num']
@app.route('/zd')
def zd_tickets():
tickets = []
date = request.args.get('Date')
from_station = request.args.get('from')
to_station = request.args.get('to')
if from_station in stations.keys() and to_station in stations.keys():
from_station = stations[from_station]
to_station = stations[to_station]
else:
abort(400)
url = 'https://kyfw.12306.cn/otn/lcxxcx/query?purpose_codes=ADULT&queryDate={}&from_station={}&to_station={}'.\
format(date, from_station, to_station)
r = requests.get(url, verify=False)
    # if the url param 'date' was supplied incorrectly, 12306 would return -1 (an int).
if r.json() == -1:
abort(400)
if 'datas' in r.json()['data']:
contents = r.json()['data']['datas']
# get out the information that we want in the contents dict.
for content in contents:
ticket = {key: content[key] for key in name}
tickets.append(ticket)
else:
abort(404)
return jsonify({'tickets': tickets})
@app.route('/hc')
def hc_tickets():
tickets_1 = []
tickets_2 = []
date = request.args.get('Date')
from_station = request.args.get('from')
to_station = request.args.get('to')
changed_station = request.args.get('change')
if from_station in stations.keys() and to_station in stations.keys() and changed_station in stations.keys():
from_station = stations[from_station]
to_station = stations[to_station]
changed_station = stations[changed_station]
else:
abort(400)
url1 = 'https://kyfw.12306.cn/otn/lcxxcx/query?purpose_codes=ADULT&queryDate={}&from_station={}&to_station={}'. \
format(date, from_station, changed_station)
r1 = requests.get(url1, verify=False)
if r1.json() == -1:
abort(400)
if 'datas' in r1.json()['data']:
contents_1 = r1.json()['data']['datas']
for content in contents_1:
ticket = {key: content[key] for key in name}
tickets_1.append(ticket)
else:
abort(404)
for ticket in tickets_1:
if int(ticket['lishi'][:2]) + int(ticket['start_time'][:2]) < 24: # TODO
url2 = 'https://kyfw.12306.cn/otn/lcxxcx/query?purpose_codes=ADULT' \
'&queryDate={}&from_station={}&to_station={}'. \
format(date, changed_station, to_station)
r2 = requests.get(url2, verify=False)
if r2.json() == -1:
abort(400)
if 'datas' in r2.json()['data']:
contents_2 = r2.json()['data']['datas']
for content in contents_2:
ticket = {key: content[key] for key in name}
tickets_2.append(ticket)
if tickets_2:
for x in tickets_1:
x['changed_ticket'] = [y for y in tickets_2 if
1 < int(y['start_time'][:2]) - int(x['arrive_time'][:2]) < 3]
else:
abort(404)
else:
# if the first train arrived at next day, we add one day to the queryDate param.
date2 = str(datetime.strptime(date, '%Y-%m-%d') + timedelta(days=1))[:10]
# TODO: the following repeatedly codes should be moved to a helper method.
url2 = 'https://kyfw.12306.cn/otn/lcxxcx/query?purpose_codes=ADULT' \
'&queryDate={}&from_station={}&to_station={}'. \
format(date2, changed_station, to_station)
r2 = requests.get(url2, verify=False)
if r2.json() == -1:
abort(400)
if 'datas' in r2.json()['data']:
contents_2 = r2.json()['data']['datas']
for content in contents_2:
ticket = {key: content[key] for key in name}
tickets_2.append(ticket)
if tickets_2:
for x in tickets_1:
x['changed_ticket'] = [y for y in tickets_2 if
1 < int(y['start_time'][:2]) - int(x['arrive_time'][:2]) < 3]
return jsonify({'tickets': tickets_1})
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@app.errorhandler(400)
def bad_request(error):
return make_response(jsonify({'error': 'Please check your query param format'}), 400)
if __name__ == '__main__':
app.run(debug=True)
|
shenmj053/querytickets
|
tickets.py
|
tickets.py
|
py
| 5,024 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26524076638
|
import math
def calculatein(main_list, dictionary, imm, non_imm):
time_loss = 0
for f, l in imm:
fir = main_list.index(f)
sec = main_list.index(l)
order = [fir, sec]
order.sort()
sub_list = main_list[order[0]+1: order[1]+1]
for i in range(0, len(sub_list)):
sub_list[i] = dictionary[sub_list[i]]
time_loss += sum(sub_list)
for f, l in non_imm:
fir = main_list.index(f)
sec = main_list.index(l)
order = [fir, sec]
order.sort()
sub_list = main_list[order[0]+1: order[1]+1]
for i in range(0, len(sub_list)):
sub_list[i] = dictionary[sub_list[i]]
time_loss += 15-sum(sub_list)
return time_loss
condition = input('enter condition(works, prerequired, imm, non_imm): ')
condition = condition.strip().split()
print(condition)
n_work = int(condition[0])
n_prereq = int(condition[1])
n_imm = int(condition[2])
n_non_imm = int(condition[3])
work_time = input('enter works required times: ')
work_time = work_time.strip().split()
print(work_time)
string = [str(i) for i in range(1, 16)]
print(string)
count = 0
work_dict = dict()
for time in work_time:
work_dict[string[count]] = int(time)
count += 1
print(work_dict)
prereq = []
for x in range(0, int(n_prereq)):
pr = input('enter prerequirments: ')
pr = pr.strip().split()
prereq.append((pr[0], pr[1]))
print(prereq)
imm = []
for x in range(0, int(n_imm)):
imd = input('enter immediate works: ')
imd = imd.strip().split()
imm.append((imd[0], imd[1]))
print(imm)
non_imm = []
for x in range(0, int(n_non_imm)):
nimd = input('enter non-immediate: ')
nimd = nimd.strip().split()
non_imm.append((nimd[0], nimd[1]))
print(non_imm)
def sort_prereq(main_list, prereq):
pass
def sort_imm(main_list, imm):
pass
def sort_non_imm(main_list, non_imm):
pass
with open('/tmp/all.list', 'a') as file:
elements = [i for i in list(work_dict.keys())]
print(elements)
for x in elements:
count = math.factorial(len(elements) - 1)
print(count)
    # I found an approach that might work:
    # separate out the orderings that match the prerequisites and pick
    # the one that causes the least loss.
|
mortezatajerii/projects
|
Python/quera-program programmer.py
|
quera-program programmer.py
|
py
| 2,368 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18956703257
|
from datetime import datetime, timezone
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
import logging
import os
import mongo_client
from typing import Optional, List, Union
import random
client = WebClient(token=os.environ.get("SLACK_TOKEN"))
good_words_collection = mongo_client.get_good_words_collection()
EMOJIS = os.environ.get("VALID_EMOJIS").split(' ')
def add_historical_goodwords():
# Call the conversations.list method using the WebClient
result = client.conversations_history(channel="C0441R6SKBN")
conversation_history = result["messages"]
for message in conversation_history:
word = message['text']
date_millis = float(message['ts'])
user_id = message['user']
temp_list = list(filter(lambda a: len(a) > 0, word.split(" ")))
if len(temp_list) == 1:
handle_word_sent(temp_list[0], date_millis, user_id, True)
def process_event(event: object):
if event.get('text', False) and event.get('ts', False) and event.get('user', False):
if event.get('thread_ts', False):
print(f"Replies to posts not accepted.")
return
message = event['text']
millis_time = float(event['ts'])
user = event['user']
channel = event['channel']
temp_list = list(filter(lambda a: len(a) > 0, message.split(" ")))
if len(temp_list) > 1 or channel != "C0441R6SKBN":
print(f"invalid submission: {temp_list}")
else:
handle_word_sent(temp_list[0], millis_time, user)
else:
print(f"Event missing attribute ts or text: {event}")
def handle_word_sent(word: str, millis_time: float, user_id: str, historical: bool=False):
prev_sent = find_word(word)
if prev_sent is not None:
if not historical:
client.chat_postMessage(channel="C0441R6SKBN", text=f"{word} was previously sent on {datetime.fromtimestamp(prev_sent['date_millis']).strftime('%m/%d/%Y')}", thread_ts=str(millis_time))
print(f"Thread Time: {datetime.fromtimestamp(prev_sent['date_millis']).strftime('%m/%d/%Y')}, Prev Sent Word: {word}")
elif not historical:
insert_new_word(word, millis_time, user_id)
client.reactions_add(channel="C0441R6SKBN", name=random.choice(EMOJIS), timestamp=str(millis_time))
else:
insert_new_word(word, millis_time, user_id)
def insert_new_word(word: str, date_millis: float, user: str):
word_lowercase = word.lower()
document = {
"word": word_lowercase,
"date_millis": date_millis,
"user_id": user
}
good_words_collection.insert_one(document)
print(f"Successfully added word: \n {document['word']} \n millis: {document['date_millis']}")
def find_word(word: str):
result = good_words_collection.find_one({"word": word.lower()})
print(f"Found: {result}")
return result
|
isaacson-f/slack-bots
|
goodwords_service.py
|
goodwords_service.py
|
py
| 2,887 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5033666757
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.db import transaction
from django.template import loader
from django.core.exceptions import ValidationError
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
from django.core.mail import send_mail, BadHeaderError
from django.contrib.auth.decorators import login_required
from .forms import UserForm, RegisterForm, UserProfileForm, ContactForm
from .models import UserProfile, Event
def index(request):
template = loader.get_template("help/index.html")
return HttpResponse(template.render(request=request))
@transaction.atomic
def register(request):
registered = False
if request.method == "POST":
user_form = RegisterForm(data=request.POST)
userprofile_form = UserProfileForm(data=request.POST)
if user_form.is_valid() and userprofile_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
phone = userprofile_form.cleaned_data.get("phone")
userprofile = UserProfile.objects.filter(user_id=user.id)
userprofile.update(phone=phone)
registered = True
else:
messages.error(request, (
'Veuillez corriger les erreurs ci-dessous.'))
else:
user_form = RegisterForm()
userprofile_form = UserProfileForm()
return render(
request, "help/registration.html", {
"user_form": user_form,
"userprofile_form": userprofile_form,
"registered": registered}
)
def logout2(request):
logout(request)
return redirect(reverse("index"))
@login_required()
def update_event(request):
if request.method == "POST":
id = request.POST['event_id']
event = Event.objects.filter(id=id)
event.update(status="closed")
return redirect(reverse('profile'))
else:
user_form = RegisterForm()
userprofile_form = UserProfileForm()
return render(request, "help/profile.html")
def contact(request):
send = False
email = []
contact_form = ContactForm()
if request.method == "POST":
subject = "demande d'info"
from_email = "needhelp_contact"
email.append(request.POST['Email'])
body = {
'name': request.POST['Nom'],
'email': request.POST['Email'],
'phone': request.POST['Mobile'],
'message': request.POST['Message'],
}
message = "\n".join(body.values())
try:
send_mail(
subject,
message,
from_email,
email
)
except BadHeaderError:
return HttpResponse('Invalid header found.')
send = True
# return redirect(reverse('contact'))
else:
contact_form = ContactForm()
return render(
request, "help/contact.html",
{
'contact_form': contact_form,
'send': send
})
|
davidbarat/P13
|
needhelp/help/views.py
|
views.py
|
py
| 3,219 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28395924014
|
import os
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
class AnimeDataset(Dataset):
def __init__(self, dataset_path, image_size):
self.transform = transforms.Compose([
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
self.paths = [os.path.join(dataset_path, name) for name in os.listdir(dataset_path)]
def __getitem__(self, item):
image = Image.open(self.paths[item])
data = self.transform(image)
return data
def __len__(self):
return len(self.paths)
class LossWriter:
def __init__(self, save_path):
self.save_path = save_path
def add(self, loss, i):
with open(self.save_path, mode="a") as f:
term = str(i) + " " + str(loss) + "\n"
f.write(term)
f.close()
def recover_image(img):
return (
(img.numpy() *
np.array([0.5, 0.5, 0.5]).reshape((1, 3, 1, 1)) +
np.array([0.5, 0.5, 0.5]).reshape((1, 3, 1, 1))
).transpose(0, 2, 3, 1) * 255
).clip(0, 255).astype(np.uint8)
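# Hedged usage sketch (not part of the original file; the dataset path is an
# assumption):
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    dataset = AnimeDataset(dataset_path="./data/anime", image_size=64)
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    batch = next(iter(loader))
    print(batch.shape)  # expected: torch.Size([16, 3, 64, 64])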
|
cwpeng-cn/DCGAN
|
data.py
|
data.py
|
py
| 1,276 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8528358737
|
"""
crown.py
COMP9444, CSE, UNSW
"""
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
# the data for this task has three columns: x y and class.
# the input of nn will be x and y, and the output will be a binary class.
class Full3Net(torch.nn.Module):
    # a fully connected feed-forward network with two hidden layers:
def __init__(self, hid=3):
super(Full3Net, self).__init__()
# define the structure of the nn
# define the first hidden layer: size of in feature is 2 and size of out feature is define by variable hid
self.hidden1 = nn.Linear(2, hid)
# define the second hidden layer: size of in feature is hid and size of out feature is hid
self.hidden2 = nn.Linear(hid, hid)
# define the third layer: the size of input is hid from layer 2, the size of output is 1
self.hidden3 = nn.Linear(hid, 1)
def forward(self, input):
        # compute the linear (pre-activation) sum of the weights with the input:
sum1 = self.hidden1(input)
# apply the activation function: tanh
self.hid1 = torch.tanh(sum1)
# calculate the linear sum of the weight with the first hidden layer output after activation
sum2 = self.hidden2(self.hid1)
# apply the activation function: tanh
self.hid2 = torch.tanh(sum2)
# compute the sum for the final layer
out_sum = self.hidden3(self.hid2)
# apply the activation function: sigmoid
output = torch.sigmoid(out_sum)
return output
class Full4Net(torch.nn.Module):
def __init__(self, hid):
super(Full4Net, self).__init__()
def forward(self, input):
self.hid1 = None
self.hid2 = None
self.hid3 = None
return 0*input[:,0]
class DenseNet(torch.nn.Module):
def __init__(self, num_hid):
super(DenseNet, self).__init__()
def forward(self, input):
self.hid1 = None
self.hid2 = None
return 0*input[:,0]
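# Hedged usage sketch (not part of the assignment file): a forward pass of
# Full3Net on random (x, y) points to check output shapes.
if __name__ == "__main__":
    net = Full3Net(hid=3)
    xy = torch.randn(8, 2)   # 8 points with (x, y) coordinates
    out = net(xy)
    print(out.shape)         # torch.Size([8, 1]), sigmoid outputs in (0, 1)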
|
sijinwnag/COMP9444_HW1
|
hw1/crown.py
|
crown.py
|
py
| 2,002 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12066066456
|
from random import randint
# n = randint(0, 100)
n = 71
playing = True
count = 0
while playing:
guess = int(input('Guess my number (0-100)? '))
if guess > n:
print('Too big')
elif guess < n:
print('Too small')
else:
print('bingo')
break
count += 1
if count == 7:
print('You lose')
break
|
paty0504/nguyentienthanh-fundamental-c4e13
|
ss3/test.py
|
test.py
|
py
| 360 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39189785619
|
"""
Example of the FrequentistSurface plot.
Usage: surf_plot.py FILE
where FILE is a file containing Surface to be plotted. The surface is expected
to be found in the `/surface` directory of the FILE.
"""
import sys
import matplotlib.pyplot as plt
from cafplot import load
from cafplot.plot.surface import (
plot_surface, plot_surface_best_fit, plot_surface_gauss_contour
)
root_file = load(sys.argv[1])
surface = root_file.get_fsurface('surface')
f, ax = plt.subplots()
im = plot_surface(ax, surface)
plot_surface_best_fit(ax, surface, color = 'red', marker = '*')
plot_surface_gauss_contour(
ax, surface, sigma = 1, color = 'red', label = r'1$\sigma$'
)
ax.set_xlabel(r'$\sin^2 \theta_{23}$')
ax.set_ylabel(r'$\Delta m^2_{32}$')
ax.legend()
f.colorbar(im)
plt.show()
|
usert5432/cafplot
|
examples/surf_plot.py
|
surf_plot.py
|
py
| 791 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75145119867
|
A, B = map(int,input().split())
C = int(input())
hour = (B+C)//60 # quotient, e.g. (30+20)//60 = 0: the hours carried over
minute = (B+C)%60 # remainder minutes
if (B+C >= 60):
    if (A+hour >= 24): # past 24 o'clock: subtract 24 so the hour wraps back to 0
        A = A-24
    A = A + hour # add the carried hours back onto the adjusted value
    print(A, minute)
else: # B+C < 60
if(A >= 24):
A = A-24
print(A, B+C)
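# Hedged worked example (hypothetical input): A=14, B=30, C=20
#   -> B+C = 50 < 60, so the output is "14 50".
# And for A=23, B=48, C=25: hour = 73//60 = 1, minute = 13,
#   A+hour = 24 >= 24 -> A = -1, then A = -1+1 = 0 -> output "0 13".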
|
zlnongi/Algorithm
|
baekjoon17.py
|
baekjoon17.py
|
py
| 429 |
python
|
ko
|
code
| 1 |
github-code
|
6
|
24347960900
|
import numpy as np
import os
from .image_extraction import extract_images_from_pdf
from .images import get_box
from PIL import Image
from flask import current_app
def reference_image(exam_id, page, dpi, widget_area_in=None, padding=0):
"""Returns a reference image for a specified area
The reference image is a flattened image of the
problem on the original PDF
Parameters
----------
exam_id : int
The id of the exam to use
page : int
The page number to get the reference image for
dpi : int
The desired DPI of the image
widget_area_in : numpy array
The widget coordinates as numpy array
If None, return the full page
padding : float
Extra padding to apply in inches
Returns
-------
image_path : string
Location of the image.
"""
app_config = current_app.config
data_directory = app_config["DATA_DIRECTORY"]
generated_path = os.path.join(data_directory, f"{exam_id}_data", "blanks", f"{dpi}")
if not os.path.exists(generated_path):
_extract_reference_images(dpi, exam_id)
image_path = os.path.join(generated_path, f"page{page:02d}.jpg")
if not os.path.exists(image_path):
_extract_reference_images(dpi, exam_id)
blank_page = Image.open(image_path)
blank_img_array = np.array(blank_page)
if widget_area_in is not None:
return get_box(blank_img_array, widget_area_in, padding=padding)
else:
return blank_img_array
def _extract_reference_images(dpi, exam_id):
"""Extract and save reference images for the specified exam
Saves the images at:
{data_directory}/{exam_id}_data/blanks/{dpi}/page{page}.jpg
Parameters
----------
dpi : int
The desired DPI for the extracted images
exam_id : int
The id of the desired exam
"""
data_directory = current_app.config["DATA_DIRECTORY"]
output_directory = os.path.join(data_directory, f"{exam_id}_data")
pdf_path = os.path.join(output_directory, "exam.pdf")
pages = extract_images_from_pdf(pdf_path, dpi=dpi)
for page, (image, _) in enumerate(pages, start=1):
_save_image(image, page, dpi, output_directory)
def _save_image(image, page, dpi, output_directory):
"""Save an image at an appropriate location.
Saves the images at:
{output_directory}/blanks/{dpi}/page{page}.jpg
Parameters
----------
image : PIL Image
Image data.
page : int
The corresponding page number, starting at 1.
dpi : int
The DPI of the image to save.
output_directory : path
The output directory of the exam the page is from.
Returns
-------
image_path : string
Location of the image.
"""
submission_path = os.path.join(output_directory, "blanks", f"{dpi}")
os.makedirs(submission_path, exist_ok=True)
image_path = os.path.join(submission_path, f"page{page-1:02d}.jpg")
image.save(image_path)
return image_path
|
zesje/zesje
|
zesje/blanks.py
|
blanks.py
|
py
| 3,019 |
python
|
en
|
code
| 9 |
github-code
|
6
|
24955814708
|
import shutil
import pytest
from repo2rocrate.snakemake import find_workflow, get_lang_version, make_crate
SNAKEMAKE_ID = "https://w3id.org/workflowhub/workflow-ro-crate#snakemake"
def test_find_workflow(tmpdir):
root = tmpdir / "snakemake-repo"
workflow_dir = root / "workflow"
workflow_dir.mkdir(parents=True)
with pytest.raises(RuntimeError):
find_workflow(root)
wf_path = workflow_dir / "Snakefile"
wf_path.touch()
assert find_workflow(root) == wf_path
new_wf_path = root / "Snakefile"
shutil.move(wf_path, new_wf_path)
assert find_workflow(root) == new_wf_path
def test_get_lang_version(tmpdir):
v = "0.1.0"
wf_path = tmpdir / "Snakefile"
for arg_part in f'("{v}")', f"( '{v}')":
with open(wf_path, "wt") as f:
f.write(f"# comment\nfrom x import y\nmin_version{arg_part}\n")
assert get_lang_version(wf_path) == v
@pytest.mark.parametrize("defaults", [False, True])
def test_make_crate(data_dir, defaults):
repo_name = "fair-crcc-send-data"
root = data_dir / repo_name
repo_url = f"https://github.com/crs4/{repo_name}"
kwargs = {"repo_url": repo_url}
if defaults:
wf_path = root / "workflow" / "Snakefile"
wf_name = repo_name
wf_version = None
lang_version = "6.5.0"
license = None
ci_workflow = "main.yml"
diagram = "images/rulegraph.svg"
else:
wf_path = root / "pyproject.toml"
wf_name = "spam/bar"
wf_version = "0.9.0"
lang_version = "99.9.9"
license = "GPL-3.0"
ci_workflow = "release-please.yml"
diagram = "images/rulegraph.dot"
kwargs.update(
workflow=wf_path,
wf_name=wf_name,
wf_version=wf_version,
lang_version=lang_version,
license=license,
ci_workflow=ci_workflow,
diagram=diagram,
)
crate = make_crate(root, **kwargs)
if license:
assert crate.root_dataset["license"] == license
# workflow
workflow = crate.mainEntity
assert workflow.id == str(wf_path.relative_to(root))
assert workflow["name"] == crate.root_dataset["name"] == wf_name
if wf_version:
assert workflow["version"] == wf_version
image = crate.get(diagram)
assert image
assert set(image.type) == {"File", "ImageObject"}
assert workflow["image"] is image
language = workflow["programmingLanguage"]
assert language.id == SNAKEMAKE_ID
assert language["version"] == lang_version
assert workflow["url"] == crate.root_dataset["isBasedOn"] == repo_url
# workflow testing metadata
suite = crate.root_dataset["mentions"]
assert suite
if isinstance(suite, list):
assert len(suite) == 1
suite = suite[0]
assert suite.type == "TestSuite"
assert suite["mainEntity"] is workflow
instance = suite["instance"]
assert instance
if isinstance(instance, list):
assert len(instance) == 1
instance = instance[0]
assert instance.type == "TestInstance"
assert instance["url"] == "https://api.github.com"
assert instance["resource"] == f"repos/crs4/{repo_name}/actions/workflows/{ci_workflow}"
# layout
expected_data_entities = [
("LICENSE", "File", ""),
("README.md", "File", ""),
("config", "Dataset", "Configuration folder"),
(".tests/integration", "Dataset", "Integration tests for the workflow"),
("workflow/rules", "Dataset", "Workflow rule modules"),
("workflow/schemas", "Dataset", "Validation files"),
("workflow/scripts", "Dataset", "Scripts folder"),
]
for relpath, type_, desc in expected_data_entities:
entity = crate.get(relpath)
assert entity, f"{relpath} not listed in crate metadata"
assert entity.type == type_
if desc:
assert entity["description"] == desc
|
crs4/repo2rocrate
|
test/test_snakemake.py
|
test_snakemake.py
|
py
| 3,932 |
python
|
en
|
code
| 1 |
github-code
|
6
|
2785515345
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import logging as log
from time import sleep
from html import escape
import requests
class FloodException(Exception): pass
def send(config, post):
# prepare config
c = {'token': '', 'chat': '', 'maxlength': 1000, 'skip': [], 'censor': []}
c.update(config)
if not c['token'] or not c['chat']:
raise Exception('[Telegram] missing parameter')
# check key words
if any((x in post['title']) for x in c['skip']):
log.info('[Telegram] skip word hit')
return
if any((x in post['content']) for x in c['censor']):
log.info('[Telegram] censor word hit')
post['content'] = ''
    # pad e-mail addresses and links with spaces so they are not glued to surrounding text
post['content'] = re.sub(r'([!-~]+\@[!-~]+)', ' \\1 ', post['content'])
post['content'] = re.sub(r'(https?://[!-~]+)', '\\1 ', post['content'])
# form message
html = f'<b>{escape(post["title"])}</b>\n'
html += f'{post["time"]} #{post["dept"]}\n'
html += f'<a href="{post["linkLAN"]}">校内链接</a> <a href="{post["linkVPN"]}">VPN链接</a>\n\n'
html += f'{escape(post["content"])}'
if len(html) > c['maxlength']: html = html[:c['maxlength']] + '...'
log.debug(f'[Telegram] html: {html}')
# call HTTP API
while True:
try:
r = requests.post('https://api.telegram.org/bot'+c['token']+'/sendMessage', json={
'chat_id': c['chat'],
'text': html,
'parse_mode': 'HTML',
'disable_web_page_preview': True
}, timeout=5)
log.debug(f'[Telegram] response: {r.text}')
if not r.json()['ok']:
if r.json()['error_code'] == 429:
raise FloodException()
else:
raise Exception(r.json()['error_code'])
break
except FloodException:
log.warning('[Telegram] hit rate limit!')
sleep(30)
except requests.exceptions.RequestException as e:
log.info('[Telegram] network error')
log.info(repr(e))
sleep(5)
except Exception as e:
log.error('[Telegram] unknown error')
log.error(repr(e))
sleep(60)
|
TechCiel/Reachee
|
sender/telegram.py
|
telegram.py
|
py
| 2,274 |
python
|
en
|
code
| 11 |
github-code
|
6
|
10422651353
|
from __future__ import annotations
import copy
from dataclasses import dataclass
from enum import Enum
from typing import TYPE_CHECKING, Self
from randovania.bitpacking import bitpacking
from randovania.bitpacking.bitpacking import BitPackDecoder, BitPackEnum, BitPackValue
from randovania.bitpacking.type_enforcement import DataclassPostInitTypeCheck
from randovania.game_description import default_database
from randovania.game_description.db.dock import DockType, DockWeakness
from randovania.games.game import RandovaniaGame
from randovania.lib import enum_lib
if TYPE_CHECKING:
from collections.abc import Iterator
from randovania.game_description.db.dock import DockWeaknessDatabase
class DockRandoMode(BitPackEnum, Enum):
long_name: str
description: str
VANILLA = "vanilla"
DOCKS = "docks"
WEAKNESSES = "weaknesses"
enum_lib.add_long_name(
DockRandoMode,
{
DockRandoMode.VANILLA: "Vanilla",
DockRandoMode.DOCKS: "Doors",
DockRandoMode.WEAKNESSES: "Types",
},
)
enum_lib.add_per_enum_field(
DockRandoMode,
"description",
{
DockRandoMode.VANILLA: "Original door locks",
DockRandoMode.DOCKS: "Randomize the type of each door individually",
DockRandoMode.WEAKNESSES: "Randomizes all doors by type, turning all of one type into another",
},
)
@dataclass(frozen=True)
class DockTypeState(BitPackValue, DataclassPostInitTypeCheck):
game: RandovaniaGame
dock_type_name: str
can_change_from: set[DockWeakness]
can_change_to: set[DockWeakness]
@staticmethod
def _get_weakness_database(game: RandovaniaGame) -> DockWeaknessDatabase:
return default_database.game_description_for(game).dock_weakness_database
@property
def weakness_database(self) -> DockWeaknessDatabase:
return self._get_weakness_database(self.game)
@property
def dock_type(self) -> DockType:
return self.weakness_database.find_type(self.dock_type_name)
@property
def can_shuffle(self) -> bool:
return len(self.can_change_from) > 0
@property
def as_json(self) -> dict:
return {
"can_change_from": sorted(weakness.name for weakness in self.can_change_from),
"can_change_to": sorted(weakness.name for weakness in self.can_change_to),
}
@classmethod
def from_json(cls, value: dict, game: RandovaniaGame, dock_type_name: str) -> DockTypeState:
weakness_database = cls._get_weakness_database(game)
return cls(
game=game,
dock_type_name=dock_type_name,
can_change_from={
weakness_database.get_by_weakness(dock_type_name, weakness) for weakness in value["can_change_from"]
},
can_change_to={
weakness_database.get_by_weakness(dock_type_name, weakness) for weakness in value["can_change_to"]
},
)
def bit_pack_encode(self, metadata) -> Iterator[tuple[int, int]]:
yield from bitpacking.pack_sorted_array_elements(
sorted(self.can_change_from),
sorted(self.possible_change_from),
)
yield from bitpacking.pack_sorted_array_elements(
sorted(self.can_change_to),
sorted(self.possible_change_to),
)
@classmethod
def bit_pack_unpack(cls, decoder: BitPackDecoder, metadata) -> DockTypeState:
reference: DockTypeState = metadata["reference"]
ref_change_from = sorted(cls._possible_change_from(reference.game, reference.dock_type_name))
ref_change_to = sorted(cls._possible_change_to(reference.game, reference.dock_type_name))
return cls(
game=reference.game,
dock_type_name=reference.dock_type_name,
can_change_from=set(bitpacking.decode_sorted_array_elements(decoder, ref_change_from)),
can_change_to=set(bitpacking.decode_sorted_array_elements(decoder, ref_change_to)),
)
@staticmethod
def _possible_change_from(game: RandovaniaGame, dock_type_name: str) -> Iterator[DockWeakness]:
weakness_database = DockTypeState._get_weakness_database(game)
yield from weakness_database.dock_rando_params[weakness_database.find_type(dock_type_name)].change_from
@property
def possible_change_from(self) -> Iterator[DockWeakness]:
yield from self._possible_change_from(self.game, self.dock_type_name)
@staticmethod
def _possible_change_to(game: RandovaniaGame, dock_type_name: str) -> Iterator[DockWeakness]:
weakness_database = DockTypeState._get_weakness_database(game)
yield from weakness_database.dock_rando_params[weakness_database.find_type(dock_type_name)].change_to
@property
def possible_change_to(self) -> Iterator[DockWeakness]:
yield from self._possible_change_to(self.game, self.dock_type_name)
@dataclass(frozen=True)
class DockRandoConfiguration(BitPackValue, DataclassPostInitTypeCheck):
game: RandovaniaGame
mode: DockRandoMode
types_state: dict[DockType, DockTypeState]
@staticmethod
def _get_weakness_database(game: RandovaniaGame) -> DockWeaknessDatabase:
return default_database.game_description_for(game).dock_weakness_database
@property
def weakness_database(self) -> DockWeaknessDatabase:
return self._get_weakness_database(self.game)
@property
def as_json(self) -> dict:
return {
"mode": self.mode.value,
"types_state": {
dock_type.short_name: type_state.as_json for dock_type, type_state in self.types_state.items()
},
}
@classmethod
def from_json(cls, value: dict, game: RandovaniaGame) -> Self:
weakness_database = cls._get_weakness_database(game)
return cls(
game=game,
mode=DockRandoMode(value["mode"]),
types_state={
weakness_database.find_type(dock_type): DockTypeState.from_json(type_state, game, dock_type)
for dock_type, type_state in value["types_state"].items()
},
)
def bit_pack_encode(self, metadata) -> Iterator[tuple[int, int]]:
reference: DockRandoConfiguration = metadata["reference"]
yield from self.mode.bit_pack_encode(None)
modified_types = sorted(
dock_type
for dock_type, type_state in self.types_state.items()
if type_state != reference.types_state[dock_type]
)
yield from bitpacking.pack_sorted_array_elements(modified_types, sorted(self.weakness_database.dock_types))
for dock_type in modified_types:
yield from self.types_state[dock_type].bit_pack_encode({"reference": reference.types_state[dock_type]})
@classmethod
def bit_pack_unpack(cls, decoder: BitPackDecoder, metadata) -> Self:
reference: DockRandoConfiguration = metadata["reference"]
mode = DockRandoMode.bit_pack_unpack(decoder, None)
modified_types = bitpacking.decode_sorted_array_elements(
decoder, sorted(reference.weakness_database.dock_types)
)
types_state = copy.copy(reference.types_state)
for dock_type in modified_types:
types_state[dock_type] = DockTypeState.bit_pack_unpack(
decoder, {"reference": reference.types_state[dock_type]}
)
return cls(
game=reference.game,
mode=mode,
types_state=types_state,
)
def is_enabled(self) -> bool:
return self.mode != DockRandoMode.VANILLA
def can_shuffle(self, dock_type: DockType) -> bool:
return dock_type in self.weakness_database.dock_rando_params and self.types_state[dock_type].can_shuffle
def settings_incompatible_with_multiworld(self) -> list[str]:
danger = []
if self.mode == DockRandoMode.DOCKS:
danger.append(f"{self.mode.long_name}: {self.mode.description}")
return danger
|
randovania/randovania
|
randovania/layout/base/dock_rando_configuration.py
|
dock_rando_configuration.py
|
py
| 8,008 |
python
|
en
|
code
| 165 |
github-code
|
6
|
11467657062
|
# p161 Maze solving (graph search)
def solve_maze(g, start, end):
    qu = []          # BFS queue of paths (each path is a string of node names)
    done = set()     # visited nodes
    qu.append(start)
    done.add(start)
    while qu:
        p = qu.pop(0)    # take the oldest path (FIFO -> breadth-first)
        v = p[-1]        # its last node
        if v == end:
            return p
        for x in g[v]:
            if x not in done:
                qu.append(p + x)   # extend the path by one node
                done.add(x)
    return '?'
maze = {
'a': ['e'],
'b': ['c', 'f'],
'c': ['b', 'd'],
'd': ['c'],
'e': ['a', 'i'],
'f': ['b', 'g', 'j'],
'g': ['f', 'h'],
'h': ['g', 'l'],
'i': ['e', 'm'],
'j': ['f', 'k', 'n'],
'k': ['j', 'o'],
'l': ['h', 'p'],
'm': ['i', 'n'],
'n': ['m', 'j'],
'o': ['k'],
'p': ['l']
}
print(solve_maze(maze, 'a', 'p'))
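# BFS explores paths in breadth-first order, so the first path to reach the
# goal is a shortest one; the call above prints 'aeimnjfghlp'.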
# p168 Find the fake coin 1 (sequential weighing)
def weigh(a, b, c, d):
    # Simulated balance scale: returns -1 if the fake coin is among
    # coins a..b, 1 if it is among coins c..d, and 0 otherwise.
    fake = 14
    if a <= fake and fake <= b:
        return -1
    elif c <= fake and fake <= d:
        return 1
    return 0
def find_fakecoin(left, right):
for i in range(left+1, right+1):
result = weigh(left, left, i, i)
if result == -1:
return left
elif result == 1:
return i
return '?'
n = 100
print(find_fakecoin(0, n-1))
# p178 Find the fake coin 2 (halving the search range)
def weigh(a, b, c, d):
fake = 25
if a <= fake and fake <= b:
return -1
elif c <= fake and fake <= d:
return 1
return 0
def find_fakecoin(left, right):
if left == right:
return left
half = (right - left + 1) // 2
g1_l = left
g1_r = left + half - 1
g2_l = left + half
g2_r = g2_l + half - 1
result = weigh(g1_l, g1_r, g2_l, g2_r)
if result == -1:
return find_fakecoin(g1_l, g1_r)
elif result == 1:
return find_fakecoin(g2_l, g2_r)
else:
return right
n = 100
print(find_fakecoin(0, n-1))
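# Halving the candidate range needs only O(log n) weighings, compared with
# the O(n) weighings of the sequential version above.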
# p175 Maximum stock profit 1 (brute force)
def max_profit(prices):
n = len(prices)
max_profit = 0
for i in range(0, n-1):
for j in range(i+1, n):
profit = prices[j] - prices[i]
if profit > max_profit:
max_profit = profit
return max_profit
stock = [10300, 9600, 9800, 8200, 7800, 8300, 9500, 9800, 10200, 9500]
print(max_profit(stock))
# p177 Maximum stock profit 2 (single pass)
def max_profit(prices):
n = len(prices)
min_price = prices[0]
max_profit = 0
for i in range(1, n):
profit = prices[i] - min_price
if profit > max_profit:
max_profit = profit
if prices[i] < min_price:
min_price = prices[i]
return max_profit
stock = [10300, 9600, 9800, 8200, 7800, 8300, 9500, 9800, 10200, 9500]
print(max_profit(stock))
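# Keeping a running minimum of the price turns the O(n^2) pair scan of the
# brute-force version into a single O(n) pass.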
|
hsyeon4001/algorithm_records
|
Python/교재/5.py
|
5.py
|
py
| 2,747 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23647554795
|
from flask import render_template, request, flash, jsonify
from appInitialize import app, db
from model.client import Client
from model.product import Product
from model.order import Order
import json
@app.route('/')
def index ():
return render_template('layout.html')
#Query clients
@app.route('/read/clients', methods=['GET'])
def readClients ():
clients = Client.query.filter_by(state = 'activo').all()
return render_template('read.html', id = False, records = clients, route = 'client')
@app.route('/api/read/clients', methods=['GET'])
def apiReadClients ():
clients = Client.query.filter_by(state = 'activo').all()
return jsonify([{'name': client.name, 'document': client.document, 'state': client.state, 'created_at': client.created_at} for client in clients])
#Query products
@app.route('/read/products', methods=['GET'])
def readProducts ():
products = Product.query.filter(Product.state != 'inactivo').all()
return render_template('read.html', id = False, records = products, route = 'product')
@app.route('/api/read/products', methods=['GET'])
def apiReadProducts ():
products = Product.query.filter(Product.state != 'inactivo').all()
return jsonify([{'name': product.name, 'document': product.price, 'state': product.state, 'created_at': product.created_at} for product in products])
#Query orders
@app.route('/read/orders', methods=['GET', 'POST'])
def readOrders ():
if request.method == 'POST':
id = request.form['id']
if id == "true":
clientid = request.form['clientid']
client = Client.query.filter_by(clientid = clientid, state = 'activo').all()
if len(client) == 0:
                flash('Client not found')
return render_template('read.html', id = True, route = 'order')
else:
                orders = Order.query.filter_by(clientid = clientid, state = 'pendiente').join(Client, db.and_(Order.clientid == Client.clientid, Client.state == 'activo')).join(Product, db.and_(Order.productid == Product.productid, Product.state != 'inactivo')).all()
return render_template('read.html', id = False, records = orders, route = 'order')
return render_template('read.html', id = True, route = 'order')
@app.route('/api/read/orders', methods=['POST'])
def apiReadOrders ():
if request.method == 'POST':
data = json.loads(request.data)
client = Client.query.filter_by(clientid = data['clientid'], state = 'activo').all()
if len(client) == 0:
return "Registro no encontrado", 402
        orders = Order.query.filter_by(clientid = data['clientid'], state = 'pendiente').join(Client, db.and_(Order.clientid == Client.clientid, Client.state == 'activo')).join(Product, db.and_(Order.productid == Product.productid, Product.state != 'inactivo')).all()
return jsonify([{'clientid': order.clientid, 'productid': order.productid, 'quantity': order.quantity, 'total': order.total, 'state': order.state, 'created_at': order.created_at} for order in orders])
#Query purchases
@app.route('/read/purchases', methods=['GET', 'POST'])
def readPurchases ():
if request.method == 'POST':
id = request.form['id']
if id == "true":
clientid = request.form['clientid']
client = Client.query.filter_by(clientid = clientid, state = 'activo').all()
if len(client) == 0:
                flash('Client not found')
return render_template('read.html', id = True, route = 'purchase')
else:
                orders = Order.query.filter_by(clientid = clientid, state = 'pagada').join(Client, db.and_(Order.clientid == Client.clientid, Client.state == 'activo')).join(Product, db.and_(Order.productid == Product.productid, Product.state != 'inactivo')).all()
return render_template('read.html', id = False, records = orders, route = 'purchase')
return render_template('read.html', id = True, route = 'purchase')
@app.route('/api/read/purchases', methods=['POST'])
def apiReadPurchases ():
if request.method == 'POST':
data = json.loads(request.data)
client = Client.query.filter_by(clientid = data['clientid'], state = 'activo').all()
if len(client) == 0:
return "Registro no encontrado", 402
        orders = Order.query.filter_by(clientid = data['clientid'], state = 'pagada').join(Client, db.and_(Order.clientid == Client.clientid, Client.state == 'activo')).join(Product, db.and_(Order.productid == Product.productid, Product.state != 'inactivo')).all()
return jsonify([{'clientid': order.clientid, 'productid': order.productid, 'quantity': order.quantity, 'total': order.total, 'state': order.state, 'created_at': order.created_at} for order in orders])
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
|
cesar-orozco-chr/tienda-online
|
read/app.py
|
app.py
|
py
| 4,862 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30493177090
|
#Setting the hyperparameters
#(X_train, y_train, X_test and y_test are assumed to come from an earlier pipeline step)
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, confusion_matrix
param_test1 = {
'max_depth':[3,5,6,10],
'min_child_weight':[3,5,10],
'gamma':[0.0, 0.1, 0.2, 0.3, 0.4],
# 'reg_alpha':[1e-5, 1e-2, 0.1, 1, 10],
'subsample':[i/100.0 for i in range(75,90,5)],
'colsample_bytree':[i/100.0 for i in range(75,90,5)]
}
#Creating the classifier
model_xg = XGBClassifier(random_state=2)
grid_search = GridSearchCV(model_xg, param_grid=param_test1, cv=5, scoring='recall')
grid_search.fit(X_train, y_train)
print(grid_search.best_score_)
print(grid_search.best_params_)
y_pred = grid_search.predict(X_test)
# Check the results obtained
print(accuracy_score(y_test,y_pred))
print("\n")
print(confusion_matrix(y_test, y_pred))
|
garagaby/Kernel-Airflow
|
bin/implementing_pipeline_models.py
|
implementing_pipeline_models.py
|
py
| 681 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7176555609
|
import functools
from typing import (
Any,
Callable,
TypeVar,
cast,
)
import warnings
TFunc = TypeVar("TFunc", bound=Callable[..., Any])
def deprecate_method(func: TFunc, message: str = None) -> TFunc:
@functools.wraps(func)
def deprecated_func(*args: Any, **kwargs: Any) -> Any:
warnings.warn(
category=DeprecationWarning,
message=(
message
or f"{func.__name__} is deprecated. "
"A breaking change is expected in a future release."
),
stacklevel=2,
)
        return func(*args, **kwargs)
return cast(TFunc, deprecated_func)
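# Illustrative usage sketch (not part of the original module); the helper
# name and message below are made up for the example.
if __name__ == "__main__":
    def _old_helper(x: int) -> int:
        return x + 1
    _old_helper = deprecate_method(_old_helper, "_old_helper is deprecated.")
    assert _old_helper(1) == 2  # emits a DeprecationWarning, then delegates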
|
ethereum/py-evm
|
eth/tools/_utils/deprecation.py
|
deprecation.py
|
py
| 662 |
python
|
en
|
code
| 2,109 |
github-code
|
6
|
40880830024
|
from bs4 import BeautifulSoup
import requests,re,os
import textract
os.chdir("..")
DATA_DIR = "data/fiscal_pdf"
SAVE_DIR = "data/fiscal_txt"
pre_link="http://www.imf.org"
target_dir = "http://www.imf.org/external/np/g20/"
g8_link = "http://www.g8.utoronto.ca/summit/index.htm"
# Extract all IMF Staff Notes from the IMF website
html = requests.get(target_dir)
soup = BeautifulSoup(html.content, "lxml")
link_imf = soup.findAll("a",text=re.compile('IMF Staff Note to G-20'))
def extract_pdf(r,link,datdir,savedir):
name = link.split("/")[-1].split(".")[0][:6]
name_format = "20"+name[-2:]+name[:2]+name[2:4]
name_pdf = name_format + ".pdf"
name_txt = name_format + ".txt"
save_dir = os.path.join(datdir,name_pdf)
save_txt = os.path.join(savedir,name_txt)
with open(save_dir, 'wb') as f:
f.write(r.content)
text = textract.process(save_dir)
with open(save_txt, 'wb') as f:
f.write(text)
for i in link_imf:
if "pdf" in i["href"]:
if "www" in i["href"]:
r = requests.get(i["href"], stream=True)
extract_pdf(r,i["href"],DATA_DIR,SAVE_DIR)
else:
link_temp=pre_link + i["href"]
r = requests.get(link_temp,stream=True)
extract_pdf(r,i["href"],DATA_DIR,SAVE_DIR)
else:
if "www" in i["href"]:
r = requests.get(i["href"], stream=True)
else:
link_temp=pre_link + i["href"]
r = requests.get(link_temp,stream=True)
temp=BeautifulSoup(r.content, "lxml")
link_temp=temp.find(text=re.compile("Read the"))
link_temp=link_temp.parent.a["href"]
if "external" in link_temp:
link_temp=pre_link+link_temp
else:
link_temp=target_dir+link_temp
r = requests.get(link_temp,stream=True)
extract_pdf(r,link_temp,DATA_DIR,SAVE_DIR)
|
utipe/imf_fiscal
|
code/corpus_extraction.py
|
corpus_extraction.py
|
py
| 1,880 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19365158235
|
import pickle, ssl, logging
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import pytorch_lightning as pl
import models, curves, utils1, utils, preresnet, resnet
DATASET = datasets.CIFAR10
TEST_ITEMS = 50_000
BATCH_SIZE = 128
WORKER_COUNT = 6
EPOCHS = 120
LEARNING_RATE=0.1
LR_GAMMA=0.1
L2_REG = 5e-4
MOMENTUM = 0.9
PARALLEL_MODELS = 1
MODEL = resnet.ResNet18 if DATASET == datasets.CIFAR10 else resnet.ResNet14MNIST
CURVE_BENDS = 3
CURVE_NUM_SAMPLES = 61
STEP_MODES = 2
STEP_BAD_MODES = 2
CODE_CHECK = False
if CODE_CHECK:
TEST_ITEMS = 5000
WORKER_COUNT = 0
EPOCHS = 1
CURVE_NUM_SAMPLES = 2
LOADER_ARGS = {"batch_size":BATCH_SIZE, "num_workers":WORKER_COUNT, "persistent_workers":WORKER_COUNT>0}
TRAINER_ARGS = {"accelerator":"gpu", "devices":"auto", "max_epochs":EPOCHS, "precision":16}
class LitModel(pl.LightningModule):
def __init__(self):
super().__init__()
if PARALLEL_MODELS > 1:
self.model = models.ParallelModel(PARALLEL_MODELS, MODEL, 10, **MODEL.kwargs)
else:
self.model = MODEL.base(10, **MODEL.kwargs)
self.loss = nn.CrossEntropyLoss()
self.mode = None
def forward(self, x):
return self.model(x)
def process_batch(self, batch):
x, y = batch
if PARALLEL_MODELS > 1:
y = torch.cat([y for _ in range(PARALLEL_MODELS)], dim=0)
return self(x), y
def training_step(self, batch, batch_idx):
y_hat, y = self.process_batch(batch)
loss = self.loss(y_hat, y)
self.log("train_loss", loss.item(), on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
y_hat, y = self.process_batch(batch)
loss = self.loss(y_hat, y)
correct = (y_hat.argmax(1) == y).type(torch.float).sum().item()
acc = 100*correct / y.size(dim=0)
self.log("val_loss", loss)
self.log("val_acc", acc)
def test_step(self, batch, batch_idx):
y_hat, y = self.process_batch(batch)
loss = self.loss(y_hat, y)
correct = (y_hat.argmax(1) == y).type(torch.float).sum().item()
acc = 100*correct / y.size(dim=0)
self.log("test_loss", loss)
self.log("test_acc", acc)
def configure_optimizers(self):
optimiser = torch.optim.SGD(self.parameters(), lr=LEARNING_RATE*PARALLEL_MODELS, momentum=MOMENTUM, weight_decay=L2_REG)
scheduler_dict = {
"scheduler": torch.optim.lr_scheduler.StepLR(optimiser, step_size=max(1,EPOCHS//3), gamma=LR_GAMMA),
"interval": "epoch"
}
return {"optimizer":optimiser, "lr_scheduler":scheduler_dict}
class LitModelConnect(pl.LightningModule):
def __init__(self, start_model = None, end_model = None, num_bends=CURVE_BENDS):
super().__init__()
self.loss = nn.CrossEntropyLoss()
self.model = curves.CurveNet(10, curves.PolyChain, MODEL.curve, num_bends, architecture_kwargs=MODEL.kwargs)
self.t = None
self.update_bn = False
# Initialise curve weights
if start_model != None:
self.model.import_base_parameters(start_model.model, 0)
self.model.import_base_parameters(end_model.model, num_bends - 1)
self.model.init_linear()
def forward(self, x, **kwargs):
return self.model(x, **kwargs)
def regulariser(self):
return 0.5 * L2_REG * self.model.l2
def process_batch(self, batch):
x, y = batch
return self(x, t=self.t), y
def set_t(self, t):
self.t = t
def training_step(self, batch, batch_idx):
y_hat, y = self.process_batch(batch)
loss = self.loss(y_hat, y) + self.regulariser()
self.log("train_loss", loss.item(), on_epoch=True)
return loss
def on_test_start(self):
if self.update_bn:
self.model.train()
def test_step(self, batch, batch_idx):
y_hat, y = self.process_batch(batch)
nll = self.loss(y_hat, y)
loss = nll + self.regulariser()
correct = (y_hat.argmax(1) == y).type(torch.float).sum().item()
acc = 100*correct / y.size(dim=0)
self.log("test_nll", nll)
self.log("test_loss", loss)
self.log("test_acc", acc)
def configure_optimizers(self):
optimiser = torch.optim.SGD(
filter(lambda param: param.requires_grad, self.parameters()),
lr=LEARNING_RATE,
momentum=MOMENTUM
)
scheduler_dict = {
"scheduler": torch.optim.lr_scheduler.StepLR(optimiser, step_size=max(1,EPOCHS//3), gamma=LR_GAMMA),
"interval": "epoch"
}
return {"optimizer":optimiser, "lr_scheduler":scheduler_dict}
def update_bn(model, loader):
if not utils.check_bn(model): return
bn_trainer = pl.Trainer(logger=False,**TRAINER_ARGS)
model.update_bn = True
bn_trainer.test(model, loader, verbose=False)
model.update_bn = False
def testCurve(model, trainer, test_loader, train_loader=None):
ts = np.linspace(0.0, 1.0, CURVE_NUM_SAMPLES)
if train_loader == None:
train_loader = test_loader
# BN has momentum so iter a few times to warm up
model.set_t(0.0)
for _ in range(3):
update_bn(model, train_loader)
# Test and compute max stats
max_loss = -1
for t in ts:
model.set_t(t)
update_bn(model, train_loader)
metrics = trainer.test(model, test_loader)
max_loss = max(max_loss, metrics[0]["test_loss"])
return max_loss
if __name__ == "__main__":
# Select training device
use_cuda = torch.cuda.is_available()
if use_cuda:
LOADER_ARGS["pin_memory"] = True
device = "cuda" if use_cuda else "cpu"
print(f"Training on {device}")
# Setup logging
logging.getLogger("lightning").setLevel(logging.ERROR)
def genLogger(log, path):
return pl.loggers.CSVLogger(path, name=log)
# Setup checkpointing
checkpoint = pl.callbacks.ModelCheckpoint(
#dirpath="checkpoints",
#save_last=True,
save_top_k=0,
save_weights_only=True
)
# Setup progress bar
progress_bar = pl.callbacks.RichProgressBar()
TRAINER_ARGS["callbacks"] = [checkpoint, progress_bar]
# ------------------------
# Prepare Datasets / Loaders
# ------------------------
# CIFAR10 has an expired cert
ssl._create_default_https_context = ssl._create_unverified_context
# Too many files open with file_descriptor strategy
torch.multiprocessing.set_sharing_strategy('file_system')
transform = []
# if DATASET == datasets.CIFAR10:
# transform += [transforms.Grayscale()]
transform += [
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]
transform = transforms.Compose(transform)
train_data = DATASET(
root=".data",
train=True,
download=True,
transform=transform
)
# Truncate data for quick checks
if TEST_ITEMS < 50_000:
train_data = torch.utils.data.Subset(train_data, range(TEST_ITEMS))
adverse_data = utils1.NoiseDataset(train_data)
test_data = DATASET(
root=".data",
train=False,
download=True,
transform=transform
)
# train_data = utils1.GPUDataset(train_data, "cuda")
# adverse_data = utils1.GPUDataset(adverse_data, "cuda")
# test_data = utils1.GPUDataset(test_data, "cuda")
train_loader = DataLoader(train_data, shuffle=True, **LOADER_ARGS)
adverse_loader = DataLoader(adverse_data, shuffle=True, **LOADER_ARGS)
LOADER_ARGS["batch_size"] = 1024
test_loader = DataLoader(test_data, **LOADER_ARGS)
curve_loader = DataLoader(train_data, **LOADER_ARGS)
# ------------------------
# Generate Data
# ------------------------
# Load state
state = {"minima":[], "paths":[]}
try:
state_file = open('state.p', 'rb')
state = pickle.load(state_file)
state_file.close()
except FileNotFoundError:
pass
# ------------------------
# Generate Minima
# ------------------------
# Generate good minima
good_minima = []
for _ in range(STEP_MODES):
model = LitModel()
model.mode = "train"
path=f"modes/{len(state['minima'])}"
trainer = pl.Trainer(logger=genLogger("train", path),**TRAINER_ARGS)
trainer.fit(model, train_loader, test_loader)
metrics = trainer.test(model, test_loader)[0]
trainer.save_checkpoint(path+"/model.ckpt")
record = {"idx":len(state["minima"]), "type":"good", "path":path, "loss":metrics["test_loss"], "acc":metrics["test_acc"]}
good_minima.append(record)
state["minima"].append(record)
# Generate adversarial init
model = LitModel()
model.mode = "adverse"
trainer = pl.Trainer(logger=genLogger("adverse", "modes"), **TRAINER_ARGS)
trainer.fit(model, adverse_loader)
trainer.save_checkpoint("modes/adverse.ckpt")
# Generate bad minima
bad_minima = []
for _ in range(STEP_BAD_MODES):
model = LitModel.load_from_checkpoint("modes/adverse.ckpt")
model.mode = "train"
path=f"modes/{len(state['minima'])}"
trainer = pl.Trainer(logger=genLogger("train", path), **TRAINER_ARGS)
trainer.fit(model, train_loader, test_loader)
metrics = trainer.test(model, test_loader)[0]
trainer.save_checkpoint(path+"/model.ckpt")
record = {"idx":len(state["minima"]), "type":"bad", "path":path, "loss":metrics["test_loss"], "acc":metrics["test_acc"]}
bad_minima.append(record)
state["minima"].append(record)
# ------------------------
# Generate Curves
# ------------------------
new_curves = []
# Connect good minima to existing
for mode in good_minima:
if mode["idx"] == 0: continue
other = np.random.randint(mode["idx"])
while state["minima"][other]["type"] != "good":
other = np.random.randint(mode["idx"])
new_curves.append((mode["idx"], other))
# Connect a bad minimum to a good minimum
other = np.random.randint(bad_minima[0]["idx"])
while state["minima"][other]["type"] != "good":
other = np.random.randint(bad_minima[0]["idx"])
new_curves.append((bad_minima[0]["idx"], other))
# Curve directly between bad minima
new_curves.append((bad_minima[0]["idx"], bad_minima[1]["idx"]))
# Random new curves
start = np.random.randint(len(state["minima"]))
other = np.random.randint(len(state["minima"]))
while start == other:
other = np.random.randint(len(state["minima"]))
new_curves.append((start, other))
start = np.random.randint(len(state["minima"]))
other = np.random.randint(len(state["minima"]))
while start == other:
other = np.random.randint(len(state["minima"]))
new_curves.append((start, other))
for start_idx, end_idx in new_curves:
start = LitModel.load_from_checkpoint(state["minima"][start_idx]["path"] + "/model.ckpt")
end = LitModel.load_from_checkpoint(state["minima"][end_idx]["path"] + "/model.ckpt")
path=f"curves/{len(state['paths'])}"
linear = LitModelConnect(start, end, 2)
trainer = pl.Trainer(logger=genLogger("linear", path), **TRAINER_ARGS)
lin_loss = testCurve(linear, trainer, curve_loader, test_loader)
trainer.save_checkpoint(path + "/linear.ckpt")
curve = LitModelConnect(start, end, CURVE_BENDS)
trainer = pl.Trainer(logger=genLogger("curve", path), **TRAINER_ARGS)
trainer.fit(curve, train_loader)
trainer.save_checkpoint(path + "/curve.ckpt")
curve_loss = testCurve(curve, trainer, curve_loader, test_loader)
curve_test_loss = testCurve(curve, trainer, test_loader, test_loader)
record = {"idx":len(state["paths"]), "path":path, "start":start_idx, "end":end_idx, "lin_loss":lin_loss, "curve_loss":curve_loss, "curve_test_loss":curve_test_loss}
state["paths"].append(record)
# Save state
pickle.dump(state, open('state.p', 'wb'))
|
jonasjuerss/mode-connectivity
|
james/main.py
|
main.py
|
py
| 12,280 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70073356027
|
from asyncio import coroutine, get_event_loop
from time import time
import requests
import json
class IngestaoDadosDogs:
def __init__(self, urls, dir):
self.urls = urls
self.dir = dir
@coroutine
def collect(self):
start = time()
loop = get_event_loop()
scrape_list = [loop.run_in_executor(None, requests.get, url) for url in self.urls]
for i, scrape in enumerate(scrape_list):
resp = yield from scrape
arquive = resp.json()
print(f'{i+1}: {resp.ok}')
            with open(self.dir+f'{i+1}_File.json', 'w+') as file:
json.dump(arquive, file)
termino = time() - start
        print(f'Operation finished: {termino:.2f} s')
# API that returns a random dog image from the internet!
url_list = ['https://dog.ceo/api/breeds/image/random' for n in range(0,10)]
# Directory where the JSON files will be saved
dir = "C:/Users/BlueShift/Desktop/Gustavo/Gustavo/MentoramaPythonPRO/MentoramaPythonPRO_MOD9/mod9pro/JsonConcorrentes/"
Dados = IngestaoDadosDogs(url_list, dir).collect()
loop = get_event_loop()
loop.run_until_complete(Dados)
|
gustavo-duarte-silva/MentoramaPythonPRO_MOD9
|
mod9pro/ScriptsAssincronos/scriptConcorrente.py
|
scriptConcorrente.py
|
py
| 1,155 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37187212499
|
# solved by satyam kumar (reference https://www.youtube.com/watch?v=cjWnW0hdF1Y)
# question link https://leetcode.com/problems/longest-increasing-subsequence/
class Solution:
def lengthOfLIS(self, nums: List[int]) -> int:
# creating cache
lst=[1]*len(nums)
# iterating from end
for i in range(len(nums)-1,-1,-1):
# iterating from ith index till end
for j in range(i+1,len(nums)):
# increasing condition
if nums[i]<nums[j]:
# update the list
lst[i]=max(lst[i],1+lst[j])
return max(lst)
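# This DP is O(n^2) time and O(n) space; an O(n log n) variant exists using
# a tails array with binary search (patience sorting).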
|
saty035/DSA_Python
|
Longest Increasing Subsequence_leetcode/Longest Increasing Subsequence.py
|
Longest Increasing Subsequence.py
|
py
| 675 |
python
|
en
|
code
| 2 |
github-code
|
6
|
38285343171
|
"""
Created on Fri Apr 24 11:34:48 2020
Matthew Irvine
1001401200
4/24/2020
Windows 10
"""
import os
def getFullSize(path):
#variables used to determine if the path is a file or a directory
isFile = os.path.isfile(path)
isDir = os.path.isdir(path)
#if it is a file
if isFile:
#calculate file size
tmpsize = os.path.getsize(path)
        #print the file and its corresponding size
print(path, end = ':\t')
print(tmpsize)
#return the size of the file
return tmpsize
elif isDir: #otherwise if it is a directory
#list all of the file and subdirectories
listd = os.listdir(path)
#used to keep track of the size of the entire directory
fullSize = 0
for l in listd:
#new absolute path for the entry
newpath = path + '\\' + l
#add the size of subdirectory or file of each entry
fullSize += getFullSize(newpath)
return fullSize
def main():
#get absolute path to current working directory
size = getFullSize(os.getcwd())
print("The full size of " + os.getcwd() + " is:")
print(size, end = ' ')
print("bytes")
if __name__ == '__main__':
main()
|
Tropolopolo/ProgrammingLanguages
|
mli1200_PA6/mli1200_PA6.py
|
mli1200_PA6.py
|
py
| 1,257 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27773589780
|
import os
import asyncio
from telepyrobot.setclient import TelePyroBot
from pyrogram import filters
from pyrogram.types import Message
from telepyrobot import COMMAND_HAND_LER
from telepyrobot.utils.pyrohelpers import ReplyCheck
__PLUGIN__ = os.path.basename(__file__.replace(".py", ""))
__help__ = f"""
`{COMMAND_HAND_LER}sdmsg <message> | <time in seconds>`
The command will automatically destruct the message after specified time.
"""
@TelePyroBot.on_message(filters.command("sdmsg", COMMAND_HAND_LER) & filters.me)
async def self_destruct(c: TelePyroBot, m: Message):
    parts = m.text.split(None, 1)
    input_str = parts[1] if len(parts) > 1 else ""
    rm = await m.edit_text("`Making self-destruct msg...`")
ttl = 0
if input_str:
if "=" in input_str:
msg, ttl = input_str.split("|")
else:
await m.reply_text("__Check help to know how to use__")
return
sd_msg = await m.reply_text(f"{msg}", reply_to_message_id=ReplyCheck(m))
await rm.delete()
await asyncio.sleep(int(ttl))
await sd_msg.delete()
else:
await m.edit_text("__Check help to know how to use__")
return
|
Divkix/TelePyroBot
|
telepyrobot/plugins/self_destruct.py
|
self_destruct.py
|
py
| 1,145 |
python
|
en
|
code
| 40 |
github-code
|
6
|
41045511776
|
#!/usr/bin/python3
# Simon, Simon, Silvija
number1 = 0
number2 = 0
operation = None
bAsk = 1
def add(number1, number2):
print(number1 + number2)
def subtract(number1, number2):
print(number1 - number2)
def multiply(number1, number2):
print(number1 * number2)
def divide(number1, number2):
try:
print(number1 / number2)
except ZeroDivisionError:
print("Hmm, that's not right ... can't divide by zero, right?\n")
def askNum():
while bAsk:
number1 = input("Give me the first number, please: ")
if (number1 == "done"):
print("See you!")
quit()
        try:
            number1 = float(number1)
        except ValueError:
            print("Whoa, this is not a float ...")
            number1 = float(input("Give me the first number, please: "))
operation = input("Operation: (+, -, *, /): ")
if (operation != "+" and operation != "-" and operation != "*" and operation != "/"):
print("That's not an operation I want!!!")
operation = input("Operation: (+, -, *, /): ")
if (operation == "done"):
print("Farewell, young traveler")
quit()
number2 = input("Would you give me the second number, please? ")
if (number2 == "done"):
print("Bye")
quit()
        try:
            number2 = float(number2)
        except ValueError:
            print("Sorry man, not a float or 'done'! ...\n")
            number2 = float(input("Would you give me the second number, please? "))
if operation == "+":
add(number1, number2)
elif operation == "-":
subtract(number1, number2)
elif operation == "*":
multiply(number1, number2)
elif operation == "/":
divide(number1, number2)
askNum()
|
simonSlamka/UCL-ITtech
|
programming/calc_pairChallenge.py
|
calc_pairChallenge.py
|
py
| 1,809 |
python
|
en
|
code
| 2 |
github-code
|
6
|
39620185683
|
# Import and define what the code needs
import pygame
import random
WIDTH = 880
HEIGHT = 660
from config import GAME, QUIT
def init_screen(screen):
    # Clock used to regulate the game speed
clock = pygame.time.Clock()
    # Load the start screen background
background = pygame.image.load('Flying_Fox_Game/assets/img/tela de inicio final.png').convert()
background = pygame.transform.scale(background, (WIDTH, HEIGHT))
background_rect = background.get_rect()
running = True
while running:
        # Regulate the game speed.
        clock.tick(60)
        # Process events (mouse, keyboard, buttons, etc.).
        for event in pygame.event.get():
            # Check whether the window was closed.
if event.type == pygame.QUIT:
state = QUIT
running = False
if event.type == pygame.KEYUP:
state = GAME
running = False
        # Each iteration, redraw the background and the sprites
#screen.fill(BLACK)
screen.blit(background, background_rect)
        # After drawing everything, flip the display.
pygame.display.flip()
return state
|
RodrigoAnciaes/Flying_Fox_game
|
Flying_Fox_Game/first_screen.py
|
first_screen.py
|
py
| 1,182 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
34375411916
|
#!/usr/bin/env python3
from curses import wrapper
from functools import partial
from Gamepad import Gamepad
import curses
import time
def spd_scale(y0, x0, y1, x1, x):
"""Get the speed at the given value of time.
NOTE: The output is not clamped,
regardless of the reference points given."""
# Two point form of a line
# y-y0 = y2-y1/x2-x1 (x-x0)
m = (y1 - y0) / (x1 - x0)
c = y0
return c + m * (x - x0)
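# Worked example: with the reference points used in main() below --
# partial(spd_scale, 100, 10, 0, 0), i.e. speed 100 at x=10 and speed 0 at
# x=0 -- spd_scale(100, 10, 0, 0, 5) evaluates to 50.0.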
def main(stdscr):
"""Main loop for listening to input events and displaying speed using curses.
Takes in the window created by curses.
Call this function using curses' wrapper function in order to
not mess up terminal state on exceptions."""
# Curses Prelude
curses.noecho()
curses.cbreak()
curses.curs_set(False) # Don't show the cursor
stdscr.keypad(True)
stdscr.clear()
stdscr.nodelay(True) # Don't wait for ENTER to read input
stdscr.border()
spd_scale_c = partial(spd_scale, 100, 10, 0, 0)
w_down_counter = 0
speed = spd_scale_c(w_down_counter)
stdscr.attrset(curses.color_pair(1))
stdscr.addstr(0, curses.COLS // 2, "Motor Control", curses.A_DIM)
if not Gamepad.available():
print("Couldn't find a gamepad")
while not Gamepad.available():
time.sleep(1.0)
gamepad = Gamepad.Xbox360()
gamepad.startBackgroundUpdates()
try:
        while gamepad.isConnected():
val = gamepad.axis("RT")
if gamepad.isPressed("A"):
w_down_counter = min(w_down_counter + 1, 10)
else:
w_down_counter = max(w_down_counter - 1, 0)
if gamepad.beenPressed("B"):
break
speed = spd_scale_c(w_down_counter)
stdscr.addstr(
4, curses.COLS // 2, f"Current speed is {speed}", curses.A_BOLD
)
            stdscr.refresh()
time.sleep(0.1)
finally:
gamepad.disconnect()
wrapper(main)
|
kknives/rudra-training
|
motor-control/js_control.py
|
js_control.py
|
py
| 2,024 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29477807046
|
"""
Portfolio: LongWrite
#100DaysOfCode with Python
Day: 89
Date: 2023-07-21
Author: MC
"""
from tkinter import *
from time import strftime, gmtime
from tkinter.messagebox import showinfo
from tkinter import filedialog
# ---------------------------- constants ------------------------------------ #
FONT_48 = ("NewYork", 48)
FONT_36 = ("NewYork", 36)
FONT_32 = ("NewYork", 32)
FONT_24 = ("NewYork", 24)
FONT_20 = ("NewYork", 20)
FONT_16 = ("NewYork", 16)
is_start_typing = False
is_long_timer_start = False
is_short_timer_start = False
my_timer = None
long_time_set = 60
short_time_set = 5
long_timer_curr_state = long_time_set
short_timer_curr_state = short_time_set
# ---------------------------- some func ------------------------------------ #
def text_input_key_enter(event):
    # key press event handler (bound to <Key> below)
global is_start_typing
global is_long_timer_start
global is_short_timer_start
if is_start_typing is True and is_long_timer_start is True:
reset_short_timer()
if is_start_typing is False:
is_start_typing = True
is_long_timer_start = True
is_short_timer_start = True
start_timers()
# start_short_timer(short_time_set)
#
# if is_start_typing is False:
# is_start_typing = True
# start_long_timer(long_time_set)
def refresh_timers(
long_timer_state,
short_timer_state
):
# refresh timers func
global long_timer_curr_state
global short_timer_curr_state
global my_timer
global is_long_timer_start
time_format = '%M:%S'
long_time_clock.config(
text=strftime(
time_format,
gmtime(long_timer_curr_state)
)
)
short_time_clock.config(
text=strftime(
time_format,
gmtime(short_timer_curr_state)
)
)
if short_timer_curr_state <= 0:
# cleaning text box
clear_text()
short_timer_curr_state = short_time_set
if long_timer_curr_state <= 0:
# active save btn
save_btn_activator()
is_long_timer_start = False
if long_timer_curr_state > 0:
long_timer_curr_state -= 1
short_timer_curr_state -= 1
print(f"{short_timer_curr_state}")
my_timer = window.after(
1000,
refresh_timers,
long_timer_curr_state,
short_timer_curr_state
# long_timer_curr_state - 0.001,
# short_timer_curr_state - 0.001
)
def start_timers():
refresh_timers(
long_timer_curr_state,
short_timer_curr_state
)
def reset_short_timer():
# reset short timer current state -> to test
global is_short_timer_start
global short_timer_curr_state
if is_short_timer_start is True:
short_timer_curr_state = short_time_set
def save_btn_activator():
# make save btn active
save_btn.config(
state='active'
)
def clear_text():
"""
clear text input field
"""
text_input.delete("1.0", END)
def save_work():
"""
save work :)
"""
    if len(text_input.get('1.0', END).strip()) > 0:
filetype = (
('txt file', '*.txt'),
)
try:
file = filedialog.asksaveasfile(filetypes=filetype)
if file is None:
raise ValueError("The file name must be set")
showinfo(
title="Info",
message="Your work was saved correctly ;)"
)
with open(file.name, 'w') as f:
f.writelines(text_input.get('1.0', END))
except Exception as e:
showinfo(
title="Error",
message=e.__str__()
)
# print(text_input.get('1.0', END))
# ---------------------------- UI ------------------------------------ #
# window ini
window = Tk()
window.title("Long Write Project by MC")
window.config(
padx=50,
pady=25,
bg="#323232"
)
window.minsize(
width=800,
height=600
)
# some controls
long_time_text_label = Label(
text="Long time",
font=FONT_20,
fg="white",
pady=15,
)
long_time_text_label.grid(
row=0,
column=0
)
long_time_clock = Label(
text="00:00",
font=FONT_24,
fg='white',
pady=15
)
long_time_clock.grid(
row=1,
column=0
)
short_time_text_label = Label(
text="Short time",
font=FONT_20,
fg='white',
pady=15
)
short_time_text_label.grid(
row=0,
column=1
)
short_time_clock = Label(
text="00:00",
font=FONT_24,
fg='red',
pady=15
)
short_time_clock.grid(
row=1,
column=1
)
# text
v = Scrollbar(
window,
orient='vertical'
)
text_input = Text(
window,
yscrollcommand=v.set,
font=FONT_20
)
text_input.grid(
row=2,
columnspan=2
)
# some buttons
# clear button
clear_btn = Button(
text="Clear text",
highlightthickness=0,
command=clear_text
)
clear_btn.grid(
row=3,
column=0
)
# save button
save_btn = Button(
text="Save work",
highlightthickness=0,
command=save_work,
state='disabled',
)
save_btn.grid(
row=3,
column=1
)
# ---------------------------- start window ------------------------------------ #
# on key event
text_input.bind("<Key>", text_input_key_enter)
window.mainloop()
|
chinek01/LongWrite
|
main.py
|
main.py
|
py
| 5,331 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40449028487
|
from ray.rllib.evaluation import MultiAgentEpisode, RolloutWorker
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.env import BaseEnv
from ray.rllib.policy import Policy
from typing import Dict
from ray.rllib.policy.sample_batch import SampleBatch
import numpy as np
import time
import csv
import os
def trainEvlogs2csv(folder_name,file_name,csvDict, n_episode):
fieldnames = list(csvDict.keys())
if n_episode == 1:
#print('this happens')
csvfile = open(folder_name + file_name, 'w', newline='')
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
else:
csvfile = open(folder_name + file_name, 'a', newline='')
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow(csvDict)
class MyCallbacks(DefaultCallbacks):
def on_episode_start(self, *, worker: RolloutWorker, base_env: BaseEnv,
policies: Dict[str, Policy],
episode: MultiAgentEpisode, **kwargs): # , env_index: int, **kwargs):
# print("episode {} (env-idx) started.".format(episode.episode_id))
episode.user_data["step"] = 0
#episode.user_data["nb_communications_episode"] = 0
#episode.user_data["collisions"] = 0
#episode.user_data["list_communications"] = []
#episode.user_data["comms_per_step"] = []
#episode.user_data["histogram_communications"] = np.zeros(policies["policy_0"].config["horizon"]+1)#np.zeros(101)
episode.user_data["secs_per_episode"] = time.time()
episode.user_data["mean_secs_per_ts"] = 0
episode.user_data["auxiliary_time"] = time.time()
episode.user_data["auxiliary_time_episode"] = time.time()
def on_episode_step(self, *, worker: RolloutWorker, base_env: BaseEnv,
episode: MultiAgentEpisode, **kwargs):
auxtime_ts = time.time() - episode.user_data["auxiliary_time"]
episode.user_data["mean_secs_per_ts"] += auxtime_ts
aux = episode.last_observation_for(0)
aux12 = aux.reshape((1,aux.shape[0]))
#aux2 = episode.last_action_for(0)
#aux3 = episode.last_info_for(0)
n_agents = worker.env.num_agents
episode.batch_builder.count += n_agents - 1 ### THIS ONE IS THE ONE TO CHANGE!!
#collisions = 0
if episode.user_data["step"] != 0:
for i in range(n_agents):
aux4 = episode.policy_for(i)
#episode.user_data["nb_communications_episode"] += episode.last_info_for(i)["communication"]
#collisions += episode.last_info_for(i)["collisions"]
#for ii in range(int(episode.last_info_for(i)["communication"])):
# episode.user_data["comms_per_step"].append(episode.user_data["step"])
#episode.user_data["histogram_communications"][episode.user_data["step"]] += episode.last_info_for(i)["communication"]
#episode.user_data["list_communications"].append(episode.user_data["nb_communications_episode"])
#episode.user_data["collisions"] += collisions / 2
#if episode.user_data["collisions"] != 0:
# print("COLLISION!")
episode.user_data["step"] += 1
#print(episode.user_data["step"])
debug = False
if debug:
workerdebug = worker
policy_index = worker.policies_to_train[0]
policydebug = worker.policy_map[policy_index]
wdebug = policydebug.get_weights()
predicate = policydebug.loss_initialized()
if predicate:
# overwrite default VF prediction with the central VF
#sample_batch[SampleBatch.VF_PREDS] = policy.compute_central_vf(
#sample_batch[SampleBatch.CUR_OBS], sample_batch[OPPONENT_OBS],
#sample_batch[OPPONENT_ACTION])
t1 = time.time()
#encoder_debug = policydebug.compute_encoding_layer(aux12)
#print(encoder_debug)
t1 = time.time()-t1
t2 = time.time()
action_debug = policydebug.compute_action(aux12)
output_inputs = policydebug.output_inputs(aux12)
t2 = time.time() - t2
#print("WEIGHTS")
#print(wdebug)
#print("END WEIGHTS")
episode.user_data["auxiliary_time"] = time.time()
def on_episode_end(self, *, worker: RolloutWorker, base_env: BaseEnv,
policies: Dict[str, Policy], episode: MultiAgentEpisode, **kwargs):
#horizon = policies["policy_0"].config["horizon"]
#print("episode time:",time.time() - episode.user_data["auxiliary_time_episode"])
policy_index = worker.policies_to_train[0]
episode.custom_metrics["secs_per_episode"] = time.time()-episode.user_data["secs_per_episode"]
#episode.custom_metrics["mean_secs_per_ts"] = episode.user_data["mean_secs_per_ts"] / policies["policy_0"].config["horizon"] #100.0
episode.custom_metrics["mean_secs_per_ts"] = episode.user_data["mean_secs_per_ts"] / \
policies[policy_index].config["horizon"] # 100.0
episode.custom_metrics["nb targets tracked"] = np.sum(worker.env.num_targets_tracked[0])
episode.custom_metrics["episode length"] = worker.env.steps
episode.custom_metrics["success_rate"] = worker.env.success
episode.custom_metrics["entropy"] = worker.env.last_entropy_log
if worker.env.ntargets == 3:
episode.custom_metrics["nb_targets_tracked_3targets"] = np.sum(worker.env.num_targets_tracked[0])
episode.custom_metrics["episode length_3targets"] = worker.env.steps
if worker.env.ntargets == 6:
episode.custom_metrics["nb_targets_tracked_6targets"] = np.sum(worker.env.num_targets_tracked[0])
episode.custom_metrics["episode length_6targets"] = worker.env.steps
if worker.env.ntargets == 9:
episode.custom_metrics["nb_targets_tracked_9targets"] = np.sum(worker.env.num_targets_tracked[0])
episode.custom_metrics["episode length_9targets"] = worker.env.steps
if worker.env.ntargets == 12:
episode.custom_metrics["nb_targets_tracked_12targets"] = np.sum(worker.env.num_targets_tracked[0])
episode.custom_metrics["episode length_12targets"] = worker.env.steps
#episode.custom_metrics["nb_communications_episode"] = episode.user_data["nb_communications_episode"]
#episode.custom_metrics["collisions"] = episode.user_data["collisions"]
#episode.hist_data["communications_histogram"] = episode.user_data["list_communications"]
#episode.hist_data["communications_per_step"] = episode.user_data["comms_per_step"]
#if episode.user_data["collisions"] == 0:
# episode.custom_metrics["success"] = 1
#else:
# episode.custom_metrics["success"] = 0
#episode.custom_metrics["scn_"+str(base_env.envs[0]._env.world.current_scenario)+"_success"] = episode.custom_metrics["success"]
#episode.custom_metrics["scn_" + str(base_env.envs[0]._env.world.current_scenario) + "_nb_comm_episode"] = episode.custom_metrics["nb_communications_episode"]
#nEpisodesxScenario = worker.policy_config['train_batch_size']/(base_env.envs[0]._env.num_agents*policies["policy_0"].config["horizon"])
#if base_env.envs[0]._env.episode_id % nEpisodesxScenario == 0 and base_env.envs[0]._env.world.test == 1: # Alternatively use the in_evaluation value in config
#if base_env.envs[0]._env.world.time_step % (worker.policy_config['train_batch_size']/base_env.envs[0]._env.num_agents) == 0 and worker.policy_config["model"]["custom_model_config"]["training"]==False: # Alternatively use the in_evaluation value in config
#if base_env.envs[0]._env.world.time_step % (worker.policy_config['train_batch_size']) == 0 and worker.policy_config["model"]["custom_model_config"]["training"] == False: # Alternatively use the in_evaluation value in config
#base_env.envs[0]._env.world.next_eval_scenario()
#base_env.envs[0]._env.world.set_eval_scenario()
# training_eval_logs = base_env.envs[0]._env.world.test
# if worker.policy_config["model"]["custom_model_config"]["training"]==False and training_eval_logs == True:
# goal_achieved = 1
# for i in range(worker.env.num_agents):
# if episode.last_info_for(i)["goal_achieved"] == 0:
# goal_achieved = 0
#
# if episode.custom_metrics["success"] == 0:
# goal_achieved = 0
#
# timesteps = episode.last_info_for(0)["step"]
#
# checkpoint = policies["policy_0"].config["model"]["custom_model_config"]["checkpoint"]
# scenario = base_env.envs[0]._env.world.current_scenario
# folder_name = base_env.envs[0]._env.folder_name + 'training_eval/'
# file_name = 'checkpoint_' + str(checkpoint) + '.csv'
# n_episode = base_env.envs[0]._env.episode_id
# csvDict = {
# 'scenario': scenario,
# 'ep_safety': episode.custom_metrics["success"],
# 'ep_nb_communications': episode.custom_metrics["nb_communications_episode"],
# 'ep_goal_achieved': goal_achieved,
# 'step': timesteps
# }
# for i in range(policies["policy_0"].config["horizon"]+1): #101):
# csvDict[str(i)] = episode.user_data["histogram_communications"][i]
#
# if not os.path.isdir(folder_name):
# os.makedirs(folder_name)
# trainEvlogs2csv(folder_name,file_name,csvDict, n_episode)
#print("episode END")
#"""
def on_sample_end(self, *, worker: RolloutWorker, samples: SampleBatch,
**kwargs):
print("returned sample batch of size {}".format(samples.count))
#"""
def on_train_result(self, *, trainer, result: dict, **kwargs):
#print("trainer.train() result: {} -> {} episodes".format(trainer, result["episodes_this_iter"]))
work = trainer.workers.local_worker()
policy_index = work.policies_to_train[0]
policydebug = work.policy_map[policy_index]
wdebug = policydebug.get_weights()
#kernelweights = wdebug['default_policy/dense/kernel']
#if np.any(np.isnan(kernelweights)):
# print("here's a nan")
# if trainer.config["model"]["custom_model_config"]["training"]:
# if 313 <= trainer.iteration < 313 * 2: # change distribution of sampled episodes 15000 episodes
# work.foreach_env(lambda env: env._env.world.set_scenario_distr([0.25, 0.75, 0]))
# if 313 * 2 <= trainer.iteration < 313 * 3: # change distribution of sampled episodes 15000 episodes
# work.foreach_env(lambda env: env._env.world.set_scenario_distr([0.125, 0.125, 0.75]))
# if 313 * 3 <= trainer.iteration < 313 * 4: # change distribution of sampled episodes 15000 episodes
# work.foreach_env(lambda env: env._env.world.set_scenario_distr([0.0, 0.25, 0.75]))
# if 313 * 4 <= trainer.iteration: # change distribution of sampled episodes 15000 episodes
# work.foreach_env(lambda env: env._env.world.set_scenario_distr([0.0, 0.0, 1.0]))
# you can mutate the result dict to add new fields to return
result["callback_ok"] = True
"""
def on_postprocess_trajectory(
self, *, worker: RolloutWorker, episode: MultiAgentEpisode,
agent_id: str, policy_id: str, policies: Dict[str, Policy],
postprocessed_batch: SampleBatch,
original_batches: Dict[str, SampleBatch], **kwargs):
print("postprocessed {} steps".format(postprocessed_batch.count))
if "num_batches" not in episode.custom_metrics:
episode.custom_metrics["num_batches"] = 0
episode.custom_metrics["num_batches"] += 1
#"""
|
tud-amr/AC-LCP
|
utils/callbacks.py
|
callbacks.py
|
py
| 12,175 |
python
|
en
|
code
| 2 |
github-code
|
6
|
9980232390
|
class Settings():
""" A class to store all the settings for our Alien Invasion Game. """
def __init__(self):
""" Initialize the game's settings """
#Screen settings
#Height and Width of our game screen.
self.screen_width = 1200
self.screen_height = 800
#Set the background color of the window
self.bg_color = (230,230,230)
#Ship Settings
# self.ship_speed_factor = 1.5
self.ship_limit = 3
#Bullet Settings(small rectangle)
# self.bullet_speed_factor = 3
self.bullet_width = 3
self.bullet_height = 15
self.bullet_color = 60, 60, 60
self.bullets_allowed = 3
#Alien Settings
# self.alien_speed_factor = 0.5
self.fleet_drop_speed = 10
# #fleets direction of 1 represents right and -1 represents left.
# self.fleet_direction = 1
#How quickly the game speeds up
self.speedup_scale = 1
#How quickly the alien point score scale increase
self.score_scale = 1.5
        self.initialize_dynamic_settings()
    def initialize_dynamic_settings(self):
"""Initialize settings that change throughout the game"""
self.ship_speed_factor = 1.5
self.bullet_speed_factor = 3
self.alien_speed_factor = 1
#fleets direction of 1 represents right and -1 represents left.
self.fleet_direction = 1
#Scoring
self.alien_points = 50
def increase_speed(self):
"""Increase speed settings and alien points value"""
self.ship_speed_factor *= self.speedup_scale
self.bullet_speed_factor *= self.speedup_scale
self.alien_speed_factor *= self.speedup_scale
self.alien_points = int(self.alien_points * self.score_scale)
print(self.alien_points)
|
mihirjadhav04/Alien_Invasion_Game
|
settings.py
|
settings.py
|
py
| 1,857 |
python
|
en
|
code
| 0 |
github-code
|
6
|
741344040
|
import os
from pytest_mock import MockFixture
from app.config import get_config, DevelopmentConfig, ProductionConfig, PRODUCTION, DEVELOPMENT
def test_get_config_in_development():
app_config = get_config()
assert app_config.env == DEVELOPMENT
assert isinstance(app_config, DevelopmentConfig)
def test_get_config_in_production(mocker: MockFixture):
mocked_environ = {
'ENV': 'PROD',
'API_URL': 'http://test.com',
'API_EMAIL': '[email protected]',
'API_PASSWORD': 'pass123',
'BROKER_URI': 'broker.com',
'BROKER_PORT': '1232'
}
mocker.patch.dict(os.environ, mocked_environ)
app_config = get_config()
assert app_config.env == PRODUCTION
assert isinstance(app_config, ProductionConfig)
|
hrozan/utfpr-final-paper
|
legacy/smart-object/tests/test_config.py
|
test_config.py
|
py
| 771 |
python
|
en
|
code
| 2 |
github-code
|
6
|
39620196443
|
# ===== Initialization =====
# ----- Import and initialize packages
import pygame
import random
pygame.init()
# ----- Create the main window
WIDTH = 880
HEIGHT = 800
window = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption('Flying_Fox')
gravity = 1
difficult = 0
# ----- Load assets
METEOR_WIDTH = 100
METEOR_HEIGHT = random.randint(300, 450)
font = pygame.font.SysFont(None, 48)
background = pygame.image.load('Folder_de_Testes/assets/img/snow_day.jpeg').convert()
background = pygame.transform.scale(background, (WIDTH, HEIGHT))
meteor_img = pygame.image.load('Folder_de_Testes/assets/img/Tree.png').convert_alpha()
meteor_img = pygame.transform.scale(meteor_img, (METEOR_WIDTH, METEOR_HEIGHT))
# ----- Initialize data structures
# Define the new types
class Fox(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
Fox_WIDTH = 70
Fox_HEIGHT = 70
Fox1 = pygame.image.load('Folder_de_Testes/assets/img/raposafinal.png').convert_alpha()
Fox1 = pygame.transform.scale(Fox1, (Fox_WIDTH, Fox_HEIGHT))
Fox2 = pygame.image.load('Folder_de_Testes/assets/img/snowflake.png').convert_alpha()
Fox2 = pygame.transform.scale(Fox2, (Fox_WIDTH, Fox_HEIGHT))
Fox3 = pygame.image.load('Folder_de_Testes/assets/img/Fox.jpeg').convert_alpha()
Fox3 = pygame.transform.scale(Fox3, (Fox_WIDTH, Fox_HEIGHT))
self.images = [Fox1,Fox2,Fox3]
self.image = Fox1
self.rect = self.image.get_rect()
self.rect.centerx = WIDTH / 4
self.rect.bottom = HEIGHT - 200
self.speedy = 1
self.now_on_windon = 0
def update(self):
self.rect.y += self.speedy
self.speedy += gravity + 0.1 * (-self.speedy)
self.now_on_windon = (self.now_on_windon + 1) % 3
self.image = self.images[self.now_on_windon]
self.mask = pygame.mask.from_surface(self.image)
        # Keep the fox inside the screen
        if self.rect.bottom > HEIGHT:
            self.rect.bottom = HEIGHT
            #game = False
        if self.rect.top < 0:
            self.rect.top = 0
def pulo(self):
self.speedy += -18
class Meteor(pygame.sprite.Sprite):
    def __init__(self, img):
        # Constructor of the parent class (Sprite).
        pygame.sprite.Sprite.__init__(self)
        self.image = img
        self.mask = pygame.mask.from_surface(self.image)
        self.rect = self.image.get_rect()
        self.rect.x = (WIDTH-METEOR_WIDTH)
        self.rect.y = random.randint(10,HEIGHT)
        self.speedx = random.randint(-5, -3)
def update(self):
        # Update the meteor's position
self.rect.x += self.speedx
        # If the meteor leaves the screen, send it back to the right edge
        # with a new random position and speed
if self.rect.top > HEIGHT or self.rect.right < 0 or self.rect.left > WIDTH:
self.rect.x = (WIDTH-METEOR_WIDTH)
self.rect.y = random.randint(10,HEIGHT)
self.speedx = random.randint(-5, -3)
game = True
# Clock used to regulate the frame rate
clock = pygame.time.Clock()
FPS = 15
# Creating the sprite groups
all_sprites = pygame.sprite.Group()
all_meteors = pygame.sprite.Group()
# Creating the player
player = Fox()
all_sprites.add(player)
# Creating the meteors
for i in range(2):
meteor = Meteor(meteor_img)
all_sprites.add(meteor)
all_meteors.add(meteor)
# ===== Main loop =====
while game:
fpdif = FPS + difficult
print(fpdif)
clock.tick(fpdif)
difficult += 0.01
    # ----- Handle events
for event in pygame.event.get():
        # ----- Check consequences
if event.type == pygame.QUIT:
game = False
        # Check whether a key was released.
        if event.type == pygame.KEYUP:
            # On space bar release, make the fox jump.
            if event.key == pygame.K_SPACE:
                player.pulo()
    # ----- Update game state
    # Update every sprite's position
all_sprites.update()
hits = pygame.sprite.spritecollide(player,all_meteors,True, pygame.sprite.collide_mask)
if len(hits) > 0:
game = False
    # ----- Render output
    window.fill((0, 0, 0)) # Fill with black
window.blit(background, (0, 0))
    # Draw all sprites
all_sprites.draw(window)
    pygame.display.update() # Show the new frame to the player
# ===== Shutdown =====
pygame.quit() # PyGame call that releases the resources used
|
RodrigoAnciaes/Flying_Fox_game
|
Folder_de_Testes/Flappy.py
|
Flappy.py
|
py
| 4,902 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15662314012
|
'''
The 'd' in projd stands for directory. Project based helper functions. A
project is a group of files and directories within a common root dir. This
module contains functions for finding the root directory, etc.
There are two sub-organizing principles seen in projects, based around how they
find the root directory of the project:
- In the "cwd" approach, exemplified by `git`, code and executables find the
project based on the current working directory. For example, to work in a
git repository, one must first `cd` somewhere into the repository directory
(or a subdir).
- In the "which" approach, exemplified by a django `manage.py` script, code and
executables find the project based on the path of the executable itself. For
example, a django `manage.py` script expects to be located in the root of
  the project.
The advantage to the "cwd" approach is that one set of binaries can be used
with multiple projects.
The advantages of the "which" approach are that one can run the binaries from
anywhere and that a different version of code/binaries can be associated with
each project.
'''
import os
import sys
#############################
# PROJECT DIRECTORY FUNCTIONS
def cwd_token_dir(token):
'''
Find the directory containing token, starting from the current working
directory.
Start walking up the directory tree from the current working directory and
return the first directory found that contains a filename, dirname, or
linkname that matches token. If no directory is found containing token,
return None.
:param token: a path basename.
'''
return token_dir(cwd_dir(), token)
def script_token_dir(token):
'''
Find the directory containing token, starting from the location of the
running script.
Start walking up the directory tree from the location of the currently
running script (as indicated by sys.argv[0]) and return the first directory
found that contains a filename, dirname, or linkname that matches token.
If no directory is found containing token, return None.
:param token: a path basename.
'''
return token_dir(script_dir(), token)
#####################
# AUXILIARY FUNCTIONS
def token_dir(path, token):
'''
Find the directory containing token, starting from path.
Start walking up the directory tree from path and return the first
directory found that contains a filename, dirname, or linkname that matches
token. If no directory is found containing token, return None.
:param path: a path at which to start searching for a directory containing
token.
:param token: a path basename.
'''
# make sure that token does not contain any directories.
basename = os.path.basename(token)
for d in walk_up(path):
if os.path.exists(os.path.join(d, basename)):
return d
return None
def script_dir(realpath=False):
'''
Return the script directory from sys.argv[0] as an absolute path using
os.path.abspath (default) or os.path.realpath (if realpath is True).
'''
d = os.path.dirname(sys.argv[0])
if realpath:
return os.path.realpath(d)
else:
return os.path.abspath(d)
def cwd_dir(realpath=False):
'''
Return the current working directory as an absolute path using
os.path.abspath (default) or os.path.realpath (if realpath is True).
Note that although the docs do not explicitly say so, os.getcwd() might
already return a real or absolute path, so this function might be
redundant.
'''
d = os.getcwd()
if realpath:
return os.path.realpath(d)
else:
return os.path.abspath(d)
def walk_up(path, realpath=False):
'''
First normalize path using os.path.expanduser, then os.path.abspath
(default) or os.path.realpath (if realpath is True). Then yield path and
every directory above path.
Example:
list(walk_up('~/tmp/./')) ->
['/Users/td23/tmp', '/Users/td23', '/Users', '/']
path: Should be a directory.
'''
if realpath:
curdir = os.path.realpath(os.path.expanduser(path))
else:
curdir = os.path.abspath(os.path.expanduser(path))
while 1:
yield curdir
curdir, tail = os.path.split(curdir)
if not tail:
break
def which(program):
'''
In python3.3, see shutil.which(). Emulate unix 'which' command. If program
contains any directories (e.g. './foo'), return program, else if program is
found on the PATH, return the absolute path to it, otherwise return None.
http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python/377028#377028
'''
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
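if __name__ == '__main__':
    # Illustrative usage (added example; '.git' is just a typical token to search for):
    print('project root from cwd:', cwd_token_dir('.git'))
    print('script directory:', script_dir())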
|
todddeluca/projd
|
projd.py
|
projd.py
|
py
| 5,109 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27241749881
|
import pygal
from random import randint
class Die:
def __init__(self, sides=6):
self.sides = sides
def roll(self):
return randint(1, self.sides)
die = Die(8)
die_1 = Die(8)
results = [die.roll() + die_1.roll() for x in range(1000)]
frequencies = [results.count(x) for x in range(2, 2 * die.sides + 1)]
# pygal bar chart
hist = pygal.Bar()
hist.title = 'Rolling two D8 dice 1000 times'
hist.x_labels = [str(x) for x in range(2, 2 * die.sides + 1)]
hist.x_title = 'Result'
hist.y_title = 'Frequency'
hist.add('2D8', frequencies)
hist.render_to_file('recap_die_2d8.svg')
|
mbrad26/Data-Vizualization
|
Recap/recap_die_2d8.py
|
recap_die_2d8.py
|
py
| 580 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4829544125
|
def main():
#fuel()
taqueria()
def fuel():
tank = float(tank_status()) * 100
if tank <= 1:
print('E')
elif tank >= 99:
print('F')
else:
print(str(int(tank)) + "%")
def tank_status():
while True:
fract = input("Fraction: ").split("/")
try:
gas = int(fract[0]) / int(fract[1])
        except (ValueError, ZeroDivisionError, IndexError):
            print("That's not a proper fraction")
continue
if gas <= 1:
return f'{gas:.2f}'
print("Can't overfill your gas tank")
def taqueria():
    while True:
        item = input("Item: ")
        pass  # TODO: total up menu prices until EOF (see the sketch below)
main()
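# (Added sketch) One way taqueria() could total an order, assuming a hypothetical
# MENU dict mapping item names to prices; EOF (Ctrl-D) ends the order:
#   total = 0.0
#   while True:
#       try:
#           item = input("Item: ").title()
#       except EOFError:
#           break
#       if item in MENU:
#           total += MENU[item]
#           print(f"Total: ${total:.2f}")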
|
Calvin-Spens/scripting
|
python_scripting/cs50_problems/problem_set_3.py
|
problem_set_3.py
|
py
| 637 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40464221544
|
import os.path
import re
import numpy as np
from PIL import Image
NUM_RE = re.compile(r'(\d+)')
maxint = 999999
WHITE_LIST_FORMATS = {'png', 'jpg', 'jpeg', 'bmp'}
def hstack_images(input_filenames, target_size=(224, 224)):
"""
Horizontally stack all images from @input_filenames in order and write to @output_filename
"""
images = list(map(lambda i: i.resize(target_size), map(Image.open, input_filenames)))
widths, heights = zip(*(i.size for i in images))
total_width = sum(widths)
max_height = max(heights)
new_im = Image.new('RGB', (total_width, max_height))
x_offset = 0
for im in images:
new_im.paste(im, (x_offset, 0))
x_offset += im.size[0]
return new_im
def should_include_image(path, start_num, end_num):
"""Returns true if an image path should be included in the set, false
otherwise.
"""
fname = path.lower()
for extension in WHITE_LIST_FORMATS:
if fname.endswith('.' + extension):
num_match = NUM_RE.search(fname)
if num_match:
num, = num_match.groups()
num = int(num, 10)
return start_num <= num <= end_num
return False
def flow_from_directory(directory, a, b, c, target_size=(224, 224)):
for dirpath, dirnames, fnames in os.walk(directory):
if len(dirnames) == 0:
# we are at a top-level directory, extract the images in our range
xs = []
ys = []
for fname in fnames:
if should_include_image(fname, a, b):
xs.append(os.path.join(dirpath, fname))
elif should_include_image(fname, b + 1, c):
ys.append(os.path.join(dirpath, fname))
xs.sort()
ys.sort()
if not xs or not ys:
continue
x_imgs = np.asarray(hstack_images(xs, target_size=target_size))
y_imgs = np.asarray(hstack_images(ys, target_size=target_size))
yield (x_imgs, y_imgs)
|
eklitzke/dnn-fastai-project
|
vidextend/flow.py
|
flow.py
|
py
| 2,047 |
python
|
en
|
code
| 2 |
github-code
|
6
|
34107122191
|
from flask import g, request, current_app
import iot_api_core
import time
class InstanceVersionBaseBehavior():
def __init__(self, widget_type, namespace, instance_id):
self.widget_type = widget_type
self.namespace = namespace
self.instance_id = instance_id
self.lumavate = iot_api_core.Lumavate()
self.temp = {}
self.post_version_create_handlers = []
self.experience_info = None
self._instance = None
self.status_id = None
data = request.get_json(silent=True)
if data:
self.status_id = data.get('statusId')
if self.namespace is None:
self.namespace = self.rest_get_single(self.instance_id)['namespace']
@property
def properties(self):
return []
@property
def components(self):
return []
@property
def page_security_property(self):
page_security = {
'always': 'Always Render',
'user-logged-in': 'Only Render When User Logged In',
'user-not-logged-in': 'Only Render When User NOT Logged In',
'prod-registered': 'Only Render When Product Registered',
'device-authorized': 'Only Render When Device Authorized',
'prod-not-registered': 'Only Render When Product NOT Registered',
'device-not-authorized': 'Only Render When Device NOT Authorized',
}
return iot_api_core.DropdownProperty('General', 'General Settings', 'pageSecurity', 'Page Security', self, default='always', options=page_security)
@property
def experience_id(self):
self.load_experience_info()
return self.experience_info.get('id')
@property
def model_id(self):
self.load_experience_info()
return self.experience_info.get('modelId')
def load(self, version_name):
instance = self.rest_get_single(self.instance_id)
data = instance[version_name + 'Version']['data']
for x in self.properties:
if not x.name.startswith('instance__'):
x.read(data)
def get_general_properties(self, include_auth=False):
return [
self.instance_name_property(),
self.instance_page_type_property(include_auth=include_auth),
iot_api_core.ToggleProperty('General', 'General Settings', 'displayBackgroundImage', 'Display Background Image', self, default=False),
iot_api_core.ImageProperty('General', 'General Settings', 'backgroundImage', 'Background Image', self),
iot_api_core.ColorProperty('General', 'General Settings', 'backgroundColor', 'Background Color', self, default='#e2e2e2'),
self.page_security_property
]
def load_experience_info(self):
if not self.experience_info:
results = self.lumavate.get('/iot/v1/experiences?siteName=' + self.namespace)
if len(results) > 0:
self.experience_info = results[0]
self.model_info = self.lumavate.get('/iot/v1/models/' + str(self.experience_info['modelId']))
else:
self.experience_info = {}
self.model_info = {}
def rest_get_single(self, id):
if self._instance is None:
self._instance = self.lumavate.get('/iot/v1/widget-instances/' + str(id))
return self._instance
def get_version_id(self, instance_id, version_name):
instance = self.rest_get_single(instance_id)
return str(instance[version_name + 'VersionId'])
def get_property(self, name):
return next((p for p in self.properties if p.name == name), None)
def get_collection_rest_uri(self):
return '/iot/v1/widget-instances/' + str(self.instance_id) + '/versions'
def get_single_rest_uri(self, version_id):
return self.get_collection_rest_uri() + '/' + str(version_id)
def rest_get_collection(self):
return self.lumavate.get(self.get_collection_rest_uri())
def instance_name_property(self):
instance = self.rest_get_single(self.instance_id)
return iot_api_core.TextProperty('General', 'General Settings', 'instance__name', 'Page Name', self, rows=0, default=instance['name'])
def resolve_images(self, data):
if isinstance(data, dict):
if 'key' in data and 'versionId' in data:
image_data = self.lumavate.post('/iot/v1/files/preview/' + data['key'] + '/' + data['versionId'], {})
for f in ['contentType', 'url', 'mobileUrl']:
if f in data:
image_data[f] = data.get(f)
return image_data
else:
return { k: self.resolve_images(data[k]) for k in data }
elif isinstance(data, list):
return [self.resolve_images(x) for x in data]
else:
return data
def collapse_language(self, data):
lang = 'en-us'
if isinstance(data, dict):
if lang in data:
return data[lang]
else:
return { k: self.collapse_language(data[k]) for k in data }
elif isinstance(data, list):
return [self.collapse_language(x) for x in data]
else:
return data
def instance_page_type_property(self, include_auth=False):
page_types = {
'home': 'Home',
'registration': 'Registration',
'auth': 'Auth',
'error': 'Error',
'normal': '<Normal>'
}
if include_auth == False:
del page_types['auth']
return iot_api_core.DropdownProperty('General', 'General Settings', 'pageType', 'Page Type', self, default='normal', options=page_types)
def rest_create(self):
instance = self.rest_get_single(self.instance_id)
if instance:
payload = {'data': self.validate_data(request.get_json())}
results = self.lumavate.post(self.get_collection_rest_uri() + '-direct', payload)
instance['futureVersionId'] = results['id']
for vch in self.post_version_create_handlers:
vch()
self.post_version_create_handlers = []
if instance.get('futureVersion') is not None:
results['delta'] = self.get_delta_document(instance.get('futureVersion').get('data'), results['data'])
else:
results['delta'] = results.get('data')
return results
def background(self, function, args=[], kwargs={}):
data = {
'namespace': self.namespace,
'widgetInstanceId': self.instance_id,
'widgetType': current_app.config['WIDGET_ID'],
'method': function.__name__,
'args': args,
'kwargs': kwargs,
'statusId': self.status_id
}
self.lumavate.post('/iot/v1/background', data)
def run_background(self):
self.lumavate.put('/iot/v1/statuses/' + self.status_id, {'percent': 100})
return {'a': 4}
data = request.get_json()
method = data.get('method')
args = data.get('args')
kwargs = data.get('kwargs')
getattr(self, method)(*args, **kwargs)
def publish(self, version_name):
instance = self.rest_get_single(self.instance_id)
if not instance:
return
if version_name not in ['future', 'draft', 'production', 'current']:
return
version = instance.get(version_name + 'Version')
#for x in self.properties:
# x.read(version['data'])
# x.publish()
if self.status_id:
self.lumavate.put('/iot/v1/statuses/' + self.status_id, {'percent': 100})
else:
print('NOPE', flush=True)
return {'Status': 'Ok'}
def store_data(self, category, record_id, data, latitude=None, longitude=None, version='future'):
version_id = self.get_version_id(self.instance_id, version)
payload = {
'recordId': str(record_id),
'data': data,
'latitude': latitude,
'longitude': longitude
}
return self.lumavate.post(self.get_single_rest_uri(version_id) + '/data/' + category, payload)
def clear_data(self, category, version='future'):
version_id = self.get_version_id(self.instance_id, version)
return self.lumavate.delete(self.get_single_rest_uri(version_id) + '/data/' + category)
def get_current_version(self):
if hasattr(g, 'iot_context'):
return g.iot_context['token_data']['version']
else:
return 'future'
def get_data(self, category, default=[], qs=''):
version_id = self.get_version_id(self.instance_id, self.get_current_version())
res = self.lumavate.get('/iot/v1/widget-instances/' + str(self.instance_id) + '/versions/' + version_id + '/data/' + category + '?' + qs)
for x in res:
if 'distance' in x:
x['data']['distance'] = x['distance']
res = [x['data'] for x in res]
if len(res) == 0:
return default
else:
return res
def load_activation_info(self):
api_result = {}
try:
api_result = self.lumavate.get('/iot/v1/labels/' + str(g.iot_context['token_data'].get('activationId', 0)))
except Exception as e:
pass
return {
'key': api_result.get('key'),
'serialNumber': api_result.get('serialNumber')
}
def get_config_data(self):
# Check if there is a valid version for the current context
instance = self.rest_get_single(self.instance_id)
if instance[self.get_current_version() + 'Version'] is None:
raise Exception('Version ' + self.get_current_version() + ' does not exist for instance ' + str(self.instance_id))
result = instance[self.get_current_version() + 'Version']['data']
# Is there any activation to report?
result['activation'] = self.load_activation_info()
# Are there any 'special' pages that the UI should know about?
result['authCheck'] = None
if g.iot_context['token_data'].get('authUrl') is not None:
root, part, instance = g.iot_context['token_data']['authUrl'].rpartition('/')
result['authCheck'] = '{}/api/instances/{}/check-login-status'.format(root, instance)
return result
def default_image_data(self, data, prop):
return {
'preview': data.get(prop, {}).get('preview', '/icons/iot/page/api/instances/icons/no_image_available.png'),
'previewLarge': data.get(prop, {}).get('previewLarge', '/icons/iot/page/api/instances/icons/no_image_available.png'),
'previewMedium': data.get(prop, {}).get('previewMedium', '/icons/iot/page/api/instances/icons/no_image_available.png'),
'previewSmall': data.get(prop, {}).get('previewSmall', '/icons/iot/page/api/instances/icons/no_image_available.png')
}
def get_delta_document(self, original, current):
if original is None:
return current
else:
result = {}
for x in self.properties:
dd = x.delta_doc(original.get(x.name), current.get(x.name))
if dd is not None:
result[x.name] = dd
return result
def validate_data(self, data):
result = {}
instance_payload = {}
for x in self.properties:
if x.name.startswith('instance__'):
field = x.name.split('__')[-1]
new_val = x.read(data)
if self._instance[field] != new_val:
self._instance[field] = new_val
instance_payload[x.name.split('__')[-1]] = x.read(data)
else:
result[x.name] = x.read(data)
if len(instance_payload.keys()) > 0:
self.lumavate.put('/iot/v1/widget-instances/' + str(self.instance_id), instance_payload)
return result
def get_all_components(self):
return [
{
'label': x.instantiate().label,
'type': x.instantiate().component_type,
'icon': x.instantiate().icon_url,
'section': x.instantiate().section,
'category': x.instantiate().category
} for x in self.components
]
def get_component_properties(self, component_type):
comp = next((x.instantiate() for x in self.components if x.instantiate().component_type == component_type), None)
if comp:
return comp.get_properties()
def get_component_property(self, component_type, property_name):
comp = next((x.instantiate() for x in self.components if x.instantiate().component_type == component_type), None)
if comp:
return comp.get_property(property_name)
def get_widget_properties(self):
return [x.to_json() for x in self.properties]
def handle_language_fields(self, data):
lang = 'en-us'
if isinstance(data, dict):
if lang in data:
return self.handle_language_fields(data[lang])
else:
return { k: self.handle_language_fields(data[k]) for k in data }
if isinstance(data, list):
return [ self.handle_language_fields(x) for x in data ]
else:
return data
|
Lumavate-Team/python-hello
|
app/iot_api_core/instance_version_base.py
|
instance_version_base.py
|
py
| 12,129 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29358868643
|
from sqlalchemy.orm import joinedload
from clld.db.util import get_distinct_values
from clld.db.models import common
from clld.web import datatables
from clld.web.datatables.base import LinkCol, Col, LinkToMapCol
from clld.web.datatables.parameter import Parameters
from clld.web.datatables.value import Values, ValueNameCol
from clld_glottologfamily_plugin.models import Family
from clld_glottologfamily_plugin.datatables import FamilyCol
from plansa import models
class Languages(datatables.Languages):
def base_query(self, query):
return query.join(Family).options(joinedload(models.Variety.family)).distinct()
def col_defs(self):
return [
LinkCol(self, 'name'),
FamilyCol(self, 'Family', models.Variety),
Col(self,
'latitude',
sDescription='<small>The geographic latitude</small>'),
Col(self,
'longitude',
sDescription='<small>The geographic longitude</small>'),
LinkToMapCol(self, 'm'),
]
class Taxa(Parameters):
def col_defs(self):
res = [
LinkCol(self, 'name'),
Col(self, 'english', model_col=models.Taxon.name_english),
]
for label, col in [
('kingdom', models.Taxon.kingdom),
('phylum', models.Taxon.phylum),
('class', models.Taxon.class_),
('order', models.Taxon.order),
('family', models.Taxon.family),
#('genus', models.Taxon.genus),
]:
res.append(Col(self, label, model_col=col, choices=get_distinct_values(col)))
return res
class Names(Values):
def col_defs(self):
if self.language:
return [
ValueNameCol(self, 'value'),
LinkCol(self,
'parameter',
sTitle='Scientific name',
model_col=common.Parameter.name,
get_object=lambda i: i.valueset.parameter),
Col(self,
'english',
model_col=models.Taxon.name_english,
get_object=lambda i: i.valueset.parameter),
Col(self,
'spanish',
model_col=models.Taxon.name_spanish,
get_object=lambda i: i.valueset.parameter),
Col(self,
'portuguese',
model_col=models.Taxon.name_portuguese,
get_object=lambda i: i.valueset.parameter),
]
return Values.col_defs(self)
def includeme(config):
config.register_datatable('values', Names)
config.register_datatable('parameters', Taxa)
config.register_datatable('languages', Languages)
|
tsammalex/plansa
|
plansa/datatables.py
|
datatables.py
|
py
| 2,802 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5759869864
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
#%% Import the data (read the image)
img = mpimg.imread('../Data/indice.png')
plt.imshow(img)
#%% Reshape the image into a single table
d = img.shape
img_col = np.reshape(img,(d[0]*d[1],d[2]))
#%% Center the data to zero mean
media = img_col.mean(axis=0)
img_m = img_col - media
#%% Compute the covariance matrix
img_cov = np.cov(img_m,rowvar=False)
#%% Compute eigenvalues and eigenvectors
w,v = np.linalg.eig(img_cov)
#%% Analyze the principal components (fraction of total variance per component)
porcentaje = w/np.sum(w)
#%% Compress the image (project onto the first component)
componentes = w[0]
M_trans = np.reshape(v[:,0],(4,1))
img_new = np.matrix(img_m)*np.matrix(M_trans)
#%% Reconstruct the image and visualize it
img_recuperada = np.matrix(img_new)*np.matrix(M_trans.transpose())
img_recuperada = img_recuperada+media
img_r = img.copy()
for i in np.arange(4):
img_r[:,:,i] = img_recuperada[:,i].reshape((d[0],d[1]))
plt.subplot(121)
plt.imshow(img)
plt.subplot(122)
plt.imshow(img_r)
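#%% (Added illustrative cell) fraction of the total variance kept by the one component
print('explained variance of component 0:', porcentaje[0])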
|
OscarFlores-IFi/CDINP19
|
code/p13.py
|
p13.py
|
py
| 1,039 |
python
|
es
|
code
| 0 |
github-code
|
6
|
72370696507
|
from django.urls import path
from products.views import (
CreateProduct,
CreateOption,
Products,
UpdateProduct,
Option,
UpdateOption,
)
urlpatterns = [
path("", Products.as_view()),
path("create/product", CreateProduct.as_view()),
path("update/product/<int:pk>", UpdateProduct.as_view()),
path("options", Option.as_view()),
path("create/option", CreateOption.as_view()),
path("update/option/<int:pk>", UpdateOption.as_view()),
]
|
ohnas/Manager-backend
|
products/urls.py
|
urls.py
|
py
| 478 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8595213301
|
import os, shutil
import pandas as pd
import pymssql
from sqlalchemy import create_engine
backupPath='files-backup'
def get_xlsx_to_dataframe(filename):
    # read the spreadsheet, then move the source file into the backup folder
    df = pd.read_excel(filename)
    if not os.path.exists(backupPath):
        os.mkdir(backupPath)
    shutil.move(filename, os.path.join(backupPath, os.path.basename(filename)))
    return df
def dataframe_to_mssql(df, tablename):
engine = create_engine('mssql+pymssql://sa:[email protected]/database?tds_version=7.0')
df.to_sql(tablename, engine, if_exists='append', index=False)
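# Illustrative usage (added example; the file and table names are hypothetical):
#   df = get_xlsx_to_dataframe('report.xlsx')   # also moves the file into files-backup/
#   dataframe_to_mssql(df, 'report')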
|
zjz7304/xlsx_to_database
|
util.py
|
util.py
|
py
| 666 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2687351802
|
import matplotlib.pyplot as plt
import random as rnd
N = 100
Nw = 1000
def wedrowniczek(length):  # "wanderer": a simple 4-direction random walk of 'length' steps
x0, y0 = 0,0
x,y = x0,y0
walkx,walky = [x],[y]
for i in range(length):
rand = rnd.randint(1,4)
if rand == 1:
x += 1
elif rand == 2:
y += 1
elif rand == 3:
x += -1
elif rand == 4:
y += -1
walkx.append(x)
walky.append(y)
return [walkx,walky]
lastPositions = []
for i in range(Nw):
walk = wedrowniczek(N)
lastPositions.append([walk[0][-1],walk[1][-1]])
plt.plot(walk[0],walk[1])
lastPositionInStart = 0
for lastPos in lastPositions:
if lastPos[0] == 0 and lastPos[-1] == 0:
lastPositionInStart += 1
print(lastPositionInStart)
plt.axis([-30,30,-30,30])
plt.grid()
plt.show()
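# (Added note) For this 4-direction walk the probability of ending back at the
# origin after 2n steps is (C(2n, n) / 4**n) ** 2; with N = 100 that is about
# 0.0063, so roughly 6 of the Nw = 1000 walkers are expected to finish at (0, 0).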
|
filipmalecki94/Computer_modeling
|
lista3/zadanie1.py
|
zadanie1.py
|
py
| 784 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3081251304
|
# -*- coding: utf-8 -*
"""Graphics
.. module:: graphics
:synopsis: Module for creating graphs
"""
# imports
import matplotlib.pyplot as plt
import numpy as np
import resoncalc.output as output
from math import ceil
from csv import DictReader
# globals
n_points = 1000 # count of points in graph
n_rows = 20.0 # count of rows in graph legend
def plot_fem_base_function(func, a, b):
"""Plot base function
Args:
func (func): base function
a (float): left boundary of interval
b (float): right boundary of interval
"""
# coordinates init
x = np.linspace(np.real(a), np.real(b), n_points)
n = len(x)
y = np.zeros(n, dtype=float)
# calculate function values
for i in range(n):
y[i] = np.real(func(x[i]))
# create graph
plt.plot(x, y)
plt.title('FEM base function')
plt.tight_layout()
plt.show()
def plot_fem_base(funcs, a, b):
"""Plot base functions
Args:
funcs (list): base functions
a (float): left boundary of interval
b (float): right boundary of interval
"""
# coordinate init
x = np.linspace(np.real(a), np.real(b), n_points)
n = len(x)
# function loop
for i in range(len(funcs)):
# coordinate init
y = np.zeros(n, dtype=float)
# bridging function
if (type(funcs[i]) is list):
for j in range(n):
y[j] = np.real(funcs[i][0](x[j]) + funcs[i][1](x[j]))
# function within element
else:
for j in range(n):
y[j] = np.real(funcs[i](x[j]))
# plot function
plt.plot(x, y, label=i+2)
# create graph
plt.title('FEM base functions')
plt.legend(bbox_to_anchor=(1, 1), borderaxespad=0.0, ncol=ceil(len(funcs)/n_rows))
plt.tight_layout()
plt.show()
def plot_potential(potential, a, b, *params):
"""Plot potential
Args:
potential (func): potential function
a (float): left boundary of interval
b (float): right boundary of interval
params (args): potential specific parameters
"""
# coordinates init
x = np.linspace(np.real(a), np.real(b), n_points)
n = len(x)
y = np.zeros(n, dtype=float)
# plot potential
for i in range(n):
y[i] = potential(x[i], *params)
plt.plot(x, y)
# create graph
plt.title('Potential')
plt.xlabel('x')
plt.ylabel('E')
plt.grid()
plt.tight_layout()
plt.show()
def plot_eigenstates(potential, eigenvalues, a, b, emax, fname='', *params):
"""Plot eigenstates for given potential
Args:
potential (func): potential function
eigenvalues (list): bound states
a (float): left boundary of interval
b (float): right boundary of interval
emax (float): maximum energy in atomic units
fname (str): export filename
params (args): potential specific parameters
"""
# coordinates init
eigenvalues = np.real(eigenvalues)
x = np.linspace(a, b, n_points)
n = len(x)
y = np.zeros(n, dtype=float)
# plot potential
for i in range(n):
y[i] = potential(x[i], *params)
plt.plot(x, y)
# plot eigenstates
for i in range(len(eigenvalues)):
z = np.zeros(n, dtype=float)
val = eigenvalues[i]
bound_state = True if (val < 0.0) else False
cross = 0
# line segment representing eigenstate
for j in range(n):
# intersection points with potential
if (val > y[j]):
if (cross == 0 or (cross == 2 and not bound_state)):
cross += 1
elif (cross == 1):
cross += 1
# plot line
if ((bound_state and cross == 1) or (not bound_state and cross == 2)):
z[j] = val
else:
z[j] = np.nan
plt.plot(x, z, 'r', label='E{0} = {1:e}'.format(i+1, val))
# create graph
plt.xlim(left=a, right=b)
plt.ylim(bottom=None, top=emax)
plt.title('Eigenstates')
plt.xlabel('x')
plt.ylabel('E')
plt.grid()
plt.legend(bbox_to_anchor=(1, 1), borderaxespad=0.0, ncol=ceil(len(eigenvalues)/n_rows))
plt.tight_layout()
# export graph
if (len(fname) > 0):
plt.savefig(fname)
plt.close()
# display graph
else:
plt.show()
def plot_complex_spectrum(eigenvalues, eigenvalues2=[], states=None, fname=''):
"""Plot complex spectrum used in ECS method
Args:
        eigenvalues (list): eigenvalues for first angle of rotation
eigenvalues2 (list): eigenvalues for second angle of rotation, default empty
states (list): highlighted eigenstates
fname (str): export filename
"""
# plot first eigenvalues
x = []
y = []
if (states is None):
x = np.real(eigenvalues)
y = np.imag(eigenvalues)
else:
limit = np.abs(states[0])
for val in eigenvalues:
if (np.real(val) <= limit):
x.append(np.real(val))
y.append(np.imag(val))
plt.plot(x, y, 'b.')
# plot second eigenvalues
x = []
y = []
if (len(eigenvalues2) > 0):
if (states is None):
x = np.real(eigenvalues2)
y = np.imag(eigenvalues2)
else:
limit = np.abs(states[0])
for val in eigenvalues2:
if (np.real(val) <= limit):
x.append(np.real(val))
y.append(np.imag(val))
plt.plot(x, y, 'g.')
# highlight eigenstates
if (states is not None):
for state in states:
plt.plot(np.real(state), np.imag(state), 'ro', label='real={0:e}, imag={1:e}'.format(np.real(state), np.imag(state)))
plt.legend(loc='lower left')
# create graph
plt.title('Complex spectrum')
plt.xlabel('Re')
plt.ylabel('Im')
plt.grid()
# export graph
if (len(fname) > 0):
plt.savefig(fname)
plt.close()
# display graph
else:
plt.show()
def plot_resonances_complex(infile, outfile=''):
"""Plot resonances in complex plane
Args:
infile (str): input filename
outfile (str): output filename, default empty
"""
# parse csv file
with open(infile, 'r') as f:
reader = DictReader(f)
states = {}
# get resonance states, group by param2
for row in reader:
if (row['type'] == 'resonance'):
param2 = row['param2']
if (not param2 in states):
states[param2] = []
states[param2].append(float(row['real']) + 1j*float(row['imag']))
# plot state trajectories
for k, v in states.items():
plt.plot(np.real(v), np.imag(v), 'b-')
# create graph
plt.title('Resonance states')
plt.xlabel('Re')
plt.ylabel('Im')
plt.grid()
# export graph
if (len(outfile) > 0):
plt.savefig(outfile)
plt.close()
# display graph
else:
plt.show()
def plot_resonances_params(infile, energy=True, outfile=''):
"""Plot resonances according to parameters
Args:
infile (str): input filename
energy (bool): energy or width, default energy
outfile (str): output filename, default empty
"""
# parse csv file
with open(infile, 'r') as f:
reader = DictReader(f)
param1, param2, value = [], [], []
# get resonance states
for row in reader:
if (row['type'] == 'resonance'):
param1.append(float(row['param1']))
param2.append(float(row['param2']))
value.append(float(row['real']) if (energy) else -0.5 * float(row['imag']))
# create graph
plt.scatter(param1, param2, c=value, cmap=plt.cm.rainbow)
plt.colorbar()
plt.title('Resonance states {0}'.format('energy' if (energy) else 'width'))
plt.xlabel('param a')
plt.ylabel('param b')
plt.grid()
# export graph
if (len(outfile) > 0):
plt.savefig(outfile)
plt.close()
# display graph
else:
plt.show()
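# Illustrative usage (added example; the quadratic potential and its stiffness
# parameter are hypothetical, not part of this module):
#   plot_potential(lambda x, k: 0.5 * k * x**2, -5.0, 5.0, 1.0)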
|
hydratk/resoncalc
|
src/resoncalc/graphics.py
|
graphics.py
|
py
| 8,384 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75401661626
|
import numpy as np
import time
import torch
from multiprocessing import Array, Manager
from dgl.dataloading.dataloader import GraphCollator
from collections import deque, namedtuple
from gp.utils.datasets import DatasetWithCollate
"""
Following namedtuple makes data collating nad referencing easier
"""
GraphLabelNT = namedtuple("GraphLabelNT", ["g", "labels", "index"])
ReplayBatch = namedtuple(
"ReplayBatch",
["g", "plabel", "nlabel", "select_node", "move_node", "reward"],
)
GraphNMLabel = namedtuple("GraphNMLabel", ["g", "rlabel", "labels", "index"])
ReplayBatchHist = namedtuple(
"ReplayBatchHist",
[
"g",
"plabel",
"nlabel",
"select_node",
"move_node",
"reward",
"context",
],
)
ReplayBatchSM = namedtuple(
"ReplayBatchSM",
[
"g",
"plabel",
"nlabel",
"select_node",
"move_node",
"reward",
],
)
class GraphLabelDataset(DatasetWithCollate):
def __init__(self, graphs, labels, ind=None) -> None:
super().__init__()
self.graphs = graphs
self.labels = labels
if ind is None:
self.ind = np.arange(len(self.graphs))
else:
self.ind = ind
def __getitem__(self, index):
return GraphLabelNT(
self.graphs[index], np.array([self.labels[index]]), self.ind[index]
)
def __len__(self):
return len(self.graphs)
def get_collate_fn(self):
return GraphCollator().collate
class GraphNMDataset(DatasetWithCollate):
def __init__(self, graphs, labels, r_labels) -> None:
super().__init__()
self.graphs = graphs
self.labels = labels
self.rlabels = r_labels
def __getitem__(self, index):
return GraphNMLabel(
self.graphs[index],
np.array(self.rlabels[index]),
np.array([self.labels[index]]),
index,
)
def __len__(self):
return len(self.graphs)
def get_collate_fn(self):
return GraphCollator().collate
class GraphReplayBuffer:
def __init__(self, capacity):
self.capacity = capacity
self.buffer = np.empty(capacity, dtype=object)
self.size_count = 0
self.pointer = 0
def __len__(self):
return self.size_count
def add(self, experience):
if isinstance(experience, list):
ct = len(experience)
self.size_count += ct
next_pointer = self.pointer + ct
if next_pointer > self.capacity:
reminder = self.capacity - self.pointer
self.buffer[self.pointer :] = experience[:reminder]
self.pointer = 0
next_pointer = next_pointer - self.capacity
ct = next_pointer
experience = experience[reminder:]
self.buffer[self.pointer : next_pointer] = experience
self.pointer = next_pointer
self.size_count = min(self.size_count, self.capacity)
        else:
            # single experience: store at the write pointer, advance with wrap-around
            self.buffer[self.pointer] = experience
            self.pointer = (self.pointer + 1) % self.capacity
            self.size_count = min(self.size_count + 1, self.capacity)
def sample(self, batch_size):
if len(self) < batch_size:
return None
indices = np.random.choice(len(self), batch_size, replace=True)
return [self.buffer[idx] for idx in indices]
    def reset(self):
        # restore the empty ring-buffer state set up in __init__
        self.buffer = np.empty(self.capacity, dtype=object)
        self.size_count = 0
        self.pointer = 0
class ReplayDataset(DatasetWithCollate):
def __init__(self, graphs, buffer, replay_size=1, replay_type=ReplayBatch):
super().__init__()
self.graphs = graphs
self.buffer = buffer
self.replay_size = replay_size
self.replay_type = replay_type
def __getitem__(self, index):
# print(len(self.buffer))
replay = self.buffer.sample(1)[0]
replay_graph = self.graphs[int(replay[0])]
return self.replay_type(replay_graph, *map(np.array, replay[1:]))
def __len__(self):
return len(self.graphs) * self.replay_size
def get_collate_fn(self):
return GraphCollator().collate
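# Illustrative ring-buffer behaviour (added example; the stored tuples are hypothetical):
#   buf = GraphReplayBuffer(capacity=3)
#   buf.add([('g0',), ('g1',), ('g2',), ('g3',)])   # fourth entry wraps over the first
#   assert len(buf) == 3
#   batch = buf.sample(2)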
|
LechengKong/MAG-GNN
|
dataset.py
|
dataset.py
|
py
| 4,231 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18524960895
|
exp_name = 'cdr_0fold'
_base_ = [
'../_base_/models/late_integration_drp/base_deepCDR.py',
'../_base_/dataset/drp_dataset/drugs_genes_dataset.py',
'../_base_/exp_setting/base_setting.py',
'../_base_/default_runtime.py'
]
data = dict(
train=dict(
celllines_data='data/processed_raw_data/564_cellGraphs_exp_mu_cn_new.npy',
include_omic=['expr', 'mut', 'dna'],
),
val=dict(
celllines_data='data/processed_raw_data/564_cellGraphs_exp_mu_cn_new.npy',
include_omic=['expr', 'mut', 'dna'],
),
test=dict(
celllines_data='data/processed_raw_data/564_cellGraphs_exp_mu_cn_new.npy',
include_omic=['expr', 'mut', 'dna'],
),
)
custom_hooks = [
dict(type='TensorboardXHook',
priority=85,
log_dir='/data/xieyufeng/genes_drug_data/result/',
interval=5000,
exp_name=exp_name,
ignore_last=True,
reset_flag=False,
by_epoch=False
),
]
work_dir = f'workdir/{exp_name}'
|
yivan-WYYGDSG/AGMI
|
configs/deepcdr/cdr.py
|
cdr.py
|
py
| 1,014 |
python
|
en
|
code
| 1 |
github-code
|
6
|
3025420311
|
import sys
file = str(sys.argv[1]) # file passed through command line
with open(file) as f:
    lines = f.read().split('\n')  # avoid shadowing the built-in input()
def detector(data: list, size: int) -> list:
pos = []
for line in data:
for i in range(len(line)):
seq = line[i:i+size]
if len(set(seq)) == len(seq):
pos.append(i + size)
break
return pos
# 6.1
print(f'6.1: {detector(lines, 4)[0]}')
# 6.2
print(f'6.2: {detector(lines, 14)[0]}')
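# Worked example (the published Advent of Code 2022 day 6 sample):
#   detector(['mjqjpqmgbljsphdztnvjfqwrcgsmlb'], 4)  -> [7]
#   detector(['mjqjpqmgbljsphdztnvjfqwrcgsmlb'], 14) -> [19]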
|
cclark20/aoc
|
solutions/day_06.py
|
day_06.py
|
py
| 483 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5564785730
|
from src.core.interfaces.model_abc import ModelAbstract
from src.core.interfaces.repository_abc import RepositoryAbstract
from src.shared import parse_config
from sqlalchemy.orm import Session
import logging
import logging.config
from typing import List
from datetime import datetime
class SqlAlchemyRepository(RepositoryAbstract):
def __init__(self, session, object_type) -> None:
self.session: Session = session
self.object_type: ModelAbstract = object_type
logging.config.dictConfig(parse_config.get_logger_config())
self.logger = logging.getLogger("dev")
self.logger.debug("{} repository instanciated".format(object_type.__name__))
    def get_by_filter(self, filter) -> ModelAbstract:
        """This function retrieves the objects matching the provided filter.
        Returns:
            The matching objects (list)
        """
        self.logger.info("get_by_filter Request")
        query = self.session.query(self.object_type).filter_by(**filter)
        result = query.all()
        return result
def get_by_id(self, target_id: int) -> ModelAbstract:
"""This function retrieves an object from the database.
Return:
The object (ModelAbstract)
"""
self.logger.info("get_by_id Request")
result = self.session.query(self.object_type).get(target_id)
return result
def get_all(self) -> List[ModelAbstract]:
"""This function retrieves all the objects in the repository.
Returns:
A list of the objects (List[ModelAbstract])
"""
self.logger.info("get_all Request")
result = self.session.query(self.object_type).all()
return result
def delete_by_filter(self, filter) -> int:
"""This function deletes an object using the provided filter.
Returns:
Affected rows provided by the database engine (int)
"""
self.logger.info("delete_by_filter Request")
result = self.session.query(self.object_type).filter(filter).delete()
return result
def delete_by_id(self, target_id: int) -> int:
"""This function deletes an object by it's id.
Returns:
Affected rows provided by the database engine (int)
"""
self.logger.info("delete_by_id Request")
result = self.session.query(self.object_type).filter(self.object_type.id == target_id).delete()
self.session.commit()
return result
def insert(self, new: ModelAbstract) -> int:
"""This function inserts an object to the database.
Returns:
Inserted object id (int)
"""
self.logger.info("insert Request")
self.session.add(new)
self.session.commit()
return new.id
    def update(self, values_to_update, requested_object) -> ModelAbstract:
        """This function applies the provided non-None values to an object.
        Returns:
            The updated object (ModelAbstract)
        """
        self.logger.info("update Request")
for key, value in values_to_update:
if value is not None:
setattr(requested_object, key, value)
setattr(requested_object, "modified_at", datetime.now())
self.session.commit()
return requested_object
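# Illustrative usage (added example; `session` and `User` are hypothetical):
#   repo = SqlAlchemyRepository(session, User)
#   new_id = repo.insert(User(name='alice'))
#   user = repo.get_by_id(new_id)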
|
ricky-codes/APIGest
|
src/infrastructure/services/repository.py
|
repository.py
|
py
| 3,071 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17591759593
|
import pathlib
from PIL import Image
class image_to_c_array:
def __init__(self, image_path, output_path, format_bytes_count, char_array_name, include_header_guard=False, include_header_guard_name=None, reset_output_file=True):
self.image_path = image_path
self.output_path = output_path
self.file_bytes = pathlib.Path(image_path).read_bytes()
self.char_array_name = char_array_name
self.format_bytes_count = format_bytes_count
self.include_header_guard_name = include_header_guard_name
self.include_header_guard = include_header_guard
self.reset_output_file = reset_output_file
        self.file_bytes_len = len(self.file_bytes)
def grab_string(self):
if self.include_header_guard:
data = "#ifndef {}\n#define {}\nunsigned char {}[{}] = {{\n\t".format(self.include_header_guard_name, self.include_header_guard_name, self.char_array_name,self.file_bytes_len)
else:
data = "unsigned char {}[{}] = {{\n\t".format(self.char_array_name, self.file_bytes_len)
count = 0
for x in self.file_bytes:
if count == self.format_bytes_count:
data += "\n\t"
count = 0
temp_data = str(hex(x))
if len(temp_data) == 3:
temp_data = temp_data.replace("0x", "0x0")
data += temp_data.upper() + ", "
count += 1
        if self.include_header_guard:
data += "\n};\n#endif\n"
else:
data += "\n};\n\n"
return data
def save(self):
if self.reset_output_file:
open(self.output_path, 'w+').close()
with open(self.output_path, 'a+') as f:
f.write(self.grab_string())
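# Illustrative usage (added example; the file names are hypothetical):
#   image_to_c_array('logo.png', 'logo.h', format_bytes_count=12,
#                    char_array_name='logo_png', include_header_guard=True,
#                    include_header_guard_name='LOGO_PNG_H').save()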
|
0xRooted/File-To-C-Array
|
filetocarray.py
|
filetocarray.py
|
py
| 1,846 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26112891425
|
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "07/01/2019"
import os
import logging
import weakref
from . import qt
import silx.resources
from silx.utils import weakref as silxweakref
_logger = logging.getLogger(__name__)
"""Module logger"""
_cached_icons = None
"""Cache loaded icons in a weak structure"""
def getIconCache():
"""Get access to all cached icons
:rtype: dict
"""
global _cached_icons
if _cached_icons is None:
_cached_icons = weakref.WeakValueDictionary()
# Clean up the cache before leaving the application
# See https://github.com/silx-kit/silx/issues/1771
qt.QApplication.instance().aboutToQuit.connect(cleanIconCache)
return _cached_icons
def cleanIconCache():
"""Clean up the icon cache"""
_logger.debug("Clean up icon cache")
_cached_icons.clear()
_supported_formats = None
"""Order of file format extension to check"""
class AbstractAnimatedIcon(qt.QObject):
"""Store an animated icon.
    It provides an event containing the new icon every time it is updated."""
def __init__(self, parent=None):
"""Constructor
:param qt.QObject parent: Parent of the QObject
:raises: ValueError when name is not known
"""
qt.QObject.__init__(self, parent)
self.__targets = silxweakref.WeakList()
self.__currentIcon = None
iconChanged = qt.Signal(qt.QIcon)
"""Signal sent with a QIcon everytime the animation changed."""
def register(self, obj):
"""Register an object to the AbstractAnimatedIcon.
        If no objects are registered, the animation is paused.
        Objects are stored in a weak list.
:param object obj: An object
"""
if obj not in self.__targets:
self.__targets.append(obj)
self._updateState()
def unregister(self, obj):
"""Remove the object from the registration.
        If no objects are registered, the animation is paused.
:param object obj: A registered object
"""
if obj in self.__targets:
self.__targets.remove(obj)
self._updateState()
def hasRegistredObjects(self):
"""Returns true if any object is registred.
:rtype: bool
"""
return len(self.__targets)
def isRegistered(self, obj):
"""Returns true if the object is registred in the AbstractAnimatedIcon.
:param object obj: An object
:rtype: bool
"""
return obj in self.__targets
def currentIcon(self):
"""Returns the icon of the current frame.
:rtype: qt.QIcon
"""
return self.__currentIcon
def _updateState(self):
"""Update the object according to the connected objects."""
pass
def _setCurrentIcon(self, icon):
"""Store the current icon and emit a `iconChanged` event.
:param qt.QIcon icon: The current icon
"""
self.__currentIcon = icon
self.iconChanged.emit(self.__currentIcon)
class MovieAnimatedIcon(AbstractAnimatedIcon):
"""Store a looping QMovie to provide icons for each frames.
Provides an event with the new icon everytime the movie frame
is updated."""
def __init__(self, filename, parent=None):
"""Constructor
:param str filename: An icon name to an animated format
:param qt.QObject parent: Parent of the QObject
:raises: ValueError when name is not known
"""
AbstractAnimatedIcon.__init__(self, parent)
qfile = getQFile(filename)
self.__movie = qt.QMovie(qfile.fileName(), qt.QByteArray(), parent)
self.__movie.setCacheMode(qt.QMovie.CacheAll)
self.__movie.frameChanged.connect(self.__frameChanged)
self.__cacheIcons = {}
self.__movie.jumpToFrame(0)
self.__updateIconAtFrame(0)
def __frameChanged(self, frameId):
"""Callback everytime the QMovie frame change
:param int frameId: Current frame id
"""
self.__updateIconAtFrame(frameId)
def __updateIconAtFrame(self, frameId):
"""
Update the current stored QIcon
:param int frameId: Current frame id
"""
if frameId in self.__cacheIcons:
icon = self.__cacheIcons[frameId]
else:
icon = qt.QIcon(self.__movie.currentPixmap())
self.__cacheIcons[frameId] = icon
self._setCurrentIcon(icon)
def _updateState(self):
"""Update the movie play according to internal stat of the
MovieAnimatedIcon."""
self.__movie.setPaused(not self.hasRegistredObjects())
class MultiImageAnimatedIcon(AbstractAnimatedIcon):
"""Store a looping QMovie to provide icons for each frames.
Provides an event with the new icon everytime the movie frame
is updated."""
def __init__(self, filename, parent=None):
"""Constructor
:param str filename: An icon name to an animated format
:param qt.QObject parent: Parent of the QObject
:raises: ValueError when name is not known
"""
AbstractAnimatedIcon.__init__(self, parent)
self.__frames = []
for i in range(100):
try:
frame_filename = os.sep.join((filename, ("%02d" %i)))
frame_file = getQFile(frame_filename)
except ValueError:
break
try:
icon = qt.QIcon(frame_file.fileName())
except ValueError:
break
self.__frames.append(icon)
if len(self.__frames) == 0:
raise ValueError("Animated icon '%s' do not exists" % filename)
self.__frameId = -1
self.__timer = qt.QTimer(self)
self.__timer.timeout.connect(self.__increaseFrame)
self.__updateIconAtFrame(0)
def __increaseFrame(self):
"""Callback called every timer timeout to change the current frame of
the animation
"""
frameId = (self.__frameId + 1) % len(self.__frames)
self.__updateIconAtFrame(frameId)
def __updateIconAtFrame(self, frameId):
"""
Update the current stored QIcon
:param int frameId: Current frame id
"""
self.__frameId = frameId
icon = self.__frames[frameId]
self._setCurrentIcon(icon)
def _updateState(self):
"""Update the object to wake up or sleep it according to its use."""
if self.hasRegistredObjects():
if not self.__timer.isActive():
self.__timer.start(100)
else:
if self.__timer.isActive():
self.__timer.stop()
def getWaitIcon():
"""Returns a cached version of the waiting AbstractAnimatedIcon.
:rtype: AbstractAnimatedIcon
"""
return getAnimatedIcon("process-working")
def getAnimatedIcon(name):
"""Create an AbstractAnimatedIcon from a resource name.
The resource name can be prefixed by the name of a resource directory. For
example "silx:foo.png" identify the resource "foo.png" from the resource
directory "silx".
    If no prefix is specified, the file will be returned from the silx
resource directory with a specific path "gui/icons".
See also :func:`silx.resources.register_resource_directory`.
Try to load a mng or a gif file, then try to load a multi-image animated
icon.
In Qt5 mng or gif are not used, because the transparency is not very well
managed.
:param str name: Name of the icon, in one of the defined icons
in this module.
:return: Corresponding AbstractAnimatedIcon
:raises: ValueError when name is not known
"""
key = name + "__anim"
cached_icons = getIconCache()
if key not in cached_icons:
qtMajorVersion = int(qt.qVersion().split(".")[0])
icon = None
# ignore mng and gif in Qt5
if qtMajorVersion != 5:
try:
icon = MovieAnimatedIcon(name)
except ValueError:
icon = None
if icon is None:
try:
icon = MultiImageAnimatedIcon(name)
except ValueError:
icon = None
if icon is None:
raise ValueError("Not an animated icon name: %s", name)
cached_icons[key] = icon
else:
icon = cached_icons[key]
return icon
def getQIcon(name):
"""Create a QIcon from its name.
The resource name can be prefixed by the name of a resource directory. For
example "silx:foo.png" identify the resource "foo.png" from the resource
directory "silx".
    If no prefix is specified, the file will be returned from the silx
resource directory with a specific path "gui/icons".
See also :func:`silx.resources.register_resource_directory`.
:param str name: Name of the icon, in one of the defined icons
in this module.
:return: Corresponding QIcon
:raises: ValueError when name is not known
"""
cached_icons = getIconCache()
if name not in cached_icons:
qfile = getQFile(name)
icon = qt.QIcon(qfile.fileName())
cached_icons[name] = icon
else:
icon = cached_icons[name]
return icon
def getQPixmap(name):
"""Create a QPixmap from its name.
The resource name can be prefixed by the name of a resource directory. For
example "silx:foo.png" identify the resource "foo.png" from the resource
directory "silx".
    If no prefix is specified, the file will be returned from the silx
resource directory with a specific path "gui/icons".
See also :func:`silx.resources.register_resource_directory`.
:param str name: Name of the icon, in one of the defined icons
in this module.
:return: Corresponding QPixmap
:raises: ValueError when name is not known
"""
qfile = getQFile(name)
return qt.QPixmap(qfile.fileName())
def getQFile(name):
"""Create a QFile from an icon name. Filename is found
according to supported Qt formats.
The resource name can be prefixed by the name of a resource directory. For
example "silx:foo.png" identify the resource "foo.png" from the resource
directory "silx".
    If no prefix is specified, the file will be returned from the silx
resource directory with a specific path "gui/icons".
See also :func:`silx.resources.register_resource_directory`.
:param str name: Name of the icon, in one of the defined icons
in this module.
:return: Corresponding QFile
:rtype: qt.QFile
:raises: ValueError when name is not known
"""
global _supported_formats
if _supported_formats is None:
_supported_formats = []
supported_formats = qt.supportedImageFormats()
order = ["mng", "gif", "svg", "png", "jpg"]
for format_ in order:
if format_ in supported_formats:
_supported_formats.append(format_)
if len(_supported_formats) == 0:
_logger.error("No format supported for icons")
else:
_logger.debug("Format %s supported", ", ".join(_supported_formats))
for format_ in _supported_formats:
format_ = str(format_)
filename = silx.resources._resource_filename('%s.%s' % (name, format_),
default_directory='gui/icons')
qfile = qt.QFile(filename)
if qfile.exists():
return qfile
_logger.debug("File '%s' not found.", filename)
raise ValueError('Not an icon name: %s' % name)
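# Illustrative usage (added example; requires a running QApplication, and the icon
# name here is hypothetical -- it must match a resource shipped with silx):
#   icon = getQIcon('document-open')
#   spinner = getWaitIcon()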
|
silx-kit/silx
|
src/silx/gui/icons.py
|
icons.py
|
py
| 11,642 |
python
|
en
|
code
| 106 |
github-code
|
6
|
38076886961
|
import os
from pathlib import Path
from tempfile import NamedTemporaryFile
import pytest
import pytest_mock
from werkzeug.exceptions import NotFound
from iceart.models import Artist, ArtistDto, ArtistViewModel
def test_artist_vm_init():
with pytest.raises(NotFound):
ArtistViewModel(-1)
def test_artist_vm_search_key():
assert ArtistViewModel(44).search_key() == {"_id": 44}
def test_artist_init():
# Arrange
data = {"_id": 44, "title": "t", "info": "i", "file": "f", "paintings": [5, 3, 77]}
# Act
model = Artist(data)
# Assert
assert model.identity == 44
assert model.title == "t"
assert model.info == "i"
assert model.file == "f"
assert model.paintings == [5, 3, 77]
def test_artist_dto_init():
# Arrange
data = {"_id": 44, "title": "t", "info": "i", "file": "f", "paintings": [5, 3, 77]}
model = Artist(data)
with NamedTemporaryFile("w+b", delete=False) as tmp_file:
tmp_file.write(b"\xab\xef")
p = Path(tmp_file.name)
# Act
with pytest_mock.mock.patch("pathlib.Path.joinpath", return_value=p):
dto = ArtistDto(model, {1: "img1", 2: "img2"})
# Assert
assert dto.image == "q+8="
assert dto.id == 44
assert dto.title == "t"
assert dto.info == "i"
assert dto.as_json() == {
"id": 44,
"title": "t",
"info": "i",
"image": "q+8=",
"paintings": {1: "img1", 2: "img2"},
}
assert dto.paintings == {1: "img1", 2: "img2"}
# Cleanup
os.remove(p)
|
JonSteinn/iceart_api
|
tests/test_models/test_artist.py
|
test_artist.py
|
py
| 1,537 |
python
|
en
|
code
| 1 |
github-code
|
6
|
35522645807
|
import requests
from time import sleep
timeToWait = 300 # Time to wait between callouts (in seconds)
while (True):
# Get list of commands to run this callout
URL = "https://slack.flemingcaleb.com:5000/api/agent/4/command/"
r = requests.get(url=URL)
if r.status_code == requests.codes.ok:
# Process the list of requests
print(r)
elif r.status_code == requests.codes.not_found:
# No list this time
print("No list this time")
else:
        # Handle unexpected errors
print("ERROR")
sleep(timeToWait)
|
flemingcaleb/InfraBot
|
agent/agent.py
|
agent.py
|
py
| 567 |
python
|
en
|
code
| 3 |
github-code
|
6
|
72330655547
|
# WRITE YOUR SOLUTION HERE:
def summa(luku: int):
    # when the number is 1 (or less), there is nothing more to add...
if luku <= 1:
return luku
return luku + summa(luku - 1)
if __name__ == "__main__":
tulos = summa(3)
print(tulos)
print(summa(5))
print(summa(10))
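# (Added worked trace) summa(3) = 3 + summa(2) = 3 + 2 + summa(1) = 3 + 2 + 1 = 6,
# so the three prints above output 6, 15 and 55.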
|
jevgenix/Python_OOP
|
osa11-14_rekursiivinen_summa/src/rekursiivinen_summa.py
|
rekursiivinen_summa.py
|
py
| 278 |
python
|
fi
|
code
| 4 |
github-code
|
6
|
39756253957
|
"""
Plot a grid on H2
with Poincare Disk visualization.
"""
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
import geomstats.visualization as visualization
from geomstats.geometry.hyperbolic import Hyperbolic
H2 = Hyperbolic(dimension=2)
METRIC = H2.metric
def main(left=-128,
right=128,
bottom=-128,
top=128,
grid_size=32,
n_steps=512):
starts = []
ends = []
for p in np.linspace(left, right, grid_size):
starts.append(np.array([top, p]))
ends.append(np.array([bottom, p]))
for p in np.linspace(top, bottom, grid_size):
starts.append(np.array([p, left]))
ends.append(np.array([p, right]))
starts = [H2.intrinsic_to_extrinsic_coords(s) for s in starts]
ends = [H2.intrinsic_to_extrinsic_coords(e) for e in ends]
ax = plt.gca()
for start, end in zip(starts, ends):
geodesic = METRIC.geodesic(initial_point=start,
end_point=end)
t = np.linspace(0, 1, n_steps)
points_to_plot = geodesic(t)
visualization.plot(
points_to_plot, ax=ax, space='H2_poincare_disk', marker='.', s=1)
plt.show()
if __name__ == "__main__":
    if os.environ.get('GEOMSTATS_BACKEND') == 'tensorflow':
logging.info('Examples with visualizations are only implemented '
'with numpy backend.\n'
'To change backend, write: '
'export GEOMSTATS_BACKEND = \'numpy\'.')
else:
main()
|
hhajri/geomstats
|
examples/plot_grid_h2.py
|
plot_grid_h2.py
|
py
| 1,552 |
python
|
en
|
code
| null |
github-code
|
6
|
1004630370
|
"""
DESKRIPSI SOAL
Buatlah program yang menerima 3 buah input nilai dan outputkan jumlah maksimal
dari 2 bilangannya ! diantara ketiga input tersebut.
PETUNJUK MASUKAN
Input terdiri atas 3 angka dalam 1 baris
PETUNJUK KELUARAN
Outputkan angka jumlah terbesar dari 2 angka
"""
a, b, c = list(map(int, input().split()))
sum_values = 0
if a >= b:
if b >= c: # a > b > c
sum_values = a + b
    else: # c > b: the two largest are a and c
sum_values = a + c
else: # b > a
if a >= c: # b > a > c
sum_values = a + b
else: # b > c > a
sum_values = b + c
print(sum_values)
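# (Added note) An equivalent one-liner: the sum of the two largest of three
# numbers is the total minus the smallest, i.e. a + b + c - min(a, b, c).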
|
refeed/PAlgoritmaTRPLA
|
OKT_22_2020/uts_problem_i.py
|
uts_problem_i.py
|
py
| 595 |
python
|
id
|
code
| 0 |
github-code
|
6
|
74056199547
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .activations import ACTIVATIONS
class Embedding(nn.Module):
'''
Abstract base class for any module that embeds a collection of N vertices into
N hidden states
'''
def __init__(self, features, hidden, **kwargs):
super().__init__()
self.features = features
self.hidden = hidden
    def forward(self, x):
        raise NotImplementedError
class Constant(Embedding):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, x):
return x
class OneLayer(Embedding):
def __init__(self, features, hidden, act=None, wn=False, **kwargs):
super().__init__(features, hidden)
self.fc = nn.Linear(features, hidden)
if wn:
self.fc = nn.utils.weight_norm(self.fc, name='weight')
self.activation = ACTIVATIONS[act]()
def forward(self, x):
return self.activation(self.fc(x))
class TwoLayer(Embedding):
def __init__(self, features, hidden, act=None, wn=False, **kwargs):
super().__init__(features, hidden)
self.e1 = OneLayer(features, hidden, act, wn)
self.e2 = OneLayer(hidden, hidden, act, wn)
    def forward(self, x):
        # apply the features -> hidden layer first, then hidden -> hidden
        return self.e2(self.e1(x))
class NLayer(nn.Module):
def __init__(self, dim_in=None, dim_out=None, n_layers=None, dim_hidden=None, act=None, wn=False, **kwargs):
super().__init__()
self.activation = ACTIVATIONS[act]()
if dim_hidden is None:
dim_hidden = dim_out
dims = [dim_in] + [dim_hidden] * (n_layers-1) + [dim_out]
self.fcs = nn.ModuleList()
for i in range(len(dims)-1):
fc = nn.Linear(dims[i], dims[i+1])
if wn:
fc = nn.utils.weight_norm(fc, name='weight')
self.fcs.append(fc)
def forward(self, x):
for fc in self.fcs:
x = fc(x)
x = self.activation(x)
return x
EMBEDDINGS = dict(
n=NLayer,
one=OneLayer,
two=TwoLayer,
const=Constant,
)
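
# Usage sketch (an assumption, not from the original repo): 'relu' is assumed
# to be a valid key of ACTIVATIONS, and the sizes are made up.
# import torch
# embed = EMBEDDINGS['two'](features=4, hidden=64, act='relu', wn=True)
# h = embed(torch.randn(10, 4))  # -> tensor of shape (10, 64)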
|
isaachenrion/jets
|
src/architectures/embedding/embedding.py
|
embedding.py
|
py
| 2,081 |
python
|
en
|
code
| 9 |
github-code
|
6
|
31146266095
|
import tensorflow as tf
import keras.backend as K
def huber_loss(y_true, y_pred):
return tf.losses.huber_loss(y_true, y_pred)
def adjust_binary_cross_entropy(y_true, y_pred):
return K.binary_crossentropy(y_true, K.pow(y_pred, 2))
def MMD_Loss_func(num_source, sigmas=None):
if sigmas is None:
sigmas = [1, 5, 10]
def loss(y_true, y_pred):
cost = []
for i in range(num_source):
for j in range(num_source):
domain_i = tf.where(tf.equal(y_true, i))[:, 0]
domain_j = tf.where(tf.equal(y_true, j))[:, 0]
single_res = mmd_two_distribution(K.gather(y_pred, domain_i),
K.gather(y_pred, domain_j),
sigmas=sigmas)
cost.append(single_res)
cost = K.concatenate(cost)
return K.mean(cost)
return loss
def mmd_two_distribution(source, target, sigmas):
sigmas = K.constant(sigmas)
xy = rbf_kernel(source, target, sigmas)
xx = rbf_kernel(source, source, sigmas)
yy = rbf_kernel(target, target, sigmas)
return xx + yy - 2 * xy
def rbf_kernel(x, y, sigmas):
beta = 1. / (2. * (tf.expand_dims(sigmas, 1)))
dist = compute_pairwise_distances(x, y)
dot = -K.dot(beta, K.reshape(dist, (1, -1)))
exp = K.exp(dot)
return K.mean(exp, keepdims=True)
def compute_pairwise_distances(x, y):
norm = lambda x: K.sum(K.square(x), axis=1)
return norm(K.expand_dims(x, 2) - K.transpose(y))
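
# Usage sketch (an assumption, not from the original repo): y_true is expected
# to carry integer domain labels in [0, num_source) and y_pred the embeddings
# whose cross-domain MMD is penalized.
# model.compile(optimizer='adam', loss=MMD_Loss_func(num_source=3))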
|
rs-dl/MMD-DRCN
|
customLoss.py
|
customLoss.py
|
py
| 1,575 |
python
|
en
|
code
| 7 |
github-code
|
6
|
14255581243
|
from selenium import webdriver
import time, re
from bs4 import BeautifulSoup
import pyautogui
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
import pyperclip
import os
# Main purpose: visit the detail page of every 300MIUM video and download each cover image
class Crawl_51luxu:
def main(self, Dir='F:\\pic\\', page=1, category='300MIUM'):
current_path = os.getcwd().replace('\\', '/') + '/'
# custom_path = 'F:\\pic\\300MIUM\\'
custom_path = Dir + category + "\\"
chrome_opts = webdriver.ChromeOptions()
chrome_opts.add_argument("--headless")
chrome_opts.add_experimental_option(
'excludeSwitches', ['enable-logging'])
url = 'https://www.51luxu.com/category/sresource/' + category + '/page/' + str(page)
def open_browser(url):
driver = webdriver.Chrome(options=chrome_opts)
driver.get(url)
return driver
def scrapy(driver):
if not os.path.exists(Dir):
os.mkdir(Dir)
if not os.path.exists(custom_path):
os.mkdir(custom_path)
Exist = []
if os.path.exists(custom_path + 'history.txt'):
with open(custom_path + 'history.txt','r+') as f:
lines = f.readlines()
for line in lines:
Exist.append(line.replace("\n",""))
f.close()
            # Read the names of previously downloaded images from history.txt so they are not fetched again.
            # When filtering, good images are kept and bad ones deleted, so the directory listing
            # alone is unreliable; the txt history records which titles were already downloaded.
for page in range(1,100):
try:
content = driver.page_source.encode('utf-8')
soup = BeautifulSoup(content, 'lxml')
img = soup.find_all('img')
src1 = re.findall(r'src=".*?"', str(img))
name1 = re.findall(r'alt=".*?"', str(img))
src2 = []
name2 = []
for i in src1:
src2.append(i.split('=')[1].replace("\"",""))
for i in name1:
name2.append(i.split('=')[1].replace("\"", ""))
if category == "Scute":
pattern = "S-cute"
else:
pattern = category
try:
temp = [x.replace("inggo.info", "paypp.xyz") for x in src2]
src3 = [x for x in temp if 'images.paypp.xyz/wp-content/uploads' in x]
except:
src3 = [x for x in src2 if 'images.paypp.xyz/wp-content/uploads' in x]
name3 = [x for x in name2 if pattern in x]
if len(name3) < 12:
name3 = name2
                # name3 and src3 now hold the titles from the main page and the links to their detail pages.
                # Next, start a second browser to grab the video screenshots from each detail page.
driver1 = webdriver.Chrome(options=chrome_opts)
for i in range(len(src3)):
                    try:
                        if '[' not in name3[i] and '【' not in name3[i]:
                            title = name3[i]
                        else:
                            title = name3[i].split('【')[1].split('】')[0]  # simplify the title
                    except:
                        title = name3[i].split('[')[1].split(']')[0]
if i >= 1:
try:
if name3[i].split('[')[1].split(']')[0] == name3[i-1].split('[')[1].split(']')[0]:
title = name3[i].split(']')[1].replace("[","")
except:
pass
if i >= 1:
try:
if name3[i].split('【')[1].split('】')[0] == name3[i-1].split('【')[1].split('】')[0]:
title = name3[i].split('】')[1].replace("【","")
except:
pass
                    if title in Exist:
                        print("%s already downloaded!" % (title))
                        continue
                    # As noted above, skip titles that have already been downloaded.
                    # Open the detail page for this link.
driver1.get(src3[i])
img = driver1.find_element_by_xpath("//html/body/img")
img.screenshot(custom_path + title + '.jpg')
                    # wait = WebDriverWait(driver1, 10)  # wait for the browser to respond; optional
                    # pyautogui.rightClick(x=500, y=500)  # right-click the image; adjust position for your screen
                    # pyautogui.typewrite(['V'])  # the "save as" shortcut is V
                    # time.sleep(2)  # wait for the machine to respond
                    # pyperclip.copy(custom_path + title + '.jpg')  # copy path + filename to the clipboard
                    # time.sleep(1)
                    # pyautogui.hotkey('ctrlleft', 'V')  # paste
                    # time.sleep(1)
                    # pyautogui.press('enter')  # confirm
                    # time.sleep(1)
while True:
filelist = os.listdir(custom_path)
if title + '.jpg' in filelist:
with open(custom_path + 'history.txt', 'a+') as f:
f.writelines(title)
f.writelines('\n')
f.close()
print("%s 下载完成!" % (title))
break
else:
print("等待响应")
time.sleep(2)
                            # pyautogui.hotkey('ctrlleft', 'V')  # paste
                            # time.sleep(1)
                            # pyautogui.press('enter')  # confirm
                            # time.sleep(1)
                    # Record the downloaded image name in the txt history.
                    print("%s download complete!" % (title))
time.sleep(0.5)
driver1.quit()
print("第 %d 页爬完"%(page))
button = "//*[@class='next page-numbers']" #翻页按钮
driver.find_elements_by_xpath(button)[0].click()
except:
print("第 %d 页出错!"%(page))
driver1.quit()
try:
button = "//*[@class='next page-numbers']" #翻页按钮
driver.find_elements_by_xpath(button)[0].click()
except:
print("爬取完毕!")
break
continue
driver = open_browser(url)
time.sleep(2)
scrapy(driver)
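
# Usage sketch (an assumption, not from the original repo); left commented out
# because it launches a headless Chrome session:
# Crawl_51luxu().main(Dir='F:\\pic\\', page=1, category='300MIUM')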
|
ExcaliburEX/GHS
|
Crawl_51luxu.py
|
Crawl_51luxu.py
|
py
| 7,503 |
python
|
en
|
code
| 5 |
github-code
|
6
|
35573581070
|
from models.resnet import ResNet
from models.unet import UNet, UNet_dualHeads
model_zoo = {
'UNet': UNet,
'UNet_dualHeads': UNet_dualHeads
}
def get_model(cfg):
########################### COMPUTE INPUT & OUTPUT CHANNELS ############################
print("Satellites: ", cfg.DATA.satellites)
print("num_classes:", cfg.MODEL.NUM_CLASS)
INPUT_CHANNELS_DICT = {}
INPUT_CHANNELS_LIST = []
for sat in cfg.DATA.satellites:
INPUT_CHANNELS_DICT[sat] = len(list(cfg.DATA.input_bands[sat]))
if cfg.DATA.stacking: INPUT_CHANNELS_DICT[sat] = len(cfg.DATA.prepost) * INPUT_CHANNELS_DICT[sat]
INPUT_CHANNELS_LIST.append(INPUT_CHANNELS_DICT[sat])
########################### MODEL SELECTION ############################
if cfg.MODEL.ARCH in model_zoo.keys():
INPUT_CHANNELS = sum(INPUT_CHANNELS_LIST)
MODEL = model_zoo[cfg.MODEL.ARCH]
return MODEL(INPUT_CHANNELS,
num_classes=cfg.MODEL.NUM_CLASS,
topo=cfg.MODEL.TOPO,
use_deconv=cfg.MODEL.USE_DECONV) #'FC-EF'
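
# Usage sketch (an assumption, not from the original repo): cfg is an
# attribute-style config (e.g. a yacs CfgNode) along these hypothetical lines:
# cfg.DATA.satellites = ['S1', 'S2']; cfg.DATA.stacking = True
# cfg.DATA.prepost = ['pre', 'post']
# cfg.DATA.input_bands = {'S1': ['VV', 'VH'], 'S2': ['B4', 'B8']}
# cfg.MODEL.ARCH = 'UNet'; cfg.MODEL.NUM_CLASS = 2
# cfg.MODEL.TOPO = 16; cfg.MODEL.USE_DECONV = False
# model = get_model(cfg)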
|
puzhao8/sentinel2-burn-severity
|
models/__init__.py
|
__init__.py
|
py
| 1,171 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72010622268
|
#Stack using Linked List
class StackNode:
def __init__(self,data):
self.data=data
self.next=None
class Stack:
def __init__(self):
self.root=None
    def isempty(self):
        return self.root is None
    def push(self, data):
        new_node = StackNode(data)
        new_node.next = self.root
        self.root = new_node
        print("pushed")
    def pop(self):
        # Remove the top node and return its data (None if the stack is empty).
        curr = self.root
        if curr is None:
            return None
        self.root = curr.next
        return curr.data
def prints(self):
curr=self.root
while curr:
print(curr.data,end='-->')
curr=curr.next
def top(self):
return self.root.data
def rev(self):
while not self.isempty():
x=self.top()
print(x,end='')
x=self.pop()
s=Stack()
print(s.isempty())
s.push('h')
s.push('e')
s.push('m')
s.push('a')
#s.pop()
#s.prints()
s.top()
s.rev()
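
# With pop() returning the removed value, the reversal could also be written
# as below (sketch only; the stack above is already empty after s.rev()):
# while not s.isempty():
#     print(s.pop(), end='')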
|
Hemasri-3/datastructures_in_python
|
stack.py
|
stack.py
|
py
| 1,018 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15451649393
|
#!/bin/python
import csv
import urllib.parse
import urllib.request

if __name__ == "__main__":
    with open('payphone_set.csv', newline='') as csvfile:
        csvreader = csv.reader(csvfile)
        for row in csvreader:
            if row[0] == 'ID':
                continue
            number = "+" + row[0]
            neighborhood = row[7]
            lat = row[5]
            lon = row[6]
            params = urllib.parse.urlencode({'pay_phone[number]': number,
                                             'pay_phone[neighborhood]': neighborhood,
                                             'pay_phone[lat]': lat,
                                             'pay_phone[lon]': lon}).encode('ascii')
            urllib.request.urlopen("http://sfpayphones.urbansmore.com/pay_phones", params)
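
# Equivalent sketch with the third-party requests library (an assumption; it
# is not a dependency of the original script):
# import requests
# requests.post("http://sfpayphones.urbansmore.com/pay_phones",
#               data={'pay_phone[number]': number,
#                     'pay_phone[neighborhood]': neighborhood,
#                     'pay_phone[lat]': lat,
#                     'pay_phone[lon]': lon})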
|
emilyville/SFpayphones
|
upload.py
|
upload.py
|
py
| 575 |
python
|
en
|
code
| 4 |
github-code
|
6
|
33205173219
|
from django.urls import path
from .views import (get_user, add_user, get_categories, get_recipe,
add_to_favourite, get_user_favourites,
get_recipes_in_category, get_random_recipe,
add_to_dislikes)
urlpatterns = [
path('users/<int:telegram_id>', get_user),
path('users/add/', add_user),
path('categories/', get_categories),
path('category/recipes/', get_recipes_in_category),
path('recipe/random/', get_random_recipe),
path('favourites/recipe/', get_recipe),
path('favourites/', get_user_favourites),
path('favourites/add', add_to_favourite),
path('dislikes/add', add_to_dislikes)
]
|
AlexanderZharyuk/recipes
|
recipes_admin_api/api/urls.py
|
urls.py
|
py
| 684 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32371727829
|
def main():
for cont in range(0, 44):
with open("C:\\Users\\p22.ribeiro\\OneDrive\\repocode\\challenge-python\\dataset\\{}".format(cont), "r", encoding="cp1252") as arquivo:
frase = arquivo.read()
for remove in '!@#$%&*()<>:;,./?\|][}{=+-"~£':
frase = frase.replace(remove, ' ').lower()
lista_frase = frase.split()
clear_list = set(lista_frase)
clear_list = sorted(clear_list)
comprimento = len(clear_list)
# print(lista_frase)
# print(comprimento)
            # Map each index to its word and print the pair.
            dicionario_palavra = dict()
            for indice, palavra in enumerate(clear_list):
                dicionario_palavra[indice] = palavra
                print([indice, palavra])  # remove this print
main()
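
# Added sketch (an assumption about intent, given the file name
# reverse_index.py): a true inverted index maps each word to the set of
# document ids that contain it.
#
# from collections import defaultdict
# inverted = defaultdict(set)
# for doc_id in range(0, 44):
#     with open("C:\\Users\\p22.ribeiro\\OneDrive\\repocode\\challenge-python\\dataset\\{}".format(doc_id),
#               "r", encoding="cp1252") as f:
#         for word in f.read().lower().split():
#             inverted[word].add(doc_id)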
|
htmribeiro/challenger-python
|
others/reverse_index.py
|
reverse_index.py
|
py
| 770 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
24206813915
|
import webapp2
import jinja2
import os
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
template_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir), autoescape=True)
def to_render(template, **para):
t = template_env.get_template(template)
return t.render(para)
def blog_key(name='default'):
return db.Key.from_path('blog', name)
class Art(db.Model):
title = db.StringProperty(required = True)
arc = db.TextProperty(required = True)
created = db.DateTimeProperty(auto_now_add = True)
def render(self):
self._render_text = self.arc.replace('\n', '<br>')
return to_render('post.html', p = self)
class BaseHandler(webapp2.RequestHandler):
def render(self, template, **para):
self.response.out.write(to_render(template, **para))
class FrontPage(BaseHandler):
    def get(self):
        arts = db.GqlQuery("select * from Art order by created DESC")
        self.render("frontPage.html", arts=arts)
class ShowPost(BaseHandler):
def get(self, post_id):
key = db.Key.from_path('Art', int(post_id), parent=blog_key())
post = db.get(key)
self.render('permanlink.html', post = post)
"""Problem:
1. redirect('/blog') don't refresh the page
2. how add id colum to db automatiaclly increase
3. miss click on the title jump to a new page
"""
class NewPost(BaseHandler):
def get(self):
self.render("newPost.html")
def post(self):
title = self.request.get('title')
arc = self.request.get('arc')
if title and arc.strip():
a = Art(parent=blog_key(), title = title, arc = arc)
a.put()
self.redirect('/blog/%s' % str(a.key().id()))
else:
self.render("newPost.html", title=title, arc = arc, error="Content insufficiency")
app = webapp2.WSGIApplication([('/blog/?', FrontPage), ('/blog/newpost', NewPost), ('/blog/([0-9]+)', ShowPost)],
debug=True)
|
tongtie/udacity
|
WebDevelopment/hw3/my_solution.py
|
my_solution.py
|
py
| 2,089 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13058259505
|
from datetime import datetime, timezone
from config import Config
from logger import logger
from model.coordinates import Coordinates
from model.media_type import MediaType
from model.ts_source import TsSource
class MediaFile:
original_path: str
original_filename: str
filename: str
media_type: MediaType
dir_path: list[str]
unix_time_sec: int = None
timestamp: datetime = None
ts_source: TsSource = None
mtime: datetime = None
index: int = None
coordinates: Coordinates = None
md5: str = None
target_dir = None
target_filename = None
target = None
def __init__(self, original_path: str, original_filename: str, filename: str, media_type: MediaType,
dir_path: list[str]):
self.original_path = original_path
self.original_filename = original_filename
self.filename = filename
self.media_type = media_type
self.dir_path = dir_path
def __repr__(self):
if self.ts_source == TsSource.EXIF:
sym = Config.SYM_CHECK
else:
sym = Config.SYM_MULTIPLICATION
return f'{sym} {self.ts_source} {self.dir_path}/{self.original_filename}, ts={self.timestamp}'
def __lt__(self, other):
return self.original_filename.lower() < other.original_filename.lower()
def has_timestamp(self) -> bool:
return self.timestamp is not None
def update_time(self, ts: datetime, source: TsSource, force: bool = False):
if ts is not None and (force or self.timestamp is None or ts < self.timestamp):
self.timestamp = ts.astimezone(timezone.utc)
self.unix_time_sec = int(self.timestamp.timestamp())
self.ts_source = source
if self.ts_source == TsSource.MTIME:
self.mtime = ts
def update_coordinates(self, coordinates: Coordinates):
if coordinates is not None:
self.coordinates = coordinates
@staticmethod
def create(source_dir: str, file_dir: str) -> 'MediaFile':
original_filename, filename, media_type, split_path = MediaFile.retrieve_filename_data(source_dir, file_dir)
return MediaFile(
original_path=file_dir,
original_filename=original_filename,
filename=filename,
media_type=media_type,
dir_path=split_path
)
@staticmethod
def retrieve_filename_data(source_dir: str, file_dir: str):
if not source_dir.endswith('/'):
source_dir += '/'
split_path = file_dir.replace(source_dir, '').split('/')
original_filename = split_path.pop()
fixed_filename = MediaFile.replace_dots(original_filename)
filename, extension = fixed_filename.split('.')
media_type = MediaType.from_string(extension)
return original_filename, filename, media_type, split_path
@staticmethod
def replace_dots(original_filename: str) -> str:
result = original_filename
while result.count('.') > 1:
result = result.replace('.', ' ', 1)
return result
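
# Usage sketch (an assumption, not from the original repo; assumes 'JPG' is a
# value MediaType.from_string understands):
# mf = MediaFile.create('/photos', '/photos/2021/IMG_0001.JPG')
# mf.update_time(datetime(2021, 5, 1, tzinfo=timezone.utc), TsSource.EXIF)
# print(mf.has_timestamp(), mf.dir_path, mf.filename)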
|
mbogner/imagination
|
model/media_file.py
|
media_file.py
|
py
| 3,104 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19482945267
|
import math
import random
import numba as nb
import numpy as np
@nb.jit(nopython=True)
def dimension_selector_uniform(n_dimensions):
return random.randrange(n_dimensions)
def get_dimension_selector_expovariate(
lambd=None,
rel_lambd=None,
):
if lambd is not None and rel_lambd is not None:
raise ValueError("Cannot set both lambd and rel_lambd")
if lambd is None and rel_lambd is None:
# the default, using a rel_lambd of 4.0, placing the pseudo-mean
# 1/4 of the way into the dimensions
rel_lambd = 4.
@nb.jit(nopython=True)
def dimension_selector_expovariate(n_dimensions):
nonlocal lambd, rel_lambd
value = math.inf
while value >= n_dimensions:
value = random.expovariate(
lambd if lambd is not None else rel_lambd/n_dimensions
)
return int(value)
return dimension_selector_expovariate
def get_finder_for_cluster_obeying(
check_func,
min_count=1,
max_count=-1,
max_depth=-1,
dimension_selector=dimension_selector_uniform,
fixed_dimensional_parameters=-1,
fixed_non_dimensional_parameters=-1,
fixed_n=-1,
verbose=False,
jit_kwargs={},
):
@nb.jit(nopython=True, **jit_kwargs)
def _find_cluster_obeying(
dimensional_parameters,
non_dimensional_parameters,
random_seed=None,
iterations=-1,
):
if dimensional_parameters.shape[1] != non_dimensional_parameters.shape[0]:
raise ValueError(
"Minor dimension of dimensional_parameters must match "
"major dimension of non_dimensional_parameters"
)
if (
fixed_dimensional_parameters != -1
and fixed_dimensional_parameters != dimensional_parameters.shape[0]
):
raise ValueError("Number of dimensional parameters not expected value")
if (
fixed_non_dimensional_parameters != -1
and fixed_non_dimensional_parameters != non_dimensional_parameters.shape[1]
):
raise ValueError("Number of non-dimensional parameters not expected value")
if (
fixed_n != -1
and fixed_n != non_dimensional_parameters.shape[0]
):
raise ValueError("Number of candidates not expected value")
final_max_depth = max_depth if max_depth != -1 else (1 + int(
math.floor(math.log(dimensional_parameters.shape[-1]) / math.log(2))
))
if final_max_depth < 2:
raise ValueError("max_depth < 2 makes no sense")
if random_seed is not None:
random.seed(random_seed)
bitmap_stack = np.zeros(
(final_max_depth, dimensional_parameters.shape[-1]),
dtype=np.bool_,
)
bitmap_stack[0,:] = True
right_branch_stack = np.zeros((final_max_depth,), dtype=np.int8)
current_level = 1
iteration = 0
while True:
if right_branch_stack[current_level] == 0:
chosen_dimension = dimension_selector(dimensional_parameters.shape[0])
vals_count = 0
# initialize these to any value of the correct type
vals_min = vals_max = dimensional_parameters[0,0]
# scan for range of remaining values in this dimension
for i in range(dimensional_parameters.shape[1]):
if bitmap_stack[current_level-1,i]:
v = dimensional_parameters[chosen_dimension,i]
if vals_count == 0:
vals_min = vals_max = v
else:
vals_min = min(vals_min, v)
vals_max = max(vals_max, v)
vals_count+=1
chosen_split_point = random.uniform(vals_min, vals_max)
# mark values greater than threshold
remaining_count = 0
for i in range(dimensional_parameters.shape[1]):
if bitmap_stack[current_level-1,i]:
is_chosen = (
dimensional_parameters[chosen_dimension,i] >= chosen_split_point
)
bitmap_stack[current_level,i] = is_chosen
if is_chosen:
remaining_count+=1
elif right_branch_stack[current_level] == 1:
# invert current_level's bitmap, masked by the previous level's
remaining_count = 0
for i in range(bitmap_stack.shape[1]):
if bitmap_stack[current_level-1,i]:
is_chosen = not bitmap_stack[current_level,i]
bitmap_stack[current_level,i] = is_chosen
if is_chosen:
remaining_count+=1
else:
# tidy up then unwind
right_branch_stack[current_level] = 0
bitmap_stack[current_level,:] = False
if current_level > 1:
current_level-=1
# advance branch at underlying level
right_branch_stack[current_level]+=1
else:
# we're at the root
iteration+=1
if iterations != -1 and iteration >= iterations:
return None
# start again by continuing at the
# same current_level
continue
if verbose:
print("current_level = ", current_level, " remaining_count = ", remaining_count)
if remaining_count < min_count:
right_branch_stack[current_level]+=1
continue
if max_count == -1 or remaining_count <= max_count:
ndp_subset = np.empty(
(remaining_count, non_dimensional_parameters.shape[1],),
dtype=non_dimensional_parameters.dtype,
)
j = 0
for i in range(bitmap_stack.shape[1]):
if j < remaining_count and bitmap_stack[current_level,i]:
for k in range(ndp_subset.shape[1]):
ndp_subset[j,k] = non_dimensional_parameters[i,k]
j+=1
check_result = check_func(ndp_subset)
if check_result:
if check_result > 0:
return bitmap_stack[current_level,:]
else:
# negative result signals to stop checking this branch
right_branch_stack[current_level]+=1
continue
if remaining_count <= 1:
# dividing any more makes no sense
right_branch_stack[current_level]+=1
continue
if current_level+1 >= final_max_depth:
# can't descend any deeper
right_branch_stack[current_level]+=1
continue
current_level+=1
return _find_cluster_obeying
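
# Usage sketch (an assumption, not from the original repo): find a subset of
# at least 5 points whose first non-dimensional column averages above 0.9.
# The check function must be numba-jitted because it is called from jitted code.
#
# @nb.jit(nopython=True)
# def check_mean_above(ndp):
#     return 1 if ndp[:, 0].mean() > 0.9 else 0
#
# finder = get_finder_for_cluster_obeying(check_mean_above, min_count=5)
# mask = finder(np.random.rand(3, 1000), np.random.rand(1000, 1), iterations=10000)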
|
risicle/cluscheck
|
cluscheck/__init__.py
|
__init__.py
|
py
| 7,246 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18339770466
|
import requests
url = "https://api.telegram.org/bot5653233459:AAHWejZRnvy4luWTetBSbQY5jTzS11mA35U/sendMessage"
photo_url = "https://api.telegram.org/bot5653233459:AAHWejZRnvy4luWTetBSbQY5jTzS11mA35U/sendPhoto"
document_url = "https://api.telegram.org/bot5653233459:AAHWejZRnvy4luWTetBSbQY5jTzS11mA35U/sendDocument"
def buttons(chat_id):
payload = {
"photo": "https://blog.mint.com/wp-content/uploads/2013/02/1.jpg",
#"caption": caption,
"chat_id": chat_id,
"reply_markup": {
"inline_keyboard": [
[
{
"text": "Show Plans",
"callback_data": "Insurance Plan"
}
]
]
}
}
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
response = requests.post(photo_url, json=payload)
def button_1(chat_id):
payload = {
"photo": "AgACAgUAAxkBAAIGv2Mhrr21NIyhQKZHdVnObrNS0_SdAAIStTEb3U4IVSXi2GZ7s3TqAQADAgADcwADKQQ",
"chat_id": chat_id,
# "text": "Select one option",
"reply_markup": {
"inline_keyboard": [
[
{
"text": "Term Life",
"callback_data": "Term Life"
},
{
"text": "mediclaim",
"callback_data": "Mediclaim"
},
{
"text": "Accidental",
"callback_data": "Accidental Insurance"
},
]
]
}
}
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
response = requests.post(photo_url, json=payload)
print(response)
def term_life(chat_id):
payload = {
"chat_id": chat_id,
'document': 'BQACAgUAAxkBAAIClGMIdmbOEsi8xsAic-Bk0UahnIl5AAJCBgACDBJBVBLbNwoHbsUhKQQ',
#"text": "Choose Your Plan",
"reply_markup": {
"inline_keyboard": [
[
{
"text": "25Lac",
"callback_data": "25Lac"
},
{
"text": "50Lac",
"callback_data": "50Lac"
},
{
"text": "75Lac",
"callback_data": "75Lac"
},
{
"text": "1Cr",
"callback_data": "1r"
},
]
]
}
}
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
response = requests.post(document_url, json=payload)
def mediclaim(chat_id):
payload = {
"chat_id": chat_id,
#"text": "Choose Your Plan",
"document":"BQACAgUAAxkBAAICmmMIeJtMVL1v2JTsRhsdMb4uSDvCAAJEBgACDBJBVKwG4QUSEWW7KQQ",
"reply_markup": {
"inline_keyboard": [
[
{
"text": "2Lac",
"callback_data": "mediclaim up to 2Lac"
},
{
"text": "5Lac",
"callback_data": "mediclaim up to 5Lac"
},
{
"text": "10Lac",
"callback_data": "mediclaim up to 10Lac"
},
{
"text": "25Lac",
"callback_data": "mediclaim up to 25Lac"
},
]
]
}
}
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
response = requests.post(document_url, json=payload)
def accidental(chat_id):
payload = {
"chat_id": chat_id,
#"text": "Choose Your Plan",
"document":"BQACAgUAAxkBAAICmGMIeJMERoZF4atIdkk_L-cYGESUAAJDBgACDBJBVCJlGZUyjyGkKQQ",
"reply_markup": {
"inline_keyboard": [
[
{
"text": "5Lac",
"callback_data": "Plan of 5Lac"
},
{
"text": "10Lac",
"callback_data": "Plan of 10Lac"
},
{
"text": "25Lac",
"callback_data": "Plan of 25Lac"
},
{
"text": "50Lac",
"callback_data": "Plan of 50Lac"
},
]
]
}
}
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
response = requests.post(document_url, json=payload)
def existing_user(chat_id):
payload = {
"chat_id": chat_id,
"text": "Seems you are an existing user! To see your details click here ",
"reply_markup": {
"inline_keyboard": [
[
{
"text": "Show Details",
"callback_data": "show details"
}
]
]
}
}
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
response = requests.post(url, json=payload, headers=headers)
#print(response.text)
# import requests
# url = "https://api.telegram.org/bot5340261920:AAF2GGInKosubny7ox-CWeZyl8IMESgQg5o/sendPhoto"
# payload = {
# "photo": "AgACAgUAAxkBAAMLYvDs4xqN8GzQGUY555yHLr5joacAAjGxMRtMw4hX56puCadfo3cBAAMCAAN5AAMpBA",
# # "caption": "Optional",
# # "disable_notification": False,
# "reply_to_message_id": 0,
# "chat_id":1091996976
# }
# headers = {
# "Accept": "application/json",
# "User-Agent": "Telegram Bot SDK - (https://github.com/irazasyed/telegram-bot-sdk)",
# "Content-Type": "application/json"
# }
# response = requests.post(url, json=payload)
# print(response.text)
|
mayuritoro/tele_bot
|
tele_bot/trial.py
|
trial.py
|
py
| 5,973 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34228298510
|
import re

import setuptools
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
PACKAGENAME = "aliad"
VERSIONFILE = f"{PACKAGENAME}/_version.py"
verstrline = open(VERSIONFILE, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
verstr = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
setup(
    name=PACKAGENAME,
version=verstr,
author="Alkaid Cheng",
author_email="[email protected]",
description="A library for anomaly detection.",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
package_data={PACKAGENAME: []},
exclude_package_data={PACKAGENAME: []},
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
install_requires=[
'numpy',
'pandas',
'matplotlib',
'click',
'quickstats'
],
scripts=[f'bin/{PACKAGENAME}'],
python_requires='>=3.8',
)
|
AlkaidCheng/aliad
|
setup.py
|
setup.py
|
py
| 1,655 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20052609139
|
import numpy as np
import matplotlib.pyplot as plot
import math
def jacobi(x):
x1, x2 = x
return np.array([[1, math.sin(x2)], [-math.cos(x1), 1]])
def func(x):
x1, x2 = x
return np.array([x1 - math.cos(x2) - 1, x2 - math.sin(x1) - 1])
def find_delta_x(f, J):
return np.linalg.solve(J, -1 * f)
def newton(x_start, eps):
    x_curr = x_start
    x_next = []
    iters = 0
while True:
print(f'x_{iters} = \n{x_curr}')
f_x = func(x_curr)
J_x = jacobi(x_curr)
print(f'f(x_{iters}) = \n {f_x}')
print(f'J(x_{iters}) = \n {J_x}')
x_next = x_curr + find_delta_x(f_x, J_x)
print(f'x_{iters + 1} = \n{x_next}')
diff = np.linalg.norm(x_next - x_curr, ord=np.inf)
print(f'||x_{iters + 1} - x_{iters}|| = {diff}')
if diff <= eps:
print(f'Found solution in {iters} steps')
break
x_curr = x_next
iters += 1
return x_next, iters
def ph(x):
x1, x2 = x
return np.array([[1 + math.cos(x2)], [1 + math.sin(x1)]])
def ph_der(x):
x1, x2 = x
return np.array([[0, -math.sin(x2)], [math.cos(x1), 0]])
def simple_iters(x_range, phi, phi_deriv, eps):
x_curr = np.array([[np.mean(x)] for x in x_range])
x_next = []
iters = 0
print(f'x is = {x_range}')
x_series = [np.linspace(x[0], x[1]) for x in x_range]
q = max(map(lambda x: np.linalg.norm(phi_deriv(x), ord=np.inf), zip(*x_series)))
print(f'q = {q}')
if q >= 1:
print("q should be < 1")
exit()
coeff = q / (1 - q)
print(f'coeff = {coeff}')
while True:
print(f'x_{iters} = \n{x_curr}')
x_next = phi(x_curr)
print(f'x_{iters + 1} = \n{x_next}')
diff = np.linalg.norm(x_next - x_curr, ord=np.inf) * coeff
print(f'||x_{iters + 1} - x_{iters}|| * coeff = {diff}')
if diff <= eps:
print(f'Found solution in {iters} steps')
break
x_curr = x_next
iters += 1
return x_next, iters
def main():
# x_series = np.arange(-3.0, 3.1, 0.1)
# f1_series = [(lambda x2: math.cos(x2) + 1)(x) for x in x_series]
# f2_series = [(lambda x1: math.sin(x1) + 1)(x) for x in x_series]
# plot.figure()
# plot.xlabel('x1')
# plot.ylabel('x2')
# plot.plot(x_series, f1_series, 'r')
# plot.plot(f2_series, x_series,'g')
# plot.axis('equal')
# plot.grid(True)
# plot.show()
# 1.5 <= x1 <= 2
# 0.7 <= x2 <= 0.92
# (1.73, 0.83)
eps = float(input())
x_start = np.array([[1.7], [0.75]])
    root_n, iters_n = newton(x_start, eps)
x_range = [(1.7, 1.8), (0.75, 0.85)]
root_si, iters_si = simple_iters(x_range, ph, ph_der, eps)
    print(f'Newton: root is:\n{root_n}\nfound in {iters_n} iterations')
print(f'Simple iterations: root is:\n{root_si}\nfound in {iters_si} iterations')
print(f'Are roots close? {np.allclose(root_n, root_si, atol=eps)}')
main()
|
mehakun/Labs
|
6th_semester/NumMethods/2_lab/task_2.py
|
task_2.py
|
py
| 3,004 |
python
|
en
|
code
| 2 |
github-code
|
6
|