seq_id (string, length 7–11) | text (string, length 156–1.7M) | repo_name (string, length 7–125) | sub_path (string, length 4–132) | file_name (string, length 4–77) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---
37063009817 |
import cinn
import numpy as np
import time
from cinn import runtime
# from PIL import Image
# def getimg(fname):
# image=Image.open(fname)
# image= np.array(image)
# return image
# print(getimg("/root/RemoteWorking/huazhibin.webp"))
def randomImgNCHW(w,h,c=3,n=1):
return np.random.randint(0,255,(n,c,h,w))
def runCinn(C,args,n="matmul"):
stages = cinn.create_stages([C])
target = cinn.Target()
builder = cinn.Module.Builder(n, target)
func = cinn.lower(n, stages, [A.to_tensor(), B.to_tensor(), C])
print(func)
builder.add_function(func)
module = builder.build()
jit = cinn.ExecutionEngine()
jit.link(module)
exefn = jit.lookup(n)
exefn(args)
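# Note: runCinn() lowers the computation defined by C into a CINN module, JIT-compiles it,
# looks up the generated function by name and executes it on the given argument buffers.
# The placeholders A and B are module-level globals referenced inside cinn.lower().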
m = cinn.Expr(4)
n = cinn.Expr(3)
k = cinn.Expr(2)
A = cinn.Placeholder("float32", "A", [m, k])
B = cinn.Placeholder("float32", "B", [k, n])
C = cinn.compute([
m, n
], lambda v: A(v[0],v[1]) + B(v[0],v[1]), "C")
a = runtime.cinn_buffer_t(
np.arange(1,m.int()*k.int()+1).reshape(m.int(), k.int()).astype("float32"),
runtime.cinn_x86_device)
b = runtime.cinn_buffer_t(
np.arange(1,k.int()*n.int()+1).reshape(k.int(), n.int()).astype("float32"),
runtime.cinn_x86_device)
c = runtime.cinn_buffer_t(
np.zeros([m.int(), n.int()]).astype("float32"), runtime.cinn_x86_device)
args = [runtime.cinn_pod_value_t(_) for _ in [a, b, c]]
runCinn(C,args)
npa = a.numpy()
npb = b.numpy()
npc = c.numpy().astype("int32")
npr =np.random.randint(0,255,(6,8,3))
# print("a:",npa,npa.shape)
# print("b:",npb,npb.shape)
# print("c:",npc,npc.shape)
# print("npr:",npr)
t = randomImgNCHW(8,6)
print("npr-transpose:",t)
| qlogcn/CINN | tutorials/resize_dev.py | resize_dev.py | py | 1,668 | python | en | code | null | github-code | 6 |
33942211702 |
import uvicorn
import datetime
from loguru import logger
from fastapi import FastAPI
from sqlalchemy import select
from fastapi.middleware.cors import CORSMiddleware
from SAGIRIBOT.ORM.AsyncORM import orm
from SAGIRIBOT.Core.AppCore import AppCore
from SAGIRIBOT.command_parse.Commands import *
from SAGIRIBOT.ORM.AsyncORM import Setting, FunctionCalledRecord
app = FastAPI(docs_url=None, redoc_url=None)
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.get('/getGroups')
async def getGroups():
groups = await orm.fetchall(select(Setting.group_id, Setting.group_name).where(Setting.active == True))
return [{"value": group[0], "label": group[1]} for group in groups]
@app.get('/getGroupSetting')
async def getGroupSetting(groupId: int):
options_bool = ["repeat", "frequency_limit", "setu", "real", "real_high_quality", "bizhi", "r18", "img_search",
"bangumi_search", "debug", "compile", "anti_revoke", "online_notice", "switch"]
options_str = ["long_text_type", "r18_process", "speak_mode", "music"]
valid_str_option_value = {
"long_text_type": LongTextType.valid_values,
"r18_process": R18Process.valid_values,
"speak_mode": SpeakMode.valid_values,
"music": Music.valid_values
}
bool_result = await orm.fetchone(select(
Setting.repeat,
Setting.frequency_limit,
Setting.setu, Setting.real, Setting.real_high_quality, Setting.bizhi, Setting.r18,
Setting.img_search,
Setting.bangumi_search,
Setting.debug,
Setting.compile,
Setting.anti_revoke,
Setting.online_notice,
Setting.switch
).where(
Setting.group_id == groupId
))
str_result = await orm.fetchone(select(
Setting.long_text_type,
Setting.r18_process,
Setting.speak_mode,
Setting.music
).where(
Setting.group_id == groupId
))
return [
[{"label": options_bool[i], "value": bool_result[i]} for i in range(len(bool_result))],
[{"label": options_str[i], "value": str_result[i], "validValue": valid_str_option_value[options_str[i]]} for i in range(len(str_result))]
]
@app.get('/modifyGroupSetting')
async def modifyGroupSetting(groupId: int, settingName: str, newValue):
if newValue in ["true", "false"]:
newValue = True if newValue == "true" else False
try:
await orm.update(
Setting,
[Setting.group_id == groupId],
{"group_id": groupId, settingName: newValue}
)
except Exception as e:
logger.error(f"api error: {e}")
return False
return True
@app.get("/getStatus")
async def getStatus():
return {
"functionCalled": len(await orm.fetchall(
select(FunctionCalledRecord).where(FunctionCalledRecord.time >= datetime.date.today())
)),
"handlerCount": len(AppCore.get_core_instance().get_group_chains()),
"sayaCount": len(AppCore.get_core_instance().get_saya_channels())
}
def run_api():
uvicorn.run(app, host="127.0.0.1", port=8000, log_level="error")
| m310n/sagiri-bot | WebManager/web_manager.py | web_manager.py | py | 3,231 | python | en | code | null | github-code | 6 |
40224803539 |
import sys
from PyQt6.QtWidgets import QApplication, QWidget, QLabel
from PyQt6.QtGui import QPixmap, QFont
class MainWindow(QWidget):
def __init__(self):
"""constructor for Empty windows """
super().__init__()
self.initializeUI()
def initializeUI(self):
"""Set up the application"""
# setGeometry(): the first two arguments set the window position on screen, the last two set the window size
self.setGeometry(500, 500, 250, 400)
self.setWindowTitle("User profile")
self.setupMainWindow()
self.show()
def creatImageLabels(self):
"""Create Qlabel to be displayed in the main windows"""
skyblue_path = '../Beginning-PyQt-resource/Chapter02/images/skyblue.png'
try:
with open(skyblue_path):
skyblue = QLabel(self)
pixmap = QPixmap(skyblue_path)
skyblue.setPixmap(pixmap)
except FileNotFoundError as error:
print(f"Image not found. \nError: {error}")
profile_path = '../Beginning-PyQt-resource/Chapter02/images/profile_image.png'
try:
with open(profile_path):
profile = QLabel(self)
pixmap = QPixmap(profile_path)
profile.setPixmap(pixmap)
profile.move(80, 20)
except FileNotFoundError as error:
print(f"Image not found. \nError: {error}")
def setupMainWindow(self):
self.creatImageLabels()
user_label = QLabel(self)
user_label.setText("John Doe")
user_label.setFont(QFont("Arial", 20))
user_label.move(85, 140)
bio_label = QLabel(self)
bio_label.setText("Biography")
bio_label.setFont(QFont("Arial", 17))
bio_label.move(15, 170)
about_label = QLabel(self)
about_label.setText("I'm a software Engineer with 10 years\
exprience creating awesome code!")
about_label.setWordWrap(True) # set auto line
about_label.move(15, 190)
# Run the program
app = QApplication(sys.argv)
window = MainWindow()
sys.exit(app.exec())
| grant-Gan/programing_learn | pyqt6_learn/ch2-Building_a_simple_GUI/user_profile.py | user_profile.py | py | 2,148 | python | en | code | 0 | github-code | 6 |
29582028401 |
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
import logging
from odoo import api, models, _
_logger = logging.getLogger(__name__)
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
@api.multi
def action_auto_open(self):
return_item = super(AccountInvoice, self).action_auto_open()
# action_send_account_invoice_create_message_slack
for item in self:
item.action_send_account_invoice_create_message_slack()
# return
return return_item
@api.multi
def action_send_account_invoice_create_message_slack(self):
self.ensure_one()
web_base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
url_item = '%s/web?#id=%s&view_type=form&model=account.invoice' % (
web_base_url,
self.id
)
attachments = [
{
"title": _('Invoice has been created automatically'),
"text": self.number,
"color": "#36a64f",
"fallback": "View invoice %s %s" % (
self.number,
url_item
),
"actions": [
{
"type": "button",
"text": _("View invoice %s") % self.number,
"url": url_item
}
],
"fields": [
{
"title": _("Customer"),
"value": self.partner_id.name,
'short': True,
},
{
"title": _("Origin"),
"value": self.origin,
'short': True,
}
],
}
]
vals = {
'attachments': attachments,
'model': 'account.invoice',
'res_id': self.id,
'channel': self.env['ir.config_parameter'].sudo().get_param(
'slack_log_contabilidad_channel'
),
}
self.env['slack.message'].sudo().create(vals)
| OdooNodrizaTech/slack | slack_sale_orders_generate_invoice/models/account_invoice.py | account_invoice.py | py | 2,180 | python | en | code | 0 | github-code | 6 |
36092775168 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Sources of authentication from the request.
Each source is a :func:`callable`.
The source is called with the request and returns ``None`` if it cannot extract information
or a dict with the values it has got.
The first source returning a non ``None`` value is used for the providers.
"""
import urlparse
from napixd.http.response import HTTPError
class CatchAllSource(object):
def __call__(self, request):
return {
'is_secure': False
}
class SecureAuthProtocol(object):
"""
Implements the secure-auth provider.
The *Authorization* header of the requests is checked.
"""
def __init__(self):
self._mandatory = frozenset(['path', 'host', 'method'])
def __call__(self, request):
if 'Authorization' not in request.headers:
return None
msg, l, signature = request.headers['Authorization'].rpartition(':')
if l != ':':
return None
content = urlparse.parse_qs(msg)
for x in content:
content[x] = content[x][0]
missing_keys = self._mandatory.difference(content)
if missing_keys:
raise HTTPError(403, 'Missing authentication data: {0}'.format(
', '.join(missing_keys)))
content.update({
'msg': msg,
'signature': signature,
})
return content
class NonSecureAuthProtocol(object):
"""
Implements the Non-secure authentication protocol.
A token in the GET parameters is checked.
"""
@classmethod
def from_settings(cls, settings):
token = settings.get('get_parameter', 'token')
return cls(token)
def __init__(self, token):
self.token = token
def __call__(self, request):
if self.token not in request.GET:
return None
login, l, signature = request.GET[self.token].partition(':')
if l != ':':
raise HTTPError(401, 'Incorrect NAPIX non-secure Authentication')
return {
'login': login,
'signature': signature,
'msg': login,
'is_secure': False
}
| napix/NapixServer | napixd/auth/sources.py | sources.py | py | 2,197 | python | en | code | 1 | github-code | 6 |
30477193038 |
"""
Sample Input:
Enter rotational copies: 5
Enter sides per polygon: 4
Enter edge pixel length: 100
Enter row range start: -200
Enter row range end: 200
Enter row range increment: 400
Enter col range start: -250
Enter col range end: 250
Enter col range increment: 250
"""
import turtle
import random
def draw_polygon(t,spp,epl):
for j in range(spp):
t.forward(epl)
t.left(360/spp)
def draw_rotational_polygons(t,spp,epl,rc):
for i in range(rc):
draw_polygon(t,spp,epl)
t.left(360/rc)
def draw_full_shape(t,spp,epl,rc):
epl2 = epl
while epl2 > 1:
t.pensize(5)
t.color(1,1,1)
draw_rotational_polygons(t,spp,epl2,rc)
epl2 = ((epl2)/2)
epl2 = epl
while epl2 > 1:
t.pensize(1)
t.color(0,0,0)
draw_rotational_polygons(t,spp,epl2,rc)
epl2 = ((epl2)/2)
def draw_grids(t,spp,epl,rc,rrs,rre,rri,crs,cre,cri):
rows = int((rre - rrs)/rri)+1
columns = int((cre - crs)/cri)+1
col=crs
row=rrs
for y in range(rows):
for x in range(columns):
t.penup()
t.goto(col,row)
t.pendown()
draw_full_shape(t,spp,epl,rc)
col = col+cri
col = crs
row = row+rri
def main():
background_red = 0
background_green = 0
background_blue = 0
while background_red == 0 and background_green == 0 and background_blue == 0:
background_red = random.choice([0.0,0.125,0.25,0.375,0.5])
background_green = random.choice([0.0,0.125,0.25,0.375,0.5])
background_blue = random.choice([0.0,0.125,0.25,0.375,0.5])
print("Random background color is: (", background_red, ",", background_green, ",", background_blue, ")")
wn = turtle.Screen()
wn.bgcolor(background_red, background_green, background_blue)
murtle = turtle.Turtle()
murtle.hideturtle()
murtle.speed(0)
rotational_copies = int(input("Enter rotational copies: "))
sides_per_polygon = int(input("Enter sides per polygon: "))
edge_pixel_length = int(input("Enter edge pixel length: "))
row_range_start = int(input("Enter row range start: "))
row_range_end = int(input("Enter row range end: "))
row_range_increment = int(input("Enter row range increment: "))
col_range_start = int(input("Enter col range start: "))
col_range_end = int(input("Enter col range end: "))
col_range_increment = int(input("Enter col range increment: "))
#row range is bottom/top bound of centers of shapes
#row increment is vertical space between centers of shapes
#col range is left/right bound of centers of shapes
#col increment is horizontal space between centers of shapes
draw_grids(murtle,sides_per_polygon,edge_pixel_length,rotational_copies,row_range_start,row_range_end,row_range_increment,col_range_start,col_range_end,col_range_increment)
print("Click turtle screen to exit...")
wn.exitonclick()
if __name__ == "__main__":
main()
| CedarCollins/Introduction-to-Computer-Programming | Turtle Polygon Rotations Grid.py | Turtle Polygon Rotations Grid.py | py | 3,011 | python | en | code | 0 | github-code | 6 |
7571022590 |
import numpy as np
import logging
import sys
import pkg_resources
import pytz
import datetime
import os
import re
from numpy import cos,sin
# Get the version
version_file = pkg_resources.resource_filename('pynortek','VERSION')
# Setup logging module
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
logger = logging.getLogger('pynortek')
def xyz2enu(u,v,w,head,pitch,roll,inverse=False):
"""
Transforms velocities in XYZ coordinates to ENU, or vice versa if
inverse=True. Transformation is done according to the Nortek
convention
"""
# convert to radians
hh = np.pi*(head-90)/180
pp = np.pi*pitch/180
rr = np.pi*roll/180
ut = np.zeros(np.shape(u))
vt = np.zeros(np.shape(u))
wt = np.zeros(np.shape(u))
for i in range(len(head)):
# generate heading matrix
H = np.matrix([[cos(hh[i]), sin(hh[i]), 0],[-sin(hh[i]), cos(hh[i]), 0],[0, 0, 1]])
# generate combined pitch and roll matrix
P = [[cos(pp[i]), -sin(pp[i])*sin(rr[i]), -cos(rr[i])*sin(pp[i])],
[0, cos(rr[i]), -sin(rr[i])],
[sin(pp[i]), sin(rr[i])*cos(pp[i]), cos(pp[i])*cos(rr[i])]]
R = H*P
#print(R)
if(inverse):
R = np.linalg.inv(R)  # matrix inverse lives in numpy.linalg
# do transformation
ut[i] = R[0,0]*u[i] + R[0,1]*v[i] + R[0,2]*w[i];
vt[i] = R[1,0]*u[i] + R[1,1]*v[i] + R[1,2]*w[i];
wt[i] = R[2,0]*u[i] + R[2,1]*v[i] + R[2,2]*w[i];
return [ut,vt,wt]
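# Minimal usage sketch (illustrative values only): with a heading of 90 degrees and zero
# pitch/roll, the Nortek convention above reduces to the identity rotation, so the XYZ
# velocities pass through unchanged.
#
#   u = np.array([0.1]); v = np.array([0.0]); w = np.array([0.0])
#   head = np.array([90.0]); pitch = np.array([0.0]); roll = np.array([0.0])
#   ue, ve, we = xyz2enu(u, v, w, head, pitch, roll)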
raw_data_files = ['.prf','.vec'] # Names of raw binary data files
class pynortek():
"""A Nortek parsing object
Author: Peter Holtermann ([email protected])
Usage:
>>>filename='test'
>>>aquadopp = pynortek(filename)
"""
def __init__(self,filename, verbosity=logging.DEBUG, timezone=pytz.UTC):
"""
"""
logger.setLevel(verbosity)
self.timezone = timezone
self.deployment = os.path.split(filename)[-1]
self.fpath = os.path.split(filename)[0]
self.rawdata = {}
print(self.deployment)
print(self.fpath)
filename_hdr = filename + '.hdr'
logger.debug('Trying to open header file: ' + filename_hdr)
try:
fhdr = open(filename_hdr)
except Exception as e:
logger.warning('Could not open header file, exiting')
return
header = self.parse_header(fhdr)
self.header = header
print(header)
print('Loading files')
for fread in header['files']:
print(fread)
IS_RAW = False
for rawname in raw_data_files:
if(rawname in fread.lower()):
IS_RAW=True
if(IS_RAW == False):
print('Loading ' + fread)
suffix = fread.split('.')[-1]
fname_tmp = os.path.join(self.fpath,fread)
print(fname_tmp)
data_tmp = np.loadtxt(fname_tmp)
self.rawdata[suffix] = data_tmp
# Process the raw data just loaded
self.process_rawdata()
def parse_header(self,fhdr):
""" Parses a nortek header file
"""
header = {}
datefmt = '%d.%m.%Y %H:%M:%S'
header_field = None
header['files'] = []
while True:
l = fhdr.readline()
if(len(l) == 0):
break
# Find all files to be read
if((l[0] == '[')):
ftmp = l.split("\\")[-1].replace(']','').replace('\n','')
header['files'].append(ftmp)
# If we have a sensor file, check position of fields
if('.sen' in l[-7:]):
print('Sensor file')
header_field = 'sensors'
header[header_field] = {}
# Transducer distance
if(('Beam' in l) and ('Vertical' in l)):
print('Transducer distance')
header_field = 'distance'
header[header_field] = {'cell':[],'beam':[],'vertical':[]}
continue
# Check for the header field
if('User setup' in l):
print('User setup')
header_field = 'User setup'
header[header_field] = {}
elif('Hardware configuration' in l):
print('Hardware configuration')
header_field = 'Hardware configuration'
header[header_field] = {}
elif('Head configuration' in l):
header_field = 'Head configuration'
header[header_field] = {}
#print(l)
if(header_field is not None): # Check if field is over (one empty line)
if(len(l) <= 2):
print('Header ' + header_field + ' over')
header_field = None
# Check for a one line list
ind = l.find(' ')
if(ind >= 0):
if('Number of measurements' in l):
header['Number of measurements'] = int(l.split()[-1])
elif('Coordinate system' in l):
header['Coordinate system'] = l.split()[-1]
logger.debug('Coordinate system found: ' + header['Coordinate system'])
elif('Horizontal velocity range' in l):
header['Horizontal velocity range'] = float(l.split()[-2])
logger.debug('Horizontal velocity range: ' + str(header['Horizontal velocity range']))
elif('Vertical velocity range' in l):
header['Vertical velocity range'] = float(l.split()[-2])
logger.debug('Vertical velocity range: ' + str(header['Vertical velocity range']))
elif('Orientation' in l):
header['Orientation'] = l.split()[-1]
if('DOWN' in header['Orientation']):
header['updown'] = True
else:
header['updown'] = False
logger.debug('Orientation ' + header['Orientation'] + ' updown:' + str(header['updown']))
elif('Number of checksum errors' in l):
header['Number of checksum errors'] = int(l.split()[-1])
elif('Time of first measurement' in l):
ind2 = l.rfind(' ')
tstr = l[ind2+2:].replace('\n','')
ttmp = datetime.datetime.strptime(tstr,datefmt)
ttmp = ttmp.replace(tzinfo=self.timezone)
header['Time of first measurement'] = ttmp
elif('Time of last measurement' in l):
ind2 = l.rfind(' ')
tstr = l[ind2+2:].replace('\n','')
ttmp = datetime.datetime.strptime(tstr,datefmt)
ttmp = ttmp.replace(tzinfo=self.timezone)
header['Time of last measurement'] = ttmp
elif('Transformation matrix' in l):
logger.debug('Transformation matrix found')
header['Transformation matrix'] = np.zeros((3,3))
# Get all three lines
tmp = []
tmp.append(l)
tmp.append(fhdr.readline())
tmp.append(fhdr.readline())
for i in range(3):
T_tmp = np.asarray(tmp[i].split()[-3:]).astype(float)  # builtin float; the np.float alias is removed in recent NumPy
header['Transformation matrix'][i,:] = T_tmp
logger.debug(str(header['Transformation matrix']))
elif('Magnetometer calibration matrix' in l):
logger.debug('Magnetometer calibration matrix found')
header['Magnetometer calibration matrix'] = np.zeros((3,3))
# Get all three lines
tmp = []
tmp.append(l)
tmp.append(fhdr.readline())
tmp.append(fhdr.readline())
for i in range(3):
T_tmp = np.asarray(tmp[i].split()[-3:]).astype(float)  # builtin float; the np.float alias is removed in recent NumPy
header['Magnetometer calibration matrix'][i,:] = T_tmp
logger.debug(str(header['Magnetometer calibration matrix']))
else:
pass
if(header_field is not None):
if(header_field == 'sensors'):
l = l.replace('\n','').replace('\r','').strip() # remove return and trailing/leading blanks
lsp = re.sub(" +" , "\t", l).split('\t')
print('sensors',lsp)
field = lsp[1]
value = lsp[0]
header[header_field][field] = int(value)
elif(header_field == 'distance'):
l = l.replace('\n','').replace('\r','').strip() # remove return and trailing/leading blanks
lsp = re.sub(" +" , "\t", l).split('\t')
cell = lsp[0]
beam = lsp[1]
vertical = lsp[2]
print(cell,beam,vertical)
header[header_field]['cell'].append(int(cell))
header[header_field]['beam'].append(float(beam))
header[header_field]['vertical'].append(float(vertical))
else:
ind2 = l.rfind(' ')
data = l[ind2+2:].replace('\n','').replace('\r','')
field = l[:ind].replace('\n','').replace('\r','')
header[header_field][field] = data
#print(l.split())
return header
def process_rawdata(self):
""" Processes .sen data stored in data['sen'] and the remaining rawdata
"""
print('Creating time axis')
t = []
tu = []
for i in range(np.shape(self.rawdata['sen'][:,0])[0]):
month = int(self.rawdata['sen'][i,0])
day = int(self.rawdata['sen'][i,1])
year = int(self.rawdata['sen'][i,2])
hour = int(self.rawdata['sen'][i,3])
minute = int(self.rawdata['sen'][i,4])
millis = self.rawdata['sen'][i,5]%1
second = int(self.rawdata['sen'][i,5] - millis)
micro = int(millis*1000*1000)
ttmp = datetime.datetime(year,month,day,hour,minute,second,micro,tzinfo=self.timezone)
t.append(ttmp)
tu.append(ttmp.timestamp())
self.t = t # datetime time
self.tu = tu # unix time
self.data = {}
for k in self.header['sensors'].keys():
ind_key = self.header['sensors'][k] - 1
self.data[k] = self.rawdata['sen'][:,ind_key]
# Processing the remaining data
# For a profiler (Aquadopp)
aquadopp_keys = ['v1','v2','v3','a1','a2','a3','c1','c2','c3']
for key in aquadopp_keys:
if(key in self.rawdata.keys()):
print('Getting data from: ' + key + ' (profiler)')
self.data[key] = self.rawdata[key][:,2:]
if('distance' in self.header.keys()):
self.data['dis_beam'] = np.asarray(self.header['distance']['beam'])
self.data['dis_vertical'] = np.asarray(self.header['distance']['vertical'])
vector_keys = ['dat']
for key in vector_keys:
if(key in self.rawdata.keys()):
print('Getting data from: ' + key + ' (Vector)')
self.data[key] = self.rawdata[key][:,2:]
def rot_vel(self,coord,updown=None,save=False):
""" Rotates the velocities to different coordinate system
Args:
coord:
updown:
save:
"""
logger.debug('trans_coord():')
T = self.header['Transformation matrix'][:]
if(updown == None):
updown = self.header['updown']
# flip axes if instrument is pointing downward
# (so from here on, XYZ refers to a right-handed coordinate system
# with z pointing upward)
if updown:
logger.debug('Downlooking, changing matrix')
T[1,:] = -T[1,:];
T[2,:] = -T[2,:];
v1_rot = np.zeros(np.shape(self.data['v1']))
v2_rot = np.zeros(np.shape(self.data['v2']))
v3_rot = np.zeros(np.shape(self.data['v3']))
try:
v1_rep_rot = np.zeros(np.shape(self.data['v1_rep']))
v2_rep_rot = np.zeros(np.shape(self.data['v2_rep']))
v3_rep_rot = np.zeros(np.shape(self.data['v3_rep']))
repaired = True
except:
repaired = False  # no repaired velocity fields are available
pass
print(np.shape(self.data['v1']))
if(coord == 'XYZ'):
if(self.header['Coordinate system'] == 'BEAM'):
logger.debug('BEAM to XYZ')
for i in range(np.shape(v1_rot)[0]):
for j in range(np.shape(v1_rot)[1]):
v1_rot[i,j] = T[0,0] * self.data['v1'][i,j] + T[0,1] * self.data['v2'][i,j] + T[0,2] * self.data['v3'][i,j]
v2_rot[i,j] = T[1,0] * self.data['v1'][i,j] + T[1,1] * self.data['v2'][i,j] + T[1,2] * self.data['v3'][i,j]
v3_rot[i,j] = T[2,0] * self.data['v1'][i,j] + T[2,1] * self.data['v2'][i,j] + T[2,2] * self.data['v3'][i,j]
if repaired:
v1_rep_rot[i,j] = T[0,0] * self.data['v1_rep'][i,j] + T[0,1] * self.data['v2_rep'][i,j] + T[0,2] * self.data['v3_rep'][i,j]
v2_rep_rot[i,j] = T[1,0] * self.data['v1_rep'][i,j] + T[1,1] * self.data['v2_rep'][i,j] + T[1,2] * self.data['v3_rep'][i,j]
v3_rep_rot[i,j] = T[2,0] * self.data['v1_rep'][i,j] + T[2,1] * self.data['v2_rep'][i,j] + T[2,2] * self.data['v3_rep'][i,j]
if save:
logger.debug('saving data in trans')
try: # Check if self.trans is existing
self.trans
except:
self.rotvel = {}
if(coord == 'XYZ'):
self.rotvel['u'] = v1_rot[:]
self.rotvel['v'] = v2_rot[:]
self.rotvel['w'] = v3_rot[:]
if repaired:
# Save the repaired data as well
self.rotvel['u_rep'] = v1_rep_rot[:]
self.rotvel['v_rep'] = v2_rep_rot[:]
self.rotvel['w_rep'] = v3_rep_rot[:]
return [v1_rot,v2_rot,v3_rot]
def repair_phase_shift(self,vel=None,threshold=None, save = False):
"""Tries to repair a phase shift in pulse coherent measurements. It
assumes that the first measured value is correct.
"""
if vel is None:  # "== None" on an ndarray would compare element-wise
vel = self.data
logger.debug('repairing native velocity')
coordinate_system = self.header['Coordinate system']
vel_all = [self.data['v1'],self.data['v2'],self.data['v3']]
else:
vel_all = [vel]
vel_rep_all = []
for vel_tmp in vel_all:
# Compute threshold from header data
if( coordinate_system == 'BEAM'):
logger.debug('Using thresholds for beam coordinates')
# Get the factor for the beam from the vertical velocity
fac = np.linalg.inv(self.header['Transformation matrix'])[0,2]
threshold_tmp = self.header['Vertical velocity range']# * fac
else:
logger.debug('Unknown threshold, returning')
return
vel_rep = np.zeros(np.shape(vel_tmp))
for i in range(np.shape(vel_rep)[1]):
vel_rep[:,i] = self.repair_phase_shift_vector(vel_tmp[:,i],threshold_tmp)
vel_rep_all.append(vel_rep)
print('hallo',vel is self.data)
if((vel is self.data) and save):
logger.debug("Saving data as data['v1_rep'] etc")
self.data['v1_rep'] = vel_rep_all[0]
self.data['v2_rep'] = vel_rep_all[1]
self.data['v3_rep'] = vel_rep_all[2]
def repair_phase_shift_vector(self,vel,threshold):
"""Tries to repair a phase shift in pulse coherent measurements. It
assumes that the first measured value is correct.
"""
vel_rep = vel.copy()
vthresh = threshold - 0.3 * threshold
for i in range(1,len(vel)):
if((np.sign(vel_rep[i-1]) != np.sign(vel_rep[i])) and (abs(vel_rep[i-1]) > vthresh) and (abs(vel_rep[i]) > vthresh)):
#print('Phase shift!')
dv = threshold - abs(vel_rep[i])
vel_rep[i] = np.sign(vel_rep[i-1]) * (threshold + dv)
return vel_rep
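# Minimal usage sketch (the deployment name is a placeholder; it assumes the .hdr file and
# the ASCII exports listed in it are present in the working directory):
#
#   p = pynortek('deployment')               # parses deployment.hdr and loads the data files
#   u, v, w = p.rot_vel('XYZ', save=True)    # rotate BEAM velocities using the header matrix
#   p.repair_phase_shift(save=True)          # attempt to undo phase wrapping (pulse-coherent data)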
| MarineDataTools/pynortek | pynortek/pynortek.py | pynortek.py | py | 17,352 | python | en | code | 4 | github-code | 6 |
72407617147 |
def parens(num, the_str='()'):
the_set = set()
if num == 1:
return [the_str]
else:
for i in range(len(the_str) + 1):
the_set.add(the_str[:i] + '()' + the_str[i:])
for i in the_set:
temp_list = parens(num - 1, i)
if type(temp_list) == set:
the_set = the_set | temp_list
return the_set
def workaround(num):
output = list(parens(num))
to_return = []
for i in output:
if len(i) == num * 2:
to_return.append(i)
return to_return
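# Note: parens() over-generates strings of mixed lengths while recursing, so workaround()
# keeps only those of length 2*num, i.e. the balanced combinations of num pairs.
# For example, workaround(2) yields ['(())', '()()'] (order depends on set iteration).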
if __name__ == '__main__':
print(workaround(8))
| endere/code-katas | parens/parens.py | parens.py | py | 609 | python | en | code | 0 | github-code | 6 |
15915660899 |
"""initial
Revision ID: 977a56225963
Revises: None
Create Date: 2016-09-24 22:05:55.701455
"""
# revision identifiers, used by Alembic.
revision = '977a56225963'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('series',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=120), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('title')
)
op.create_table('chapter',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=120), nullable=True),
sa.Column('body', sa.String(length=1000000), nullable=True),
sa.Column('series_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['series_id'], ['series.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('title')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('chapter')
op.drop_table('series')
### end Alembic commands ###
| cloudiirain/Website | migrations/versions/977a56225963_.py | 977a56225963_.py | py | 1,139 | python | en | code | null | github-code | 6 |
8655579477 |
from torch import nn
import torch
import utils
import cv2
import numpy as np
import supervisely_lib as sly
def inference(model: nn.Module, input_height, input_width, image_path, device=None):
with torch.no_grad():
model.eval()
image = sly.image.read(image_path) # RGB
input = utils.prepare_image_input(image, input_width, input_height)
input = torch.unsqueeze(input, 0)
input = utils.cuda(input, device)
output = model(input)
image_height, image_width = image.shape[:2]
predicted_classes_indices = output.data.cpu().numpy().argmax(axis=1)[0]
result = cv2.resize(predicted_classes_indices, (image_width, image_height), interpolation=cv2.INTER_NEAREST)
return result
def convert_prediction_to_sly_format(predicted_class_indices, classes_json, model_classes: sly.ProjectMeta):
height, width = predicted_class_indices.shape[:2]
labels = []
for idx, class_info in enumerate(classes_json): # curr_col2cls.items():
class_mask = predicted_class_indices == idx # exact match (3-channel img & rgb color)
if not np.any(class_mask):
# 0 pixels for class
continue
bitmap = sly.Bitmap(data=class_mask)
obj_class = model_classes.get_obj_class(class_info["title"])
labels.append(sly.Label(bitmap, obj_class))
ann = sly.Annotation(img_size=(height, width), labels=labels)
return ann
def load_model(weights_path, num_classes, model_name, device):
from train import model_list
model_class = model_list[model_name]["class"]
model = model_class(num_classes=num_classes)
state = torch.load(weights_path)
model.load_state_dict(state)
model.to(device)
model.eval()
return model
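# Minimal usage sketch (paths, sizes and the model name are placeholders):
#
#   model = load_model("unet.pth", num_classes=len(classes_json), model_name="unet", device="cuda")
#   mask = inference(model, input_height=256, input_width=256, image_path="img.png", device="cuda")
#   ann = convert_prediction_to_sly_format(mask, classes_json, model_classes)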
| supervisely-ecosystem/unet | custom_net/inference.py | inference.py | py | 1,728 | python | en | code | 2 | github-code | 6 |
1856720220 |
"""Unit test is expected to be here, while I use some usage cases instead."""
from name_analyzer.name_analysis import Name_analysis
if __name__ == '__main__':
# query = 'Martin Reynolds British'
# query = 'Martin Reynolds Maynard'
# query = 'Darry Holliday University of Holy Cross'
# query = 'Caron Anderson Emory University'
# query = 'Darryl Holliday City Bureau'
# query = 'Yi Fang SCU'
query = {
'first_name':'Yi',
'last_name': 'Fang', #required
'affiliation': 'SCU',
'title': 'Dr.'
}
obj = Name_analysis(google_dir= 'google img directory', bing_dir= 'bing img directory')
result = obj.analyze_name(query)
print(result)
| LinZhihaozlin22/textual_name_analyzer | tests/test.py | test.py | py | 704 | python | en | code | 1 | github-code | 6 |
40056110993 |
#https://docs.python.org/3/library/heapq.html
#https://www.programiz.com/python-programming/methods/dictionary/get
#Link: https://leetcode.com/problems/top-k-frequent-elements/
# Name: Top K Frequent Elements
# Difficulty: Medium
# Topic: Min Heap
#Time: O(n log k) since we update the root a maximum of n times, each update is a log k operation
#Space: O(n + k) for the size of the heap and the hashmap
from typing import List # List is used in the type hints below
import heapq
class Solution:
def topKFrequent(self, nums: List[int], k: int) -> List[int]:
freqMap = {}
for num in nums:
if(num not in freqMap):
freqMap[num] = 0
freqMap[num] += 1
return heapq.nlargest(k, freqMap.keys(), key=freqMap.get)
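# Minimal usage sketch (illustrative input):
if __name__ == "__main__":
    # the two most frequent values of [1, 1, 1, 2, 2, 3] are 1 and 2
    print(Solution().topKFrequent([1, 1, 1, 2, 2, 3], 2))  # -> [1, 2]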
| Shivaansh/AlgoExpert-LeetCode-Solutions | LeetCode Problems/Python/TopKFrequentElements.py | TopKFrequentElements.py | py | 749 | python | en | code | 2 | github-code | 6 |
72532362749 |
import logging
from asyncio import CancelledError, Task, create_task
from collections.abc import AsyncGenerator
from contextlib import asynccontextmanager, suppress
from fastapi import FastAPI
from servicelib.logging_utils import log_context
from watchdog.observers.api import DEFAULT_OBSERVER_TIMEOUT
from ._context import OutputsContext
from ._event_filter import EventFilter
from ._event_handler import EventHandlerObserver
from ._manager import OutputsManager
_logger = logging.getLogger(__name__)
class OutputsWatcher:
def __init__(
self, *, outputs_manager: OutputsManager, outputs_context: OutputsContext
) -> None:
self.outputs_manager = outputs_manager
self.outputs_context = outputs_context
self._task_events_worker: Task | None = None
self._event_filter = EventFilter(outputs_manager=outputs_manager)
self._observer_monitor: EventHandlerObserver = EventHandlerObserver(
outputs_context=self.outputs_context,
outputs_manager=self.outputs_manager,
heart_beat_interval_s=DEFAULT_OBSERVER_TIMEOUT,
)
async def _worker_events(self) -> None:
while True:
event: str | None = (
await self.outputs_context.port_key_events_queue.coro_get()
)
if event is None:
break
await self._event_filter.enqueue(event)
async def enable_event_propagation(self) -> None:
await self.outputs_context.toggle_event_propagation(is_enabled=True)
async def disable_event_propagation(self) -> None:
await self.outputs_context.toggle_event_propagation(is_enabled=False)
async def start(self) -> None:
with log_context(_logger, logging.INFO, f"{OutputsWatcher.__name__} start"):
self._task_events_worker = create_task(
self._worker_events(), name="outputs_watcher_events_worker"
)
await self._event_filter.start()
await self._observer_monitor.start()
async def shutdown(self) -> None:
"""cleans up spawned tasks which might be pending"""
with log_context(_logger, logging.INFO, f"{OutputsWatcher.__name__} shutdown"):
await self._event_filter.shutdown()
await self._observer_monitor.stop()
if self._task_events_worker is not None:
self._task_events_worker.cancel()
with suppress(CancelledError):
await self._task_events_worker
def setup_outputs_watcher(app: FastAPI) -> None:
async def on_startup() -> None:
assert isinstance(app.state.outputs_context, OutputsContext) # nosec
outputs_context: OutputsContext = app.state.outputs_context
outputs_manager: OutputsManager
outputs_manager = app.state.outputs_manager # nosec
app.state.outputs_watcher = OutputsWatcher(
outputs_manager=outputs_manager,
outputs_context=outputs_context,
)
await app.state.outputs_watcher.start()
await disable_event_propagation(app)
async def on_shutdown() -> None:
outputs_watcher: OutputsWatcher | None = app.state.outputs_watcher
if outputs_watcher is not None:
await outputs_watcher.shutdown()
app.add_event_handler("startup", on_startup)
app.add_event_handler("shutdown", on_shutdown)
async def disable_event_propagation(app: FastAPI) -> None:
outputs_watcher: OutputsWatcher | None = app.state.outputs_watcher
if outputs_watcher is not None:
await outputs_watcher.disable_event_propagation()
async def enable_event_propagation(app: FastAPI) -> None:
outputs_watcher: OutputsWatcher | None = app.state.outputs_watcher
if outputs_watcher is not None:
await outputs_watcher.enable_event_propagation()
@asynccontextmanager
async def event_propagation_disabled(app: FastAPI) -> AsyncGenerator[None, None]:
try:
await disable_event_propagation(app)
yield None
finally:
await enable_event_propagation(app)
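# Minimal usage sketch: code that already holds the FastAPI `app` can suppress port-key
# events around a block of filesystem changes, e.g.
#
#   async with event_propagation_disabled(app):
#       ...  # changes made here do not trigger output events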
| ITISFoundation/osparc-simcore | services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_watcher.py | _watcher.py | py | 4,081 | python | en | code | 35 | github-code | 6 |
32347991199 |
import cv2 as cv
import numpy as np
def nothing(x):
pass
cap= cv.VideoCapture('pranay1.avi')
fourcc= cv.VideoWriter_fourcc('X', 'V', 'I', 'D')
out= cv.VideoWriter('final1.avi', fourcc, 20.0, (640,480) )
#cv.namedWindow('Tracking')
#cv.createTrackbar('l_h', 'Tracking', 0, 255, nothing)
#cv.createTrackbar('l_s', 'Tracking', 0, 255, nothing)
#cv.createTrackbar('l_v', 'Tracking', 0, 255, nothing)
#cv.createTrackbar('u_h', 'Tracking', 255, 255, nothing)
#cv.createTrackbar('u_s', 'Tracking', 255, 255, nothing)
#cv.createTrackbar('u_v', 'Tracking', 255, 255, nothing)
while(True):
ret, frame = cap.read()
if not ret: break  # stop when the video ends; cv.resize would fail on a None frame
frame = cv.resize(frame, (640, 480))
hsv= cv.cvtColor(frame, cv.COLOR_BGR2HSV)
#lh = cv.getTrackbarPos('l_h', 'Tracking')
#ls = cv.getTrackbarPos('l_s', 'Tracking')
#lv = cv.getTrackbarPos('l_v', 'Tracking')
#uh = cv.getTrackbarPos('u_h', 'Tracking')
#us = cv.getTrackbarPos('u_s', 'Tracking')
#uv = cv.getTrackbarPos('u_v', 'Tracking')
lh= 82
ls= 51
lv= 51
uh= 133
us= 255
uv= 255
lhb= 0
lsb= 0
lvb= 0
uhb= 255
usb= 255
uvb= 0
lbb= np.array([lhb, lsb, lvb])
ubb= np.array([uhb, usb, uvb])
lb= np.array([lh,ls,lv])
ub= np.array([uh,us,uv])
backg= cv.imread('pranay42.JPG')
backg= cv.resize(backg, (640,480))
mask= cv.inRange(hsv, lb, ub)
masknot= cv.bitwise_not(mask)
res= cv.bitwise_and(frame, frame, mask = masknot)
a= cv.bitwise_and(backg, backg, mask=mask)
fres= cv.addWeighted(res,1,a,1,1)
cv.imshow('frame', frame)
cv.imshow('fres', fres)
key= cv.waitKey(1)
if ret==True:
out.write(fres)
if key == 27:
break
cap.release()
cv.destroyAllWindows()
| pranayvarmas/Virtual-Keyboard | Mini-Projects/Invisible Cloak.py | Invisible Cloak.py | py | 1,721 | python | en | code | 0 | github-code | 6 |
11160464439 |
from django.db import models
from book_archive.models import Genre
from config.models import User
class BookRequest(models.Model):
title = models.CharField('Наименование', max_length=128)
author = models.CharField('Автор', max_length=128, null=True, blank=True)
genre = models.ForeignKey(Genre, on_delete=models.CASCADE, verbose_name='Жанры', null=True, blank=True)
year = models.PositiveIntegerField('Год', default=0)
created_at = models.DateTimeField('Дата запроса', auto_now_add=True)
user = models.ForeignKey(
User, null=True, blank=True,
on_delete=models.CASCADE, verbose_name='Кто добавил',
)
is_approved = models.BooleanField('Одобрено ли', null=True, blank=True)
class Meta:
db_table = 'book_request'
verbose_name = 'Запрос книги'
verbose_name_plural = 'Запросы книг'
def __str__(self):
return f'{self.title}'
| SliceOfMind/thesombot_web | book_request/models.py | models.py | py | 989 | python | en | code | 0 | github-code | 6 |
37349408 |
"""See `TestSet` for an example."""
from typing import Type, MutableSet
from tests.collection_testing import unordered_equal
class MutableSetTests:
mutable_set: Type = None
@classmethod
def create_mutable_set(cls) -> MutableSet:
return cls.mutable_set()
@staticmethod
def get_element(i):
return i
def test_add_coverage(self):
instance = self.create_mutable_set()
element1 = self.get_element(1)
instance.add(element1)
# And add a second time for good measure
instance.add(element1)
def test_discard_missing_element_passes(self):
instance = self.create_mutable_set()
element1 = self.get_element(1)
instance.discard(element1)
def test_discard_passes(self):
instance = self.create_mutable_set()
element1 = self.get_element(1)
instance.add(element1)
instance.discard(element1)
def test_contains_len(self):
instance = self.create_mutable_set()
element1 = self.get_element(1)
assert len(instance) == 0
assert element1 not in instance
instance.add(element1)
assert element1 in instance
assert len(instance) == 1
element2 = self.get_element(2)
assert element2 not in instance
instance.add(element2)
assert element2 in instance
assert len(instance) == 2
assert element1 in instance
instance.discard(element1)
assert element1 not in instance
assert len(instance) == 1
assert element2 in instance
instance.discard(element2)
assert element1 not in instance
assert element2 not in instance
assert len(instance) == 0
def test_iter(self):
instance = self.create_mutable_set()
element1 = self.get_element(1)
element2 = self.get_element(2)
assert list(iter(instance)) == []
instance.add(element1)
assert unordered_equal(iter(instance), [element1])
instance.add(element2)
assert unordered_equal(iter(instance), [element1, element2])
class TestMutableSet(MutableSetTests):
mutable_set = set
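# Note: any custom MutableSet implementation can reuse this suite by subclassing
# MutableSetTests and pointing `mutable_set` at the class under test, e.g.
#
#   class TestMySet(MutableSetTests):
#       mutable_set = MySet  # MySet is a hypothetical MutableSet implementation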
| BlackHC/mamo | tests/collection_testing/test_mutable_set.py | test_mutable_set.py | py | 2,179 | python | en | code | 0 | github-code | 6 |
10230332445 |
"""
This file defines the recorder classes which log eval results in different ways,
such as to a local JSON file or to a remote Snowflake database.
If you would like to implement a custom recorder, you can see how the
`LocalRecorder` and `Recorder` classes inherit from the `RecorderBase` class and
override certain methods.
"""
import atexit
import contextlib
import dataclasses
import logging
import threading
import time
from contextvars import ContextVar
from datetime import datetime, timezone
from typing import Any, List, Optional, Sequence, Text
import blobfile as bf
import requests
import evals
from evals.base import RunSpec
from evals.data import jsondumps
from evals.utils.misc import t
from evals.utils.snowflake import SnowflakeConnection
logger = logging.getLogger(__name__)
MIN_FLUSH_EVENTS = 100
MAX_SNOWFLAKE_BYTES = 16 * 10**6
MIN_FLUSH_SECONDS = 10
_default_recorder: ContextVar[Optional["RecorderBase"]] = ContextVar(
"default_recorder", default=None
)
def default_recorder() -> Optional["RecorderBase"]:
return _default_recorder.get()
@dataclasses.dataclass
class Event:
run_id: str
event_id: int
sample_id: Optional[str]
type: str
data: dict
created_by: str
created_at: str
class RecorderBase:
"""
The standard events for which recording methods are provided are:
- `match`: A match or non match, as specified by the `correct` bool, between
the `expected` and `picked` results.
- `embedding`: An embedding of the `prompt` of type `embedding_type`.
- `sampling`: What was `sampled` from the model given the input `prompt`.
- `cond_logp`: The conditional log probability, as `logp`, of the
`completion` from the model given the input `prompt`.
- `pick_option`: The option `picked` by the model out of the valid `options`
given the input `prompt`.
- `raw`: A raw sample specified by the `data`.
- `metrics`: A set of metrics specified by the `kwargs`.
- `error`: An `error` along with an accompanying `msg`.
- `extra`: Any extra `data` of interest to be recorded.
For these events, helper methods are defined at the bottom of this file.
More generally, you can record any event by calling `record_event` with the
event `type` and `data`.
Finally, you can also record a final report using `record_final_report`.
"""
def __init__(
self,
run_spec: evals.base.RunSpec,
) -> None:
self._sample_id: ContextVar[Optional[int]] = ContextVar("_sample_id", default=None)
self.run_spec = run_spec
self._events: List[Event] = []
self._last_flush_time = time.time()
self._flushes_done = 0
self._written_events = 0
self._flushes_started = 0
self._event_lock = threading.Lock()
self._paused_ids: List[str] = []
atexit.register(self.flush_events)
@contextlib.contextmanager
def as_default_recorder(self, sample_id: str):
sample_id_token = self._sample_id.set(sample_id)
default_recorder_token = _default_recorder.set(self)
yield
_default_recorder.reset(default_recorder_token)
self._sample_id.reset(sample_id_token)
def current_sample_id(self) -> Optional[str]:
return self._sample_id.get()
def pause(self):
sample_id = self.current_sample_id()
with self._event_lock:
if sample_id not in self._paused_ids:
self._paused_ids.append(sample_id)
def unpause(self):
sample_id = self.current_sample_id()
with self._event_lock:
if sample_id in self._paused_ids:
self._paused_ids.remove(sample_id)
def is_paused(self, sample_id: str = None):
if sample_id is None:
sample_id = self.current_sample_id()
with self._event_lock:
return sample_id in self._paused_ids
def get_events(self, type: str) -> Sequence[Event]:
with self._event_lock:
return [event for event in self._events if event.type == type]
def get_metrics(self):
return list(map(lambda x: x.data, self.get_events("metrics")))
def get_scores(self, key: str):
return list(map(lambda e: e.data[key], self.get_events("metrics")))
def _create_event(self, type, data=None, sample_id=None):
if sample_id is None:
sample_id = self.current_sample_id()
if sample_id is None:
raise ValueError("No sample_id set! Either pass it in or use as_default_recorder!")
return Event(
run_id=self.run_spec.run_id,
event_id=len(self._events),
type=type,
sample_id=sample_id,
data=data,
created_by=self.run_spec.created_by,
created_at=str(datetime.now(timezone.utc)),
)
def _flush_events_internal(self, events_to_write: Sequence[Event]):
pass
def flush_events(self):
with self._event_lock:
if len(self._events) == self._written_events:
return
events_to_write = self._events[self._written_events :]
self._written_events = len(self._events)
self._flushes_started += 1
self._flush_events_internal(events_to_write)
def record_event(self, type, data=None, sample_id=None):
if sample_id is None:
sample_id = self.current_sample_id()
if sample_id is None:
raise ValueError("No sample_id set! Either pass it in or use as_default_recorder!")
if self.is_paused(sample_id):
return
with self._event_lock:
event = Event(
run_id=self.run_spec.run_id,
event_id=len(self._events),
type=type,
sample_id=sample_id,
data=data,
created_by=self.run_spec.created_by,
created_at=str(datetime.now(timezone.utc)),
)
self._events.append(event)
if (
self._flushes_done < self._flushes_started
or len(self._events) < self._written_events + MIN_FLUSH_EVENTS
or time.time() < self._last_flush_time + MIN_FLUSH_SECONDS
):
return
events_to_write = self._events[self._written_events :]
self._written_events = len(self._events)
self._flushes_started += 1
self._flush_events_internal(events_to_write)
def record_match(self, correct: bool, *, expected=None, picked=None, sample_id=None, **extra):
assert isinstance(
correct, bool
), f"correct must be a bool, but was a {type(correct)}: {correct}"
if isinstance(expected, list) and len(expected) == 1:
expected = expected[0]
data = {
"correct": bool(correct),
"expected": expected,
"picked": picked,
**extra,
}
self.record_event("match", data, sample_id=sample_id)
def record_embedding(self, prompt, embedding_type, sample_id=None, **extra):
data = {
"prompt": prompt,
"embedding_type": embedding_type,
**extra,
}
self.record_event("embedding", data, sample_id=sample_id)
def record_sampling(self, prompt, sampled, sample_id=None, **extra):
data = {
"prompt": prompt,
"sampled": sampled,
**extra,
}
self.record_event("sampling", data, sample_id=sample_id)
def record_cond_logp(self, prompt, completion, logp, sample_id=None, **extra):
data = {
"prompt": prompt,
"completion": completion,
"logp": logp,
**extra,
}
self.record_event("cond_logp", data, sample_id=sample_id)
def record_pick_option(self, prompt, options, picked, sample_id=None, **extra):
data = {
"prompt": prompt,
"options": options,
"picked": picked,
**extra,
}
self.record_event("pick_option", data, sample_id=sample_id)
def record_raw(self, data):
self.record_event("raw_sample", data)
def record_metrics(self, **kwargs):
self.record_event("metrics", kwargs)
def record_error(self, msg: str, error: Exception, **kwargs):
data = {
"type": type(error).__name__,
"message": str(error),
}
data.update(kwargs)
self.record_event("error", data)
def record_extra(self, data, sample_id=None):
self.record_event("extra", data, sample_id=sample_id)
def record_final_report(self, final_report: Any):
logging.info(f"Final report: {final_report}. Not writing anywhere.")
def _green(str):
return f"\033[1;32m{str}\033[0m"
def _red(str):
return f"\033[1;31m{str}\033[0m"
class DummyRecorder(RecorderBase):
"""
A "recorder" which only logs certain events to the console.
Can be used by passing `--dry-run` when invoking `oaieval`.
"""
def __init__(self, run_spec: RunSpec, log: bool = True):
super().__init__(run_spec)
self.log = log
def record_event(self, type, data, sample_id=None):
from evals.registry import registry
if self.run_spec is None:
return
base_eval_spec = registry.get_base_eval(self.run_spec.base_eval)
if base_eval_spec and len(base_eval_spec.metrics) >= 1:
primary_metric = base_eval_spec.metrics[0]
else:
primary_metric = "accuracy"
with self._event_lock:
event = self._create_event(type, data)
self._events.append(event)
msg = f"Not recording event: {event}"
if type == "match":
accuracy_good = (
primary_metric == "accuracy" or primary_metric.startswith("pass@")
) and (data.get("correct", False) or data.get("accuracy", 0) > 0.5)
f1_score_good = primary_metric == "f1_score" and data.get("f1_score", 0) > 0.5
if accuracy_good or f1_score_good:
msg = _green(msg)
else:
msg = _red(msg)
if self.log:
logging.info(msg)
class LocalRecorder(RecorderBase):
"""
A recorder which logs events to the specified JSON file.
This is the default recorder used by `oaieval`.
"""
def __init__(self,
log_path: Optional[str],
run_spec: RunSpec,
hidden_data_fields: Sequence[Text] = []):
"""
Initializes a LocalRecorder.
Args:
log_path (Optional[str]): Path to which the LocalRecorder will
record events. Currently accepts local paths, google cloud
storage paths, or Azure blob paths.
run_spec (RunSpec): Passed to the superclass to provide metadata
about the current evals run.
hidden_data_fields (Sequence[Text]): Fields to avoid writing in the
output. This is particularly useful when using a language model
as an evaluator of sensitive customer data which should not be
written to disc.
"""
super().__init__(run_spec)
self.event_file_path = log_path
self.hidden_data_fields = hidden_data_fields
if log_path is not None:
with bf.BlobFile(log_path, "wb") as f:
f.write((jsondumps({"spec": dataclasses.asdict(run_spec)}) + "\n").encode("utf-8"))
def _flush_events_internal(self, events_to_write: Sequence[Event]):
start = time.time()
try:
lines = [jsondumps(event, exclude_keys=self.hidden_data_fields) + "\n" for event in events_to_write]
except TypeError as e:
logger.error(f"Failed to serialize events: {events_to_write}")
raise e
with bf.BlobFile(self.event_file_path, "ab") as f:
f.write(b"".join([line.encode("utf-8") for line in lines]))
logger.info(
f"Logged {len(lines)} rows of events to {self.event_file_path}: insert_time={t(time.time()-start)}"
)
self._last_flush_time = time.time()
self._flushes_done += 1
def record_final_report(self, final_report: Any):
with bf.BlobFile(self.event_file_path, "ab") as f:
f.write((jsondumps({"final_report": final_report}) + "\n").encode("utf-8"))
logging.info(f"Final report: {final_report}. Logged to {self.event_file_path}")
class HttpRecorder(RecorderBase):
def __init__(
self,
url: str,
run_spec: RunSpec,
local_fallback_path: str,
fail_percent_threshold: int = 5,
batch_size: int = 100,
):
super().__init__(run_spec)
self.url = url
self.batch_size = batch_size
self.fail_percent_threshold = fail_percent_threshold / 100
self.failed_requests = 0 # Add this line to track failed requests
self.local_fallback_path = local_fallback_path
self.local_fallback_recorder = LocalRecorder(local_fallback_path, run_spec)
logger.info(f"HttpRecorder initialized with URL {self.url}")
def _flush_events_internal(self, events_to_write: Sequence[Event]):
batch_size = self.batch_size
for i in range(0, len(events_to_write), batch_size):
batch = list(events_to_write[i : i + batch_size])
try:
self._send_event(batch)
except RuntimeError as e:
logger.error(f"Falling back to LocalRecorder due to error: {str(e)}")
self.local_fallback_recorder._flush_events_internal(batch)
raise RuntimeError(
"An error occurred when sending events. Your events have been saved locally using the Local recorder."
)
def _send_event(self, events: List[Event]):
# Convert the events to dictionaries
events_dict = [dataclasses.asdict(event) for event in events]
logger.debug(f"Sending events: {events_dict}")
try:
# Send the events to the specified URL
response = requests.post(self.url, json=events_dict)
# If the request succeeded, log a success message
if response.ok:
logger.debug(f"Events sent successfully")
# If the request failed, log a warning and increment failed_requests
else:
logger.warning(f"Failed to send events: {response.text}")
self.failed_requests += len(
events
) # Increase the count by the number of events in the failed request
except Exception as e:
logger.warning(f"Failed to send events: {str(e)}")
self.failed_requests += len(
events
) # Increase the count by the number of events in the failed request
# Check if the proportion of failed requests exceeds the threshold
fail_threshold = self.fail_percent_threshold
# Build a human-readable percentage string
fail_threshold_str = str(fail_threshold * 100) + "%"
if self.failed_requests / len(self._events) > fail_threshold:
raise RuntimeError(
"The proportion of failed events has exceeded the threshold of: "
+ fail_threshold_str
+ "."
+ " Falling back to LocalRecorder. "
"You can modify this via the cli flag --http-fail-percent-threshold"
)
def record_final_report(self, final_report: Any):
# Convert the final report to a dictionary and prepare it as an event
report_event = Event(
run_id=self.run_spec.run_id,
event_id=len(self._events),
sample_id=None, # or you could use a specific id for final reports
type="final_report",
data=final_report,
created_by=self.run_spec.created_by,
created_at=str(datetime.now(timezone.utc)),
)
# Send the final report event
try:
self._send_event([report_event])
logging.info(f"Final report: {final_report}.")
logging.info(f"Data logged to: {self.url}")
except RuntimeError as e:
logger.error(f"Falling back to LocalRecorder due to error: {str(e)}")
self.local_fallback_recorder.record_final_report(final_report)
class Recorder(RecorderBase):
"""
A recorder which logs events to Snowflake.
Can be used by passing `--no-local-run` when invoking `oaieval`.
"""
def __init__(
self,
log_path: Optional[str],
run_spec: RunSpec,
snowflake_connection: Optional[SnowflakeConnection] = None,
) -> None:
super().__init__(run_spec)
self.event_file_path = log_path
self._writing_lock = threading.Lock()
if snowflake_connection is None:
snowflake_connection = SnowflakeConnection()
self._conn = snowflake_connection
if log_path is not None:
with bf.BlobFile(log_path, "wb") as f:
f.write((jsondumps({"spec": dataclasses.asdict(run_spec)}) + "\n").encode("utf-8"))
query = """
INSERT ALL INTO runs (run_id, model_name, eval_name, base_eval, split, run_config, settings, created_by, created_at)
VALUES (%(run_id)s, %(model_name)s, %(eval_name)s, %(base_eval)s, %(split)s, run_config, settings, %(created_by)s, %(created_at)s)
SELECT PARSE_JSON(%(run_config)s) AS run_config, PARSE_JSON(%(settings)s) AS settings
"""
self._conn.robust_query(
command=query,
params={
"run_id": run_spec.run_id,
# TODO: model_name -> completion_fns
"model_name": jsondumps(dict(completions=run_spec.completion_fns)),
"eval_name": run_spec.eval_name,
"base_eval": run_spec.base_eval,
"split": run_spec.split,
"run_config": jsondumps(run_spec.run_config),
"settings": jsondumps(run_spec.run_config.get("initial_settings", {})),
"created_by": run_spec.created_by,
"created_at": run_spec.created_at,
},
)
atexit.register(self.flush_events)
def _flush_events_internal(self, events_to_write: Sequence[Event]):
with self._writing_lock:
try:
lines = [jsondumps(event) + "\n" for event in events_to_write]
except TypeError as e:
logger.error(f"Failed to serialize events: {events_to_write}")
raise e
idx_l = 0
while idx_l < len(events_to_write):
total_bytes = 0
idx_r = idx_l
while (
idx_r < len(events_to_write)
and total_bytes + len(lines[idx_r]) < MAX_SNOWFLAKE_BYTES
):
total_bytes += len(lines[idx_r])
idx_r += 1
assert idx_r > idx_l
start = time.time()
buffer = [
(
event.run_id,
event.event_id,
event.sample_id,
event.type,
jsondumps(event.data),
event.created_by,
event.created_at,
)
for event in events_to_write[idx_l:idx_r]
]
query = """
INSERT INTO events (run_id, event_id, sample_id, type, data, created_by, created_at)
SELECT Column1 AS run_id, Column2 as event_id, Column3 AS sample_id, Column4 AS type, PARSE_JSON(Column5) AS data, Column6 AS created_by, Column7 AS created_at
FROM VALUES(%s, %s, %s, %s, %s, %s, %s)
"""
self._conn.robust_query(command=query, seqparams=buffer, many=True)
logger.info(
f"Logged {len(buffer)} rows of events to Snowflake: insert_time={t(time.time()-start)}"
)
idx_l = idx_r
with bf.BlobFile(self.event_file_path, "ab") as f:
f.write(b"".join([line.encode("utf-8") for line in lines]))
self._last_flush_time = time.time()
self._flushes_done += 1
def record_final_report(self, final_report: Any):
with self._writing_lock:
with bf.BlobFile(self.event_file_path, "ab") as f:
f.write((jsondumps({"final_report": final_report}) + "\n").encode("utf-8"))
query = """
UPDATE runs
SET final_report = PARSE_JSON(%(final_report)s)
WHERE run_id = %(run_id)s
"""
self._conn.robust_query(
command=query,
params={
"run_id": self.run_spec.run_id,
"final_report": jsondumps(final_report),
},
)
def record_event(self, type, data=None, sample_id=None):
# try to serialize data so we fail early!
_ = jsondumps(data)
return super().record_event(type, data, sample_id)
#########################################################################
### Helper methods which use the thread local global default recorder ###
#########################################################################
def current_sample_id() -> str:
return default_recorder().current_sample_id()  # current_sample_id is a method, so it must be called
def record_match(correct: bool, *, expected=None, picked=None, **extra):
return default_recorder().record_match(correct, expected=expected, picked=picked, **extra)
def record_embedding(prompt, embedding_type, **extra):
return default_recorder().record_embedding(prompt, embedding_type, **extra)
def record_sampling(prompt, sampled, **extra):
return default_recorder().record_sampling(prompt, sampled, **extra)
def record_cond_logp(prompt, completion, logp, **extra):
return default_recorder().record_cond_logp(prompt, completion, logp, **extra)
def record_pick_option(prompt, options, picked, **extra):
return default_recorder().record_pick_option(prompt, options, picked, **extra)
def record_raw(data):
return default_recorder().record_raw(data)
def record_metrics(**extra):
return default_recorder().record_metrics(**extra)
def record_error(msg: str, error: Exception = None, **extra):
return default_recorder().record_error(msg, error, **extra)
def record_extra(data):
return default_recorder().record_extra(data)
def record_event(type, data=None, sample_id=None):
return default_recorder().record_event(type, data, sample_id)
def pause():
return default_recorder().pause()
def unpause():
return default_recorder().unpause()
|
openai/evals
|
evals/record.py
|
record.py
|
py
| 23,030 |
python
|
en
|
code
| 12,495 |
github-code
|
6
|
16013407471
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
img=cv2.imread('/home/hasantha/Desktop/repos/old-yolov4-deepsort-master/data/download2.png' ,0)
#img=img[423:998,806:1408]
ret, bw_img = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY) #165
kernel = np.ones((1,1),np.uint8)
#erosion = cv2.erode(img,kernel,iterations = 1)
opening = cv2.morphologyEx(bw_img, cv2.MORPH_OPEN, kernel)
#closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
#Append lane line coordinates to a matrix
lane_line_co=[]
x_cord_lane = np.where(opening == 255)[1]#+806 #+805
y_cord_lane = np.where(opening == 255)[0]#+423 #+389
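# np.where on the 2D mask returns (row_indices, col_indices), so [1] gives x (columns)
# and [0] gives y (rows) of the white (255) lane-line pixels found above.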
for q in range(0, len(x_cord_lane)):
lane_line_co.append((x_cord_lane[q],y_cord_lane[q]))
def get_lane_line_co():
return x_cord_lane,y_cord_lane,lane_line_co
#print(lane_line_co)
#print(get_lane_line_co()[2])
|
hasantha-nirmal/Traffic_Violation_Detection_Yolov4_Deep-Sort
|
lane_line_extract3.py
|
lane_line_extract3.py
|
py
| 850 |
python
|
en
|
code
| 23 |
github-code
|
6
|
1160957146
|
#!/bin/python3
import math
import os
import random
import re
import sys
def getMaxStreaks(toss):
    # result[0] = longest consecutive run of "Heads", result[1] = longest run of "Tails"
    result = [0, 0]
    for num in range(len(toss)):
        repeat = 0
        if toss[num] == "Heads":
            for t in toss[num:]:
                if t != "Heads":
                    break
                repeat += 1
            if repeat > result[0]:
                result[0] = repeat
        else:
            for t in toss[num:]:
                if t != "Tails":
                    break
                repeat += 1
            if repeat > result[1]:
                result[1] = repeat
    return result
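# A hedged aside (not part of the original submission): the same result can be computed
# in a single pass by resetting a running counter whenever the outcome changes, e.g.:
#
#   def get_max_streaks(toss):
#       best = [0, 0]
#       run = 0
#       for i, t in enumerate(toss):
#           run = run + 1 if i > 0 and toss[i - 1] == t else 1
#           idx = 0 if t == "Heads" else 1
#           best[idx] = max(best[idx], run)
#       return best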
if __name__ == '__main__':
    # fptr was undefined in the original; open the output file as in the standard
    # HackerRank template, falling back to stdout when OUTPUT_PATH is not set.
    output_path = os.environ.get('OUTPUT_PATH')
    fptr = open(output_path, 'w') if output_path else sys.stdout
    toss_count = int(input().strip())
    toss = []
    for _ in range(toss_count):
        toss_item = input()
        toss.append(toss_item)
    ans = getMaxStreaks(toss)
    fptr.write(' '.join(map(str, ans)))
    fptr.write('\n')
    if fptr is not sys.stdout:
        fptr.close()
|
ryanstang/Interaction-Simulation
|
simulton.py
|
simulton.py
|
py
| 914 |
python
|
en
|
code
| 0 |
github-code
|
6
|
810151506
|
# Add Binary - https://leetcode.com/problems/add-binary/
'''Given two binary strings a and b, return their sum as a binary string.
Example 1:
Input: a = "11", b = "1"
Output: "100"
Example 2:
Input: a = "1010", b = "1011"
Output: "10101"'''
class Solution:
def addBinary(self, a: str, b: str) -> str:
maxLength = max(len(a), len(b))
a = a.zfill(maxLength)
b = b.zfill(maxLength)
carry = 0
output = ""
for i in range(maxLength - 1, -1, -1):
if a[i] == "1":
carry += 1
if b[i] == "1":
carry += 1
if carry % 2 == 1:
output += "1"
else:
output += "0"
carry //= 2
if carry:
output += str(carry)
return output[::-1]
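# Quick sanity check (illustrative, not part of the original solution):
#   Solution().addBinary("11", "1")       # -> "100"
#   Solution().addBinary("1010", "1011")  # -> "10101"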
|
Saima-Chaity/Leetcode
|
Array_String/Add Binary.py
|
Add Binary.py
|
py
| 823 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20497360293
|
import random
from datetime import datetime
ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
conteudo = 'There are many variations of passages of Lorem Ipsum available, but the ' \
'majority have suffered alteration in some form, by injected humour, or randomised ' \
'words which look even slightly believable. '
excerto = 'Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots' \
' in a piece of classical Latin'
now = datetime.now()
year = str(now.year).zfill(4)
month = str(now.month).zfill(2)
hour = str(now.hour).zfill(2)
minute = str(now.minute).zfill(2)
second = str(now.second).zfill(2)
for categoria_post_id in range(1, 5):  # assumes 4 categories exist
    for i in range(5):  # 5 posts for each category
titulo_post = ' '.join([''.join(random.choices(ALPHABET, k=random.randint(3, 8))) for i in range(3)])
data_post = f'{year}-{month}-{str((now.day - i) % 29).zfill(2)} {hour}:{minute}:{second}'
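        # Note: (now.day - i) % 29 only keeps the generated day within the current month
        # window; when it evaluates to 0 the resulting date string would be invalid.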
conteudo_post = (conteudo * random.randint(3, 25) + '.</br>') * 5
excerto_post = excerto * random.randint(3, 5) + '. '
        imagem_post = f'post_img/2022/02/26/13estacio-sistemas.jpg'  # change to an image you have already uploaded
publicado_post = random.randint(0, 1)
        autor_post_id = 1  # id of your superuser
sql_post = (f"INSERT INTO blog_django.posts_post"
f"(titulo_post,data_post,conteudo_post,excerto_post,imagem_post,"
f"publicado_post,autor_post_id,categoria_post_id)"
f"VALUES ('{titulo_post}','{data_post}','{conteudo_post}',"
f"'{excerto_post}','{imagem_post}',{publicado_post},"
f"{autor_post_id},{categoria_post_id});")
print(sql_post)
|
Adriano1976/Curso-de-Python
|
Secao11-Django-com-Python-Projetos/Projeto-Blog/posts-generator.py
|
posts-generator.py
|
py
| 1,787 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
33787246110
|
from itertools import chain
import os
import pytest
@pytest.fixture(scope="module")
def organization_id():
"""Get Organization ID from the environment variable """
return os.environ["GCLOUD_ORGANIZATION"]
@pytest.fixture(scope="module")
def source_name(organization_id):
from google.cloud import securitycenter
client = securitycenter.SecurityCenterClient()
org_name = "organizations/{org_id}".format(org_id=organization_id)
source = client.create_source(
org_name,
{
"display_name": "Unit test source",
"description": "A new custom source that does X",
},
)
return source.name
def test_create_source(organization_id):
"""Create a new findings source. """
# [START create_source]
from google.cloud import securitycenter
client = securitycenter.SecurityCenterClient()
# organization_id is the numeric ID of the organization. e.g.:
# organization_id = "111122222444"
org_name = "organizations/{org_id}".format(org_id=organization_id)
created = client.create_source(
org_name,
{
"display_name": "Customized Display Name",
"description": "A new custom source that does X",
},
)
print("Created Source: {}".format(created.name))
# [END create_source]
def test_get_source(source_name):
"""Gets an existing source."""
# [START get_source]
from google.cloud import securitycenter
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
source = client.get_source(source_name)
print("Source: {}".format(source))
# [END get_source]
def test_update_source(source_name):
"""Updates a source's display name."""
# [START update_source]
from google.cloud import securitycenter
from google.protobuf import field_mask_pb2
client = securitycenter.SecurityCenterClient()
# Field mask to only update the display name.
field_mask = field_mask_pb2.FieldMask(paths=["display_name"])
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
updated = client.update_source(
{"name": source_name, "display_name": "Updated Display Name"},
update_mask=field_mask,
)
print("Updated Source: {}".format(updated))
# [END update_source]
assert updated.display_name == "Updated Display Name"
def test_add_user_to_source(source_name):
"""Gives a user findingsEditor permission to the source."""
user_email = "[email protected]"
# [START update_source_iam]
from google.cloud import securitycenter
from google.iam.v1 import policy_pb2
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
# Get the old policy so we can do an incremental update.
old_policy = client.get_iam_policy(source_name)
print("Old Policy: {}".format(old_policy))
# Setup a new IAM binding.
binding = policy_pb2.Binding()
binding.role = "roles/securitycenter.findingsEditor"
# user_email is an e-mail address known to Cloud IAM (e.g. a gmail address).
    # user_email = [email protected]
binding.members.append("user:{}".format(user_email))
    # Setting the e-tag avoids over-writing the existing policy.
updated = client.set_iam_policy(
source_name, {"etag": old_policy.etag, "bindings": [binding]}
)
print("Updated Policy: {}".format(updated))
# [END update_source_iam]
assert any(
member == "user:[email protected]"
for member in chain.from_iterable(
binding.members for binding in updated.bindings
)
)
def test_list_source(organization_id):
"""Lists finding sources."""
i = -1
# [START list_sources]
from google.cloud import securitycenter
# Create a new client.
client = securitycenter.SecurityCenterClient()
# organization_id is the numeric ID of the organization. e.g.:
# organization_id = "111122222444"
org_name = "organizations/{org_id}".format(org_id=organization_id)
# Call the API and print out each existing source.
for i, source in enumerate(client.list_sources(org_name)):
print(i, source)
# [END list_sources]
assert i >= 0
def test_create_finding(source_name):
"""Creates a new finding."""
# [START create_finding]
from google.cloud import securitycenter
from google.cloud.securitycenter_v1.proto.finding_pb2 import Finding
from google.protobuf.timestamp_pb2 import Timestamp
# Create a new client.
client = securitycenter.SecurityCenterClient()
# Use the current time as the finding "event time".
now_proto = Timestamp()
now_proto.GetCurrentTime()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
# Controlled by caller.
finding_id = "samplefindingid"
# The resource this finding applies to. The CSCC UI can link
# the findings for a resource to the corresponding Asset of a resource
# if there are matches.
resource_name = "//cloudresourcemanager.googleapis.com/organizations/11232"
# Call The API.
created_finding = client.create_finding(
source_name,
finding_id,
{
"state": Finding.ACTIVE,
"resource_name": resource_name,
"category": "MEDIUM_RISK_ONE",
"event_time": now_proto,
},
)
print(created_finding)
# [END create_finding]
assert len(created_finding.name) > 0
def test_create_finding_with_source_properties(source_name):
"""Demonstrate creating a new finding with source properties. """
# [START create_finding_with_properties]
from google.cloud import securitycenter
from google.cloud.securitycenter_v1.proto.finding_pb2 import Finding
from google.protobuf.timestamp_pb2 import Timestamp
from google.protobuf.struct_pb2 import Value
# Create a new client.
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
# Controlled by caller.
finding_id = "samplefindingid2"
# The resource this finding applies to. The CSCC UI can link
# the findings for a resource to the corresponding Asset of a resource
# if there are matches.
resource_name = "//cloudresourcemanager.googleapis.com/organizations/11232"
# Define source properties values as protobuf "Value" objects.
str_value = Value()
str_value.string_value = "string_example"
num_value = Value()
num_value.number_value = 1234
# Use the current time as the finding "event time".
now_proto = Timestamp()
now_proto.GetCurrentTime()
created_finding = client.create_finding(
source_name,
finding_id,
{
"state": Finding.ACTIVE,
"resource_name": resource_name,
"category": "MEDIUM_RISK_ONE",
"source_properties": {"s_value": str_value, "n_value": num_value},
"event_time": now_proto,
},
)
print(created_finding)
# [END create_finding_with_properties]
def test_update_finding(source_name):
# [START update_finding]
from google.cloud import securitycenter
from google.protobuf.struct_pb2 import Value
from google.protobuf import field_mask_pb2
from google.protobuf.timestamp_pb2 import Timestamp
client = securitycenter.SecurityCenterClient()
# Only update the specific source property and event_time. event_time
# is required for updates.
field_mask = field_mask_pb2.FieldMask(
paths=["source_properties.s_value", "event_time"]
)
value = Value()
value.string_value = "new_string"
    # Set the update time to now. This must be some time greater than the
# event_time on the original finding.
now_proto = Timestamp()
now_proto.GetCurrentTime()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
finding_name = "{}/findings/samplefindingid2".format(source_name)
updated_finding = client.update_finding(
{
"name": finding_name,
"source_properties": {"s_value": value},
"event_time": now_proto,
},
update_mask=field_mask,
)
print(
"New Source properties: {}, Event Time {}".format(
updated_finding.source_properties, updated_finding.event_time.ToDatetime()
)
)
# [END update_finding]
def test_update_finding_state(source_name):
"""Demonstrate updating only a finding state."""
# [START update_finding_state]
from google.cloud import securitycenter
from google.cloud.securitycenter_v1.proto.finding_pb2 import Finding
from google.protobuf.timestamp_pb2 import Timestamp
from datetime import datetime
# Create a client.
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
finding_name = "{}/findings/samplefindingid2".format(source_name)
now_proto = Timestamp()
now_proto.GetCurrentTime()
# Call the API to change the finding state to inactive as of now.
new_finding = client.set_finding_state(
finding_name, Finding.INACTIVE, start_time=now_proto
)
print("New state: {}".format(Finding.State.Name(new_finding.state)))
# [END update_finding_state]
def test_trouble_shoot(source_name):
"""Demonstrate calling test_iam_permissions to determine if the
    service account has the correct permissions."""
# [START test_iam_permissions]
from google.cloud import securitycenter
# Create a client.
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
    # Check for permissions to call create_finding or update_finding.
permission_response = client.test_iam_permissions(
source_name, ["securitycenter.findings.update"]
)
print(
"Permision to create or update findings? {}".format(
len(permission_response.permissions) > 0
)
)
# [END test_iam_permissions]
assert len(permission_response.permissions) > 0
# [START test_iam_permissions]
# Check for permissions necessary to call set_finding_state.
permission_response = client.test_iam_permissions(
source_name, ["securitycenter.findings.setState"]
)
print(
"Permision to update state? {}".format(len(permission_response.permissions) > 0)
)
# [END test_iam_permissions]
assert len(permission_response.permissions) > 0
def test_list_all_findings(organization_id):
# [START list_all_findings]
from google.cloud import securitycenter
# Create a client.
client = securitycenter.SecurityCenterClient()
# organization_id is the numeric ID of the organization. e.g.:
# organization_id = "111122222444"
org_name = "organizations/{org_id}".format(org_id=organization_id)
# The "sources/-" suffix lists findings across all sources. You
# also use a specific source_name instead.
all_sources = "{org_name}/sources/-".format(org_name=org_name)
finding_result_iterator = client.list_findings(all_sources)
for i, finding_result in enumerate(finding_result_iterator):
print(
"{}: name: {} resource: {}".format(
i, finding_result.finding.name, finding_result.finding.resource_name
)
)
# [END list_all_findings]
assert i > 0
def test_list_filtered_findings(source_name):
# [START list_filtered_findings]
from google.cloud import securitycenter
# Create a new client.
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
    # You can also use a wild-card "-" for all sources:
# source_name = "organizations/111122222444/sources/-"
finding_result_iterator = client.list_findings(
source_name, filter_='category="MEDIUM_RISK_ONE"'
)
    # Iterate and print all finding names and the resource they
    # refer to.
for i, finding_result in enumerate(finding_result_iterator):
print(
"{}: name: {} resource: {}".format(
i, finding_result.finding.name, finding_result.finding.resource_name
)
)
# [END list_filtered_findings]
assert i > 0
def test_list_findings_at_time(source_name):
# [START list_findings_at_a_time]
from google.cloud import securitycenter
from google.protobuf.timestamp_pb2 import Timestamp
from datetime import timedelta, datetime
# Create a new client.
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
    # You can also use a wild-card "-" for all sources:
# source_name = "organizations/111122222444/sources/-"
five_days_ago = Timestamp()
five_days_ago.FromDatetime(datetime.now() - timedelta(days=5))
# [END list_findings_at_a_time]
i = -1
five_days_ago.FromDatetime(datetime(2019, 3, 5, 0, 0, 0))
# [START list_findings_at_a_time]
finding_result_iterator = client.list_findings(source_name, read_time=five_days_ago)
for i, finding_result in enumerate(finding_result_iterator):
print(
"{}: name: {} resource: {}".format(
i, finding_result.finding.name, finding_result.finding.resource_name
)
)
# [END list_findings_at_a_time]
assert i == -1
def test_get_iam_policy(source_name):
"""Gives a user findingsEditor permission to the source."""
user_email = "[email protected]"
# [START get_source_iam]
from google.cloud import securitycenter
from google.iam.v1 import policy_pb2
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
# Get the old policy so we can do an incremental update.
policy = client.get_iam_policy(source_name)
print("Policy: {}".format(policy))
# [END get_source_iam]
def test_group_all_findings(organization_id):
"""Demonstrates grouping all findings across an organization."""
# [START group_all_findings]
from google.cloud import securitycenter
# Create a client.
client = securitycenter.SecurityCenterClient()
# organization_id is the numeric ID of the organization. e.g.:
# organization_id = "111122222444"
org_name = "organizations/{org_id}".format(org_id=organization_id)
# The "sources/-" suffix lists findings across all sources. You
# also use a specific source_name instead.
all_sources = "{org_name}/sources/-".format(org_name=org_name)
group_result_iterator = client.group_findings(all_sources, group_by="category")
for i, group_result in enumerate(group_result_iterator):
print((i + 1), group_result)
# [END group_all_findings]
assert i > 0
def test_group_filtered_findings(source_name):
"""Demonstrates grouping all findings across an organization."""
# [START group_filtered_findings]
from google.cloud import securitycenter
# Create a client.
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
group_result_iterator = client.group_findings(
source_name, group_by="category", filter_='state="ACTIVE"'
)
for i, group_result in enumerate(group_result_iterator):
print((i + 1), group_result)
# [END group_filtered_findings]
assert i == 0
def test_group_findings_at_time(source_name):
"""Demonstrates grouping all findings across an organization as of
a specific time."""
i = -1
# [START group_findings_at_time]
from datetime import datetime, timedelta
from google.cloud import securitycenter
from google.protobuf.timestamp_pb2 import Timestamp
# Create a client.
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
# Group findings as of yesterday.
read_time = datetime.utcnow() - timedelta(days=1)
timestamp_proto = Timestamp()
timestamp_proto.FromDatetime(read_time)
group_result_iterator = client.group_findings(
source_name, group_by="category", read_time=timestamp_proto
)
for i, group_result in enumerate(group_result_iterator):
print((i + 1), group_result)
    # [END group_findings_at_time]
assert i == -1
def test_group_findings_and_changes(source_name):
"""Demonstrates grouping all findings across an organization and
associated changes."""
# [START group_filtered_findings_with_changes]
from datetime import timedelta
from google.cloud import securitycenter
from google.protobuf.duration_pb2 import Duration
# Create a client.
client = securitycenter.SecurityCenterClient()
# source_name is the resource path for a source that has been
# created previously (you can use list_sources to find a specific one).
# Its format is:
# source_name = "organizations/{organization_id}/sources/{source_id}"
# e.g.:
# source_name = "organizations/111122222444/sources/1234"
# List assets and their state change the last 30 days
compare_delta = timedelta(days=30)
# Convert the timedelta to a Duration
duration_proto = Duration()
duration_proto.FromTimedelta(compare_delta)
group_result_iterator = client.group_findings(
source_name, group_by="state_change", compare_duration=duration_proto
)
for i, group_result in enumerate(group_result_iterator):
print((i + 1), group_result)
    # [END group_filtered_findings_with_changes]
assert i == 0
|
silverdev/google-cloud-python
|
securitycenter/docs/snippets_findings.py
|
snippets_findings.py
|
py
| 20,863 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15826998547
|
import random
from hackgen import TestInputFormat, TestGenerator, Language
class ClockDelayInputFormat(TestInputFormat):
"""
Input format of Clock Delay challenge.
https://www.hackerrank.com/contests/hourrank-28/challenges/clock-delay
"""
# difficulty levels with test file number
# difficulty level is [0-9]
__diff = [(5, 10), (10, 30), (50, 100), (100, 300), (100, 300),
(300, 600), (600, 900), (800, 1000), (900, 1000), (950, 1000)]
def inputs(self, difficult_level: int) -> None:
q = random.randint(*self.__diff[difficult_level]) # number of test cases
print(q)
for n in range(q):
# constraints for h1 m1 h2 m2 k
h1 = random.randint(0, 23)
m1 = random.randint(0, 60)
h2 = random.randint(h1, 24)
k = random.randint(h2 - h1 + 1 if h1 == h2 else h2 - h1, 24 - h1)
m2 = random.randint(0, (m1 if h1 + k == h2 else 60))
print(h1, m1, h2, m2)
print(k)
# input format instance
input_format = ClockDelayInputFormat()
# try with Language.java('Logic') also
test_generator = TestGenerator(10, input_format, Language.python('logic'), "ClockDelay")
test_generator.run()
|
renuka-fernando/hackgen
|
examples/clockdelay/clock_delay.py
|
clock_delay.py
|
py
| 1,236 |
python
|
en
|
code
| 11 |
github-code
|
6
|
5033798447
|
import random
import operator
RULE = "What is the result of the expression?"
def creating_quiestion_and_answer():
number1 = random.randint(1, 10)
number2 = random.randint(1, 10)
operation, function = random.choice([
('+', operator.add),
('-', operator.sub),
('*', operator.mul),
])
answer = function(number1, number2)
return "{} {} {}".format(number1, operation, number2), str(answer)
|
xegrassa/python-project-lvl1
|
brain_games/games/brain_calc.py
|
brain_calc.py
|
py
| 437 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72532676669
|
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
import urllib.parse
from pathlib import Path
from random import choice
from typing import Awaitable, Callable
import pytest
from aiohttp import web
from aiohttp.test_utils import TestClient
from faker import Faker
from models_library.api_schemas_storage import FileMetaDataGet, SimcoreS3FileID
from models_library.projects import ProjectID
from models_library.users import UserID
from pydantic import ByteSize, parse_obj_as
from pytest_simcore.helpers.utils_assert import assert_status
pytest_simcore_core_services_selection = ["postgres"]
pytest_simcore_ops_services_selection = ["adminer"]
async def test_get_files_metadata(
upload_file: Callable[[ByteSize, str], Awaitable[tuple[Path, SimcoreS3FileID]]],
client: TestClient,
user_id: UserID,
location_id: int,
project_id: ProjectID,
faker: Faker,
):
assert client.app
url = (
client.app.router["get_files_metadata"]
.url_for(location_id=f"{location_id}")
.with_query(user_id=f"{user_id}")
)
# this should return an empty list
response = await client.get(f"{url}")
data, error = await assert_status(response, web.HTTPOk)
assert not error
list_fmds = parse_obj_as(list[FileMetaDataGet], data)
assert not list_fmds
# now add some stuff there
NUM_FILES = 10
file_size = parse_obj_as(ByteSize, "15Mib")
files_owned_by_us = []
for _ in range(NUM_FILES):
files_owned_by_us.append(await upload_file(file_size, faker.file_name()))
# we should find these files now
response = await client.get(f"{url}")
data, error = await assert_status(response, web.HTTPOk)
assert not error
list_fmds = parse_obj_as(list[FileMetaDataGet], data)
assert len(list_fmds) == NUM_FILES
# create some more files but with a base common name
NUM_FILES = 10
file_size = parse_obj_as(ByteSize, "15Mib")
files_with_common_name = []
for _ in range(NUM_FILES):
files_with_common_name.append(
await upload_file(file_size, f"common_name-{faker.file_name()}")
)
# we should find these files now
response = await client.get(f"{url}")
data, error = await assert_status(response, web.HTTPOk)
assert not error
list_fmds = parse_obj_as(list[FileMetaDataGet], data)
assert len(list_fmds) == (2 * NUM_FILES)
# we can filter them now
response = await client.get(f"{url.update_query(uuid_filter='common_name')}")
data, error = await assert_status(response, web.HTTPOk)
assert not error
list_fmds = parse_obj_as(list[FileMetaDataGet], data)
assert len(list_fmds) == (NUM_FILES)
@pytest.mark.xfail(
reason="storage get_file_metadata must return a 200 with no payload as long as legacy services are around!!"
)
async def test_get_file_metadata_is_legacy_services_compatible(
client: TestClient,
user_id: UserID,
location_id: int,
simcore_file_id: SimcoreS3FileID,
):
assert client.app
url = (
client.app.router["get_file_metadata"]
.url_for(
location_id=f"{location_id}",
file_id=f"{urllib.parse.quote(simcore_file_id, safe='')}",
)
.with_query(user_id=f"{user_id}")
)
# this should return an empty list
response = await client.get(f"{url}")
await assert_status(response, web.HTTPNotFound)
async def test_get_file_metadata(
upload_file: Callable[[ByteSize, str], Awaitable[tuple[Path, SimcoreS3FileID]]],
client: TestClient,
user_id: UserID,
location_id: int,
project_id: ProjectID,
simcore_file_id: SimcoreS3FileID,
faker: Faker,
):
assert client.app
url = (
client.app.router["get_file_metadata"]
.url_for(
location_id=f"{location_id}",
file_id=f"{urllib.parse.quote(simcore_file_id, safe='')}",
)
.with_query(user_id=f"{user_id}")
)
# this should return an empty list
response = await client.get(f"{url}")
# await assert_status(response, web.HTTPNotFound)
# NOTE: This needs to be a Ok response with empty data until ALL legacy services are gone, then it should be changed to 404! see test above
assert response.status == web.HTTPOk.status_code
assert await response.json() == {"data": {}, "error": "No result found"}
# now add some stuff there
NUM_FILES = 10
file_size = parse_obj_as(ByteSize, "15Mib")
files_owned_by_us = []
for _ in range(NUM_FILES):
files_owned_by_us.append(await upload_file(file_size, faker.file_name()))
selected_file, selected_file_uuid = choice(files_owned_by_us)
url = (
client.app.router["get_file_metadata"]
.url_for(
location_id=f"{location_id}",
file_id=f"{urllib.parse.quote(selected_file_uuid, safe='')}",
)
.with_query(user_id=f"{user_id}")
)
response = await client.get(f"{url}")
data, error = await assert_status(response, web.HTTPOk)
assert not error
assert data
fmd = parse_obj_as(FileMetaDataGet, data)
assert fmd.file_id == selected_file_uuid
assert fmd.file_size == selected_file.stat().st_size
|
ITISFoundation/osparc-simcore
|
services/storage/tests/unit/test_handlers_files_metadata.py
|
test_handlers_files_metadata.py
|
py
| 5,239 |
python
|
en
|
code
| 35 |
github-code
|
6
|
74131098748
|
# coding:utf8
from app.game.core.Room import Room
from app.game.action import change
from app.util.common import func
from app.util.defines import dbname, status, games, origins, rule
from app.util.driver import dbexecute
class RoomPoker(Room):
def __init__(self):
super(RoomPoker, self).__init__()
        self._dispatch_turn = []                # order in which players played their cards [account_id, ...]
        self._special_account_id = 0            # account ID of the special-card holder in the "monkey" variant
@property
def special_account_id(self):
return self._special_account_id
def choose_special_account_id(self):
if self._room_help == games.HELP_POKER_SPECIAL:
for player in self._players.values():
if player.is_special_card():
self._special_account_id = player.account_id
break
else:
self._special_account_id = 0
def is_special(self, account_id):
if self._room_help == games.HELP_POKER_SPECIAL and account_id == self._special_account_id:
return True
return False
@property
def operators(self):
return {}, {}
def get_original_execute(self):
if self._pre_win_account_id == 0:
            # HeiTao3 (3 of spades)
for player in self._players.values():
if 1 in player.card_list:
self._pre_win_account_id = player.account_id
break
return self._pre_win_account_id
def get_room_data(self, account_id):
user_list = []
for _player in self._players.values():
user_list.append(_player.get_data())
player = self.get_player(account_id)
return {
'user_room': user_list,
'user_cards': player.card_list,
'execute_account_id': self._execute_account_id,
'last_account_id': self._last_account_id,
'last_cards': self._last_cards,
'user_id': self._account_id,
'rounds': self._rounds,
'max_rounds': self._max_rounds
}
def room_reset(self):
for player in self._players.values():
player.status = status.PLAYER_STATUS_NORMAL
player.player_reset()
self._cards = []
self._ready_list = []
self._execute_account_id = 0
self._last_account_id = 0
self._last_cards = []
self._switch_account_id = 0
self._rounds += 1
self._dispatch_turn = []
def add_dispatch_turn(self, account_id):
if account_id not in self._dispatch_turn:
self._dispatch_turn.append(account_id)
def room_point_change(self):
card_full_count = self._config['original_count']
all_player_info = dict()
win_player = None
win_point = 0
if self.is_special(self._pre_win_account_id):
special_ratio = 2
else:
special_ratio = 1
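        # Settlement (as implemented below): every losing player is deducted points
        # proportional to the cards left in hand (doubled for a "full" hand or when a
        # special player is involved), and the round winner collects the total deducted.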
turn_list = self._dispatch_turn
for _account_id in self._player_list:
if _account_id not in turn_list:
turn_list.append(_account_id)
_player = self.get_player(_account_id)
left_card_count = _player.get_card_count()
change_point = 0
if _account_id != self._pre_win_account_id:
if self.is_special(_account_id):
special_dec_ratio = 2
else:
special_dec_ratio = 1
if left_card_count >= card_full_count:
full_point = card_full_count * 2 * special_ratio * special_dec_ratio
change_point = -full_point
win_point += full_point
elif left_card_count > 1:
dec_point = left_card_count * special_ratio * special_dec_ratio
change_point = -dec_point
win_point += dec_point
else:
change_point = 0
_player.lose_count = 1
_player.point_change(change_point)
if self.is_online_match():
change_gold = change_point * rule.ONLINE_RATIO
change.spend_gold(_account_id, -change_gold, origins.ORIGIN_ONLINE_MATCH)
_player.last_change_gold = -change_gold
else:
win_player = _player
_player.win_count = 1
all_player_info[_player.account_id] = {
'left_card_count': left_card_count,
'disptach_cards': _player.close_cards,
'change_point': change_point,
'point': _player.point
}
if win_player and win_point > 0:
win_player.point_change(win_point)
if self.is_online_match():
change_gold = win_point * rule.ONLINE_RATIO
change.award_gold(win_player.account_id, change_gold, origins.ORIGIN_ONLINE_MATCH)
win_player.last_change_gold = change_gold
info = all_player_info[win_player.account_id]
info['change_point'] = win_point
info['point'] = win_player.point
_info = []
for _account_id in turn_list:
_info.append(
[_account_id, all_player_info[_account_id]]
)
return _info
def room_save(self):
dbexecute.update_record(
table=dbname.DB_ROOM,
where={'room_id': self._room_id},
data=self.get_save_data())
def get_save_data(self):
return {'data': func.transform_object_to_pickle(self)}
|
alex-my/game-poker-server
|
app/game/core/RoomPoker.py
|
RoomPoker.py
|
py
| 5,616 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39459918108
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.home, name='index'),
# url(r'^games/(?P<steamid>[0-9]+)$', views.games, name='games'),
url(r'^home/$', views.home, name='home'),
url(r'^games/', views.games, name='games'),
url(r'^friends/', views.friends, name='friends'),
url(r'^calculator/', views.calculator, name='calculator')
]
|
ryanchesler/allauth_django
|
core/urls.py
|
urls.py
|
py
| 401 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34303995178
|
from tkinter import *
root = Tk()
root.title("Hello GUI") # set the window title
root.geometry("700x500+350+150") # width x height + x offset + y offset
root.resizable(False, False) # whether the window can be resized in x, y
listbox = Listbox(root, selectmode="extended", height=3) # if height is 0, every item is shown
listbox.insert(0, "루저 걸")
listbox.insert(1, "저승으로 가는 버스에 타고 안녕")
listbox.insert(2, "나 홀로 숨바꼭질")
listbox.insert(3, "Rock한 너와 작별이야")
listbox.insert(END, "비교당하는 아이")
listbox.insert(END, "진흙탕 주제에 나만의 소중함을 빼앗으려 하다니")
listbox.pack()
def BtnCmd():
    # listbox.delete(END) # delete the last item
    # listbox.delete(1) # delete the item at the given index
    print(listbox.size()) # check the number of items
    print("Titles of songs 1 through 3: ", listbox.get(0,2))
    print("Indices of the currently selected songs: ", listbox.curselection())
btn = Button(root, text="HIHI", command=BtnCmd)
btn.pack()
root.mainloop() # starts the event loop that displays the window
|
Penguin-God/Python_GUI_Programming
|
1_Basic/5_listbox.py
|
5_listbox.py
|
py
| 1,099 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
27250989956
|
import torch.nn.functional as F
import torch.nn as nn
import torch
filters = torch.tensor([[[[2, 0, 0],
[1, 0, 1],
[0, 3, 0]],
[[1, 0, 1],
[0, 0, 0],
[1, 1, 0]],
[[0, 0, 1],
[1, 1, 1],
                          [1, 1, 0]]]])  # shape [1, 3, 3, 3] = [filter_nums/output_channels, input_channels, height, width]
inputs = torch.tensor([0, 2, 0, 1, 0, 0, 2, 0, 1, 1, 2, 1, 2, 0, 0, 1, 0, 0, 1, 0, -1, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0,
                       # [batch_size, in_channels, height, width]
1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0]).reshape([1, 3, 5, 5])
bias = torch.tensor([1])
result = F.conv2d(inputs, filters, bias=bias, stride=1, padding=0)
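# With a 5x5 input, a 3x3 kernel, stride 1 and padding 0, the output spatial size is
# (5 - 3) / 1 + 1 = 3, so `result` has shape [1, 1, 3, 3] (one sample, one output channel).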
print("输入数据为:", inputs)
print("输入数据的形状为:", inputs.shape)
print("卷积核的形状:[filter_nums/output_channels,input_channels,high,width] ==>", filters.shape)
print("卷积后的结果:", result)
print("结果的形状:", result.shape)
|
moon-hotel/DeepLearningWithMe
|
Archived/02_ConvolutionalNN/01_CNNOP/01_CnnOpSingleFilter.py
|
01_CnnOpSingleFilter.py
|
py
| 1,225 |
python
|
en
|
code
| 116 |
github-code
|
6
|
33671678331
|
from __future__ import annotations
from datetime import datetime
from datetime import timedelta
from unittest.mock import MagicMock
import pytest
from common.reddit_client import RedditClient
from prawcore.exceptions import Forbidden
from requests import Response
@pytest.fixture
def reddit_client():
return RedditClient(
reddit_client_id="YOUR_CLIENT_ID",
reddit_client_secret="YOUR_CLIENT_SECRET",
reddit_user_agent="YOUR_USER_AGENT",
)
@pytest.fixture
def subreddit_mock(reddit_client):
reddit_client.reddit_client.subreddit = MagicMock()
return reddit_client.reddit_client.subreddit.return_value
@pytest.fixture
def post_data():
date = datetime(2023, 7, 28)
post1 = MagicMock()
post1.id = "post1"
post1.title = "Test Post 1"
post1.selftext = "This is test post 1."
post1.subreddit.display_name = "test_subreddit"
post1.upvote_ratio = 0.75
post1.ups = 10
post1.downs = 2
post1.total_awards_received = 1
post1.num_comments = 5
post1.created_utc = date.timestamp()
post2 = MagicMock()
post2.id = "post2"
post2.title = "Test Post 2"
post2.selftext = "This is test post 2."
post2.subreddit.display_name = "test_subreddit"
post2.upvote_ratio = 0.80
post2.ups = 15
post2.downs = 3
post2.total_awards_received = 2
post2.num_comments = 8
post2.created_utc = (date + timedelta(days=1)).timestamp()
return [post1, post2]
@pytest.fixture
def subreddit_new_mock(subreddit_mock, post_data):
subreddit_mock.new = MagicMock(return_value=post_data)
return subreddit_mock.new
def test_remove_submissions_not_on_date(reddit_client):
date = datetime(2021, 10, 4).date()
submissions = [
{"created_utc": 1633393174},
{"created_utc": 1633306774},
{"created_utc": 1633220374},
]
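    # The three epoch timestamps fall on 2021-10-05, 2021-10-04 and 2021-10-03 (UTC),
    # so only the middle submission should survive the date filter below.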
expected_submissions = [{"created_utc": 1633306774}]
result = reddit_client._remove_submissions_not_on_date(submissions, date)
assert result == expected_submissions
def test_fetch_submissions_made_on_date_forbidden_error(reddit_client, subreddit_new_mock):
forbidden_response = Response()
forbidden_response.status_code = 403
subreddit_new_mock.side_effect = Forbidden(forbidden_response)
result = reddit_client.fetch_submissions_made_on_date("test_subreddit", datetime(2023, 7, 28))
assert result == []
def test_fetch_submissions_made_on_date_no_posts(reddit_client, subreddit_new_mock):
subreddit_new_mock.return_value = []
result = reddit_client.fetch_submissions_made_on_date("test_subreddit", datetime(2023, 7, 28))
assert result == []
|
kelvinou01/university-subreddits
|
tests/unit/test_reddit_client.py
|
test_reddit_client.py
|
py
| 2,642 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30188434989
|
import bluetooth
class Alpha1S:
"""
Class to control the Ubtech Alpha 1S robot
"""
def __init__(self, name="ALPHA 1S"):
self.__bt = self.Alpha1S_bluetooth(name)
def battery(self):
"""
Get battery information.
Returns:
dict: Dictionary with fields:
percent: Remaining battery capacity
state:
0: Battery not charging
1: Battery charging
2: Battery not present
mV: Battery voltage in mV
"""
msg = b'\x18\x00'
parameter_len = 4
ans = self.__bt.read(msg, parameter_len)
if ans is not None:
battery = {
"percent": int.from_bytes(ans[3:], "big"),
"state": int.from_bytes(ans[2:3], "big"),
"mV": int.from_bytes(ans[:2], "big")
}
return battery
return None
def leds(self, state):
"""
Turn LEDs on or off.
Parameters:
state: Set True to turn on, False to turn off.
"""
if state:
state = b'\x01'
else:
state = b'\x00'
msg = b'\x0D' + state
self.__bt.write(msg)
def servo_read(self, servo_id):
"""
Read the position of a single servo.
Note: Reading a servo will automatically power it off.
Parameters:
servo_id: Servo id between 0-15, integer
Returns:
int: Position of the servo between 0-180
"""
# Adding 1 to the servo_id because the robot starts counting at 1
servo_id = bytes([servo_id+1])
msg = b'\x24' + servo_id
parameter_len = 2
ans = self.__bt.read(msg, parameter_len)
if ans is not None:
# Check that the received value corresponds to the specified servo
if ans[:1] == servo_id:
return int.from_bytes(ans[1:], "big")
return None
def servo_read_all(self):
"""
Read the positions for all the servos simultaneously.
Note: Reading a servo will automatically power it off.
Returns:
[int]: List of 16 integer positions between 0-180
"""
msg = b'\x25\x00'
parameter_len = 16
ans = self.__bt.read(msg, parameter_len)
if ans is not None:
return [x for x in ans]
return None
def servo_write(self, servo_id, angle, travelling=20):
"""
Set a specific servo to an angle.
Parameters:
servo_id: Servo id between 0-15, integer
angle: Angle between 0-180, integer
travelling: Time the servo takes to move to the position
Returns:
int: Error code:
0: Success
                1: Wrong servo_id
2: Allow servo angle excess
3: No reply from servo
"""
# Adding 1 to the servo_id because the robot starts counting at 1
servo_id = bytes([servo_id+1])
angle = bytes([angle])
run_time = bytes([travelling])
time_frames = b'\x00\x10'
msg = b'\x22' + servo_id + angle + run_time + time_frames
parameter_len = 2
ans = self.__bt.read(msg, parameter_len)
if ans is not None:
# Check that the received value corresponds to the specified servo
if ans[:1] == servo_id:
return int.from_bytes(ans[1:], "big")
return None
def servo_write_all(self, angles, travelling=20):
"""
Set all servos to the specified positions simultaneously.
Parameters:
angles: List of integer angles between 0-180
travelling: Time the servo takes to move to the position, integer
Returns:
[int]: List of error codes for each servo:
0: Success
                1: Wrong servo_id
2: Allow servo angle excess
3: No reply from servo
"""
if len(angles) != 16:
return None
angles = bytearray(angles)
run_time = bytes([travelling])
time_frames = b'\x00\x10'
msg = b'\x23' + angles + run_time + time_frames
parameter_len = 16
ans = self.__bt.read(msg, parameter_len)
if ans is not None:
return [x for x in ans]
return None
def servo_off(self):
"""
Send command to power off all the servos in the robot.
"""
msg = b'\x0C\x00'
self.__bt.write(msg)
class Alpha1S_bluetooth:
"""
Class to handle the Alpha1S' bluetooth protocol
Download Bluetooth protocol datasheet from
https://assets-new.ubtrobot.com/downloads/Alpha%201%20Series%20Bluetooth%20communication%20protocol?download
""" # noqa
def __init__(self, name):
address = self.__discover(name)
assert(address is not None), f"Error: {name} not found"
self.__connect(address)
def __del__(self):
self.sock.close()
def __discover(self, name):
address = None
devices = bluetooth.discover_devices(lookup_names=True)
for add, text in devices:
if text == name:
address = add
break
return address
def __connect(self, addr):
self.sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
self.sock.connect((addr, 6))
self.sock.settimeout(10.0)
def write(self, msg):
"""
Compose an outgoing message following the required format and send
over the bluetooth socket. Takes bytes as input.
"""
cmd = self.__compose(msg)
self.sock.send(cmd)
def read(self, msg, ans_len):
"""
Use the write() function to send a command and receive its answer.
Returns the 'Parameter' field in bytes if the message was received
correctly, None otherwise.
"""
self.write(msg)
# Length is sum of header(2), length, check, cmd, ans_len and end
length = 6 + ans_len
ans = self.sock.recv(length)
if self.__check(ans):
return ans[4:-2]
return None
def __compose(self, msg):
"""
Compose a byte message with the header, length, check and end
bytes in the required format.
"""
header = b'\xFB\xBF'
end = b'\xED'
# Length is sum of header(2), length, check + msg bytes
length = bytes([4 + len(msg)])
# Check is sum of length + msg (length+(cmd+params)), with modulus
# to fit into a single byte
check_list = bytearray(length)
check_list.extend(msg)
check = bytes([sum(check_list) % 256])
return header + length + msg + check + end
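        # Worked example (illustrative): for the LED-on command msg = b'\x0D\x01',
        # length = 4 + 2 = 0x06 and check = (0x06 + 0x0D + 0x01) % 256 = 0x14,
        # so the frame sent over RFCOMM is FB BF 06 0D 01 14 ED.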
def __check(self, msg):
"""
Check that the received message follows the correct format and that
the check byte is correct.
Returns True if message is correct, False otherwise
"""
msg = bytearray(msg)
# Check that header is correct
if msg[:2] != b'\xFB\xBF':
return False
# Check that ending is correct
elif msg[-1:] != b'\xED':
return False
# Check that check byte is correct
elif msg[-2:-1] != bytes([sum(msg[2:-2]) % 256]):
return False
else:
return True
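# Example usage (a hedged sketch; assumes a robot is paired and advertising as "ALPHA 1S"):
#   robot = Alpha1S()
#   print(robot.battery())     # e.g. {'percent': 87, 'state': 0, 'mV': 7412}
#   robot.leds(True)           # turn the eye LEDs on
#   robot.servo_write(0, 90)   # move servo 0 to 90 degrees over the default travelling time
#   robot.servo_off()          # power off all servos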
|
alvaroferran/Alpha1S
|
alpha1s/__init__.py
|
__init__.py
|
py
| 7,677 |
python
|
en
|
code
| 6 |
github-code
|
6
|
70809071868
|
# ------------------------------------------------------------------#
# AMPBA - 2021 Winter :: Data Collection assignment - PART2 #
# Group Id : 3 #
# Authors: #
# Nishant Jalasutram - PG ID: 012020051 #
# Ila Barshilia - PG ID: 012020022 #
# Deep Kamal Singh - PG ID: 012020053 #
# ------------------------------------------------------------------#
'''
Part 2:
For each match, go to the scorecard link like
https://www.espncricinfo.com/series/icc-cricket-world-cup-2019-1144415/india-vs-new-zealand-1st-semi-final-1144528/full-scorecard and extract the following:
1. Player of the match with the picture. Save the url to the picture in the csv.
2. Country that the player of the match belongs to.
3. Runs scored by every batsman.
4. Balls played by every batsman.
5. Strike rate for every batsman.
6. Wickets taken by every bowler.
7. Economy rate for every bowler.
8. which country won the toss.
9. who were the umpires?
10. who was the match referee
Save results in a file [your group no]_matchDetails.csv.
Name the .py file as [your group no]_matchDetails.py.
'''
import scrapy
# As per the assignment's requirement we need to start our file name with the GROUP number, so we have to import this
# way instead of simply writing from <File_name> import <ClassName>
CricinfoPlayerProfileSpider = __import__(
'3_playerDetails').CricinfoPlayerProfileSpider # ,fromlist=['3_playerDetails'])
class CWC2019MatchStatsSpider(scrapy.Spider):
name = "cwc_2019_Scorecard_spider"
cricinfo_host = 'https://www.espncricinfo.com'
playerProfileParser = CricinfoPlayerProfileSpider(scrapy.Spider)
# This method will be called for every inning of the match , and it will extract Batsmen and Bowlers
def parseInning(self, anInning):
return {
"batsmen": self.parseBatsmen(anInning.css("table[class='table batsman']")),
"bowlers": self.parseBowler(anInning.css("table[class='table bowler']")),
}
# This method extracts match details - iterates over every TR and creates a KV Pair dictionary
# thus making it independent of sequence of occurrence
def parseMatchDetails(self, matchDetailSection):
trs = matchDetailSection.css("tbody").css("tr")
returnDict = {}
for aRow in trs:
tds = aRow.css("td")
if tds is not None and len(tds) > 1:
returnDict[tds[0].css('::text').extract()[0]] = tds[1].css('::text').extract()[0] if len(
tds[1].css('::text').extract()) == 1 else ",".join(tds[1].css('::text').extract())
return returnDict
# This method extract Player of the match information
    # However, we found the player-of-the-match image is lazy loaded and Scrapy alone cannot fetch it.
    # We tried Selenium as well as BeautifulSoup, which made the code messy and inconsistent,
    # so we now fetch the player picture during PART3 execution and set it back into the PART2 data set.
def parsePlayerOfMatchSection(self, playerOfMatchSection):
return {
"player_of_the_match": playerOfMatchSection.css('div[class="best-player-name"]').css(
'a::text').extract_first(),
"player_of_the_match_profile": playerOfMatchSection.css('div[class="best-player-name"]').css(
'a::attr(href)').extract_first(),
"player_of_the_match_image_url": "", # We are not loading image now, as it will be lazyimg.png anyway -
# we will get the player image from PART3 output dictionary later
"player_of_the_match_country": playerOfMatchSection.css(
'span[class="best-player-team-name"]::text').extract_first()
}
# Extracts batsmen details, also takes care of batsmen who did not bat
def parseBatsmen(self, battingTable):
# batting table parsing
batsmenList = []
for aBattingRow in battingTable.css("tbody").css("tr"):
tds = aBattingRow.css("td::text").extract()
if aBattingRow.css('.batsman-cell').css("a::text").extract_first() is not None:
# Found that when batsman is NOT out we get back "not out" in first element instead of RUNS,
# handling this
if tds[0].isnumeric():
batsmenList.append({
"name": aBattingRow.css('.batsman-cell').css("a::text").extract_first().strip(),
"profile_url": aBattingRow.css('.batsman-cell').css("a::attr('href')").extract_first(),
"runs": tds[0],
"balls_played": tds[1],
"strike_rate": tds[5]
})
else:
batsmenList.append({
"name": aBattingRow.css('.batsman-cell').css("a::text").extract_first().strip(),
"profile_url": aBattingRow.css('.batsman-cell').css("a::attr('href')").extract_first(),
"runs": tds[1],
"balls_played": tds[2],
"strike_rate": tds[6]
})
# Are there any "Yet to bat" players - lets add them too
if len(batsmenList) < 11 and len(battingTable.css("tfoot").css("tr")) > 1:
didNotBatLinks = battingTable.css("tfoot").css("tr")[1].css("td")[0].css("div")[0].css("a")
for aPlayer in didNotBatLinks:
batsmenList.append({
"name": aPlayer.css('span::text').extract_first().strip().replace(" ", ""),
"profile_url": aPlayer.css("::attr(href)").extract_first(),
"runs": "",
"balls_played": "",
"strike_rate": ""
})
return batsmenList
# Extracts Bowler details
def parseBowler(self, bowlingScores):
# parsing bowling scores to extract each bowler
bowlerList = []
for aBowlingStatRow in bowlingScores.css("tbody").css("tr"):
tds = aBowlingStatRow.css("td::text").extract()
if aBowlingStatRow.css('.text-nowrap').css("a::text").extract_first() is not None:
bowlerList.append({
"name": aBowlingStatRow.css('.text-nowrap').css("a::text").extract_first().strip(),
"profile_url": aBowlingStatRow.css('.text-nowrap').css("a::attr('href')").extract_first(),
"wickets": tds[3],
"econ": tds[4]
})
return bowlerList
    # This function is called when Part 1 yields a scrapy request; Scrapy processes it
    # and hands the response over to this function.
    # In addition, it is passed match_details_dict, a dictionary that stores the match details;
    # after crawling ends, that dictionary is used to output the CSV file.
    # This method also checks whether the match has an outcome or was abandoned/drawn.
def parse(self, resp, match_number, match_details_dict, match_players_dict):
inning = {}
batsmen = []
bowlers = []
# tsvDict = {}
# checking if match is abandoned
if len(resp.css(".Collapsible").css(".Collapsible__contentInner").css(
"table[class='w-100 table batsman']")) > 0:
# Match seems abandoned, iterate over .batsman .small , to get player list
for aBatsman in resp.css(".Collapsible").css(".Collapsible__contentInner"). \
css("table[class='w-100 table batsman']"). \
css("tbody").css("tr").css("td").css("a"):
batsmen.append({
"name": aBatsman.css('::text').extract_first().strip().replace(" ", ""),
"profile_url": aBatsman.css('::attr(href)').extract_first(),
"runs": "",
"balls_played": "",
"strike_rate": ""
})
match_detail = self.parseMatchDetails(resp.css("table[class='w-100 table match-details-table']"))
tsvDict = {
"player_of_the_match": "",
"player_of_the_match_image_url": "",
"player_of_the_match_country": "",
"batsmen_runs": "",
"batsmen_ball_played": "",
"batsmen_strike_rate": "",
"bowlers_wickets": "",
"bowlers_econ_rate": "",
"toss_won_by": match_detail["Toss"].split(",")[0].strip(),
"umpires": match_detail["Umpires"] + ",TV:" + match_detail["TV Umpire"] + ",Reserve:" + match_detail[
"Reserve Umpire"],
"match_referee": match_detail["Match Referee"]
}
else:
# valid non-abandoned match, follow normal processing
best_player_details = self.parsePlayerOfMatchSection(resp.css("div[class='best-player']"))
for anInning in resp.css(".Collapsible"):
inningCountry = anInning.css("h5").css(".header-title.label::text").extract_first().split("INNINGS")[
0].strip()
inning[inningCountry] = self.parseInning(anInning.css(".Collapsible__contentInner"))
batsmen.extend(inning[inningCountry]["batsmen"])
bowlers.extend(inning[inningCountry]["bowlers"])
batsmen_run_csv = ",".join([batter["runs"] for batter in batsmen])
batsmen_balls_csv = ",".join([batter["balls_played"] for batter in batsmen])
batsmen_strike_rate_csv = ",".join([batter["strike_rate"] for batter in batsmen])
bowlers_wickets_csv = ",".join([bowler["wickets"] for bowler in bowlers])
bowlers_econ_rate_csv = ",".join([bowler["econ"] for bowler in bowlers])
scrapedScorecard = {"innings": inning,
"match_detail": self.parseMatchDetails(
resp.css("table[class='w-100 table match-details-table']"))}
tsvDict = {
"player_of_the_match": best_player_details["player_of_the_match"],
"player_of_the_match_image_url": best_player_details["player_of_the_match_image_url"],
"player_of_the_match_country": best_player_details["player_of_the_match_country"],
"batsmen_runs": batsmen_run_csv,
"batsmen_ball_played": batsmen_balls_csv,
"batsmen_strike_rate": batsmen_strike_rate_csv,
"bowlers_wickets": bowlers_wickets_csv,
"bowlers_econ_rate": bowlers_econ_rate_csv,
"toss_won_by": scrapedScorecard["match_detail"]["Toss"].split(",")[0].strip(),
"umpires": scrapedScorecard["match_detail"]["Umpires"] + ",TV:" + scrapedScorecard["match_detail"][
"TV Umpire"] + ",Reserve:" + scrapedScorecard["match_detail"]["Reserve Umpire"],
"match_referee": scrapedScorecard["match_detail"]["Match Referee"]
}
match_details_dict[match_number] = tsvDict
players = batsmen
players.extend(bowlers)
# Invoke processing for part 3 for every player
for aPlayer in players:
# Duplication check :: SCRAPY checks that automatically,
# will fetch only if the new request is not already fetched
yield scrapy.Request(resp.urljoin(aPlayer["profile_url"]), callback=self.playerProfileParser.parse,
cb_kwargs={'match_players_dict': match_players_dict})
|
deepkamal/DC_AMPBA_W2021
|
3_matchDetails.py
|
3_matchDetails.py
|
py
| 11,760 |
python
|
en
|
code
| 0 |
github-code
|
6
|
50777891
|
from typing import List
class Solution:
def dailyTemperatures(self, temperatures: List[int]) -> List[int]:
ans = [0] * len(temperatures)
stack = [0]
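        # Monotonic stack of indices still waiting for a warmer day; the temperatures
        # at the stacked indices are non-increasing from bottom to top.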
for i in range(1, len(temperatures)):
if temperatures[i] <= temperatures[stack[-1]]:
# if temp is not larger, just push into stack
stack.append(i)
else:
while len(stack) != 0 and temperatures[i] > temperatures[stack[-1]]:
# if temp is larger, update the index
ans[stack[-1]] = i - stack[-1]
stack.pop()
stack.append(i)
return ans
if __name__ == "__main__":
temperatures = [73,74,75,71,69,72,76,73]
s = Solution()
print(s.dailyTemperatures(temperatures))
|
code-cp/leetcode
|
solutions/739/main.py
|
main.py
|
py
| 802 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25008129901
|
from osv import fields, osv
import time
import netsvc
class mrp_reversed_bom(osv.osv_memory):
_name = "mrp.reversed.bom"
_description = "Reversed Bom"
_columns = {
}
def do_reverse(self, cr, uid, ids, context={}):
""" To check the product type
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: the ID or list of IDs if we want more than one
@param context: A standard dictionary
@return:
"""
bom_obj = self.pool.get('mrp.bom')
bom_ids = bom_obj.browse(cr, uid, context['active_ids'])
mrp_prod_obj = self.pool.get('mrp.production')
mrp_prodline_obj = self.pool.get('mrp.production.product.line')
for bom_id in bom_ids:
production_id = mrp_prod_obj.create(cr, uid, {
'origin': 'BOM '+ bom_id.name,
'product_qty': bom_id.product_qty,
'product_id': bom_id.product_id.id or False,
'product_uom': bom_id.product_uom.id or False,
'product_uos_qty': bom_id.product_uos_qty,
'location_src_id': (bom_id.routing_id and bom_id.routing_id.location_id and bom_id.routing_id.location_id.id) or False,
'location_dest_id': (bom_id.routing_id and bom_id.routing_id.location_id and bom_id.routing_id.location_id.id) or False,
'product_uos': bom_id.product_uos.id or False,
'bom_id': bom_id.id or False,
'date_planned': time.strftime('%Y-%m-%d %H:%M:%S'),
})
for line in bom_id.bom_lines:
production_line_id = mrp_prodline_obj.create(cr, uid, {'product_id': line.product_id.id,
'name' : line.name,
'product_qty' : -line.product_qty,
'product_uom' : line.product_uom.id or False,
'location_src_id': bom_id.routing_id.location_id.id or bom_id.routing_id.location_id.id or False,
'location_id': (line.routing_id and line.routing_id.location_id and line.routing_id.location_id.id) or (bom_id.routing_id and bom_id.routing_id.location_id and bom_id.routing_id.location_id.id) or False,
'product_uos_qty': -line.product_uos_qty,
'product_uos': line.product_uos.id or False,
'production_id': production_id
})
bom_result = self.pool.get('mrp.production').action_compute(cr, uid,
[production_id], properties=[x.id for x in bom_id.property_ids if x])
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'mrp.production', production_id, 'button_confirm', cr)
return {}
mrp_reversed_bom()
|
factorlibre/openerp-extra-6.1
|
reverse_bom/wizard/mrp_bom_reverted.py
|
mrp_bom_reverted.py
|
py
| 3,177 |
python
|
en
|
code
| 9 |
github-code
|
6
|
6313183972
|
from django.urls import path, include
from django.conf.urls import url
from mysite import views
urlpatterns = [
path('search/', views.search_view, name='search'),
url(r'^request/(?P<record_id>[-\w]+)/$', views.send_req_view, name='request'),
path('requests/', views.req_view, name='requests'),
path('profile/', views.profile_view, name='profile'),
url(r'^profile/edit/$', views.edit_profile, name='edit_profile'),
path('', views.home_view, name='home'),
path('about/', views.about_view, name='about'),
url(r'^requests/delete/(?P<req_id>[-\w]+)/$', views.delete_req_view, name='d_request'),
url(r'^requests/accept/(?P<req_id>[-\w]+)/$', views.accept_req_view, name='accept_request'),
url(r'^requests/reject/(?P<req_id>[-\w]+)/$', views.reject_req_view, name='reject_request'),
path('notifications/', views.notification_view, name='notifications'),
path('profile/become_donor', views.become_donor_view, name='become_donor'),
path('add_record', views.add_record_view, name='add_record'),
]
|
abpopal/sehat.af
|
mysite/urls.py
|
urls.py
|
py
| 1,044 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21344672699
|
import csv
import html
import pickle
from util import md5sum
"""
Loader class for turning a Twitter Archive into a list of tweets to be used later on.
Source file is expected at ./tweets.csv , which any twitter user can download
as their Twitter Archive.
"""
class TweetLoader:
"""
Constructor.
Arguments:
- ignore_retweets: Whether or not to ignore retweets (default True)
- ignore_replies: Whether or not to ignore replies (default False)
"""
def __init__(
self,
ignore_retweets=True,
ignore_replies=False):
self._ignore_retweets = ignore_retweets
self._ignore_replies = ignore_replies
# Try to load an existing archive
loaded_data = self._load_sample()
if not loaded_data:
# Couldn't load a sample. Try to generate one and write it out
tweet_sample = self._generate_sample_and_write()
if not tweet_sample:
raise LoaderException('Couldn\'t load sample or Twitter Archive')
else:
(archive_hash, tweet_sample) = loaded_data
# Check hash against disk version
try:
current_archive_hash = md5sum('tweets.csv')
if current_archive_hash != archive_hash:
# We should try regenerating the archive
old_tweet_sample = tweet_sample
tweet_sample = self._generate_sample_and_write()
# Keep the one we loaded from disk if generating a new one failed
if not tweet_sample:
tweet_sample = old_tweet_sample
except IOError:
# We could not load the Twitter Archive for some reason
# Fall back to the deserialised version, but first Check
# that we have something left
if not tweet_sample:
tweet_sample = loaded_data[1]
self._tweet_sample = tweet_sample
"""
Returns the list of tweets that were loaded
"""
def get_tweets(self):
return self._tweet_sample
"""
Attempts to generate a new tweet sample from a Twitter Archive.
* Loads the Twitter Archive from disk
* Filters the Archive
* Attempts to write it back out to disk
Returns: A list of tweets in the tweet sample if one could be loaded and
filtered; None otherwise
"""
    def _generate_sample_and_write(self):
        load_result = self._load_filtered_archive()
        if not load_result:
            return None
        (archive_hash, tweet_sample) = load_result
        self._write_sample(archive_hash, tweet_sample)
        return tweet_sample
"""
Loads the Twitter Archive from the tweets.csv file, keeping only
tweets that match the filters specified in the constructor
Arguments:
- filename: the location of the Twitter Archive (default 'tweets.csv')
Returns: A tuple where the first element is the md5 hash of the archive
and the second is a list of the filtered tweets. Returns None if
the archive could not be loaded.
"""
def _load_filtered_archive(self, filename='tweets.csv'):
try:
with open(filename) as twitter_archive:
tweetreader = csv.DictReader(twitter_archive)
#### Twitter Archive Headers:
# tweet_id
# in_reply_to_status_id
# in_reply_to_user_id
# timestamp
# source
# text
# retweeted_status_id
# retweeted_status_user_id
# retweeted_status_timestamp
# expanded_urls
tweet_sample = []
for tweet in tweetreader:
# Check if we should ignore the tweet or not
if self._ignore_retweets and tweet['retweeted_status_id']:
continue
if self._ignore_replies and tweet['in_reply_to_user_id']:
continue
tweet_sample.append(html.unescape(tweet['text']))
if len(tweet_sample) < 1:
return None
archive_hash = md5sum(filename)
return (archive_hash, tweet_sample)
except IOError:
return None
"""
Writes the tweet sample to disk.
Arguments:
- tweet_sample: A list of tweets
- filename: The file to write to (default: 'tweets.dat')
Returns: True if writing was successful, False otherwise
"""
def _write_sample(self, archive_hash, tweet_sample, filename='tweets.dat'):
try:
            with open(filename, mode='wb') as tweet_data:
data = (archive_hash, tweet_sample)
pickle.dump(data, tweet_data)
return True
except IOError:
return False
"""
Attempts to load a filtered tweet sample from disk
Returns: A tuple where the first element is the md5 hash of the archive from
which the tweets were taken, and the second is a list of the filtered
tweets. Returns None if deserialisation was unsuccessful.
"""
def _load_sample(self):
try:
with open('tweets.dat', mode='rb') as tweet_data:
deserialised = pickle.load(tweet_data)
return deserialised
except IOError:
return None
"""
Generic Exception for errors in the TweetLoader class
"""
class LoaderException(Exception):
def __init__(self, message):
self.message = message
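# Usage sketch (editor's addition; assumes a Twitter Archive exported to ./tweets.csv):
#   loader = TweetLoader(ignore_retweets=True, ignore_replies=False)
#   sample = loader.get_tweets()
#   print(len(sample), "tweets loaded")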
|
kymagic/yaebooks-twitter
|
ebooks/tweetloader.py
|
tweetloader.py
|
py
| 5,625 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7998920894
|
import random
from . import users
from flask import request
import json
##### The following code was simply taken over from Tina. If it is bad, it is Tina's fault ############
import os
from dotenv import load_dotenv
from flask import jsonify, Response
import pymongo
import hashlib  # We need this for the passwords
import string
from bson.json_util import dumps
load_dotenv() # use dotenv to hide sensitive credential as environment variables
DATABASE_URL = f'mongodb+srv://{os.environ.get("dbUser")}:{os.environ.get("dbPasswort")}' \
'@flask-mongodb-atlas.wicsm.mongodb.net/' \
'flaura?retryWrites=true&w=majority' # get connection url from environment
client = pymongo.MongoClient(DATABASE_URL) # establish connection with database
# plants.config['MONGO_DBNAME'] = 'restdb'
# plants.config['MONGO_URI'] = 'mongodb://localhost:27017/restdb'
# mongo = PyMongo(plants)
mydb = client.flaura
mycol = mydb.users
def jsonResponse(data):
return Response(data, mimetype='application/json')
##################### End of the code that was simply taken over from Tina #######################
###### Documentation of the login data
###### E-mail: userelement["email"]
###### Password: userelement["pwsha256"] <- This field holds a SHA256 hash of the password, salted with "TUCLAB21"
pwsalt = "TUCLAB21"
def getAllUsers():
cursor = mycol.find({})
list_cur = list(cursor)
ppl = dumps(list_cur)
return ppl
def generatePWhash(password):
zuhashen = password + pwsalt
return hashlib.sha256(zuhashen.encode('utf-8')).hexdigest()
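# e.g. generatePWhash("secret") is equivalent to hashlib.sha256("secretTUCLAB21".encode('utf-8')).hexdigest()
# (the salt is appended to the password before hashing)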
def generateLoginToken(length=32):
    # Based on https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits
return ''.join(
random.SystemRandom().choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in
range(length))
def generatePotToken(length=10):
    # TODO what happens if two pots happen to get the same token
    # Based on https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits
return ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(length))
@users.route('/users')
def usersA():
return jsonResponse(getAllUsers())
@users.route('/users/loginForm')
def loginForm():
return '<center>Ja Moin. Dann registrier dich mal ne Runde!<br><br><form method="post" action="/api/users/register">Name: <input type="text" name="name"><br>E-Mail: <input type="email" name="email"><br>E-Mail wiederholen: <input type="email" name="emailconfirm"><br>Passwort: <input type="password" name="password"><br>Passwort wiederholen: <input type="password" name="passwordconfirm"><br><br><input type="submit" value="Abschicken"></form> </center>'
# This page no longer works since the API function now expects a different payload type
@users.route('/api/users/register', methods=["POST"])
def registerUser():
    ### This function expects an e-mail and two passwords and inserts the user into the DB
### "email", "password", "passwordconfirm"
bestesAntwortDict = {}
reqJson = request.get_json(force=True)
if reqJson["password"] != reqJson["passwordconfirm"]:
bestesAntwortDict["msg"] = "Passwörter stimmen nicht überein."
bestesAntwortDict["successful"] = False
return dumps(bestesAntwortDict)
#if mycol.countDocuments({"email": reqJson["email"]}) > 0:
# bestesAntwortDict["msg"] = "Email already exists"
# bestesAntwortDict["successful"] = False
# return dumps(bestesAntwortDict)
iniLoginToken = generateLoginToken()
newuser = {"email": reqJson["email"], "pwsha256": generatePWhash(request.json["password"]), "pots": [],
"tokens": [iniLoginToken]}
new_id = mycol.insert_one(newuser).inserted_id
bestesAntwortDict["successful"] = True
bestesAntwortDict["initialToken"] = iniLoginToken
return dumps(bestesAntwortDict)
@users.route('/api/users/loginRequest', methods=["POST"])
def attemptLogin():
bestesAntwortDict = {}
reqJson = request.get_json(force=True)
susUser = mycol.find_one({"email": reqJson["email"]})
    ## ToDo: What do we do if this user does not exist at all? What comes back as a response then? Nobody knows...
    # Check whether what we received, once run through the hash function, matches what is stored in the DB for this user
if (generatePWhash(reqJson["password"]) == susUser["pwsha256"]):
bestesAntwortDict["msg"] = "Login erfolgreich"
        ## Generate a token that can be used to identify the user later
newtoken = generateLoginToken()
if "tokens" in susUser.keys():
susUser["tokens"].append(newtoken)
else:
susUser["tokens"] = [newtoken]
mycol.save(susUser)
bestesAntwortDict["token"] = newtoken
bestesAntwortDict["loginSuccessful"] = True
else:
bestesAntwortDict["msg"] = "Login nicht erfolgreich"
bestesAntwortDict["loginSuccessful"] = False
return json.dumps(bestesAntwortDict)
@users.route('/api/users/logoutRequest', methods=["POST"])
def attemptLogout():
bestesAntwortDict = {}
susUser = mycol.find_one({"tokens": request.get_json(force=True)["token"]})
    ## ToDo: What do we do if this user does not exist at all? What comes back as a response then? Nobody knows...
    # This function removes the login token it was called with from the DB
if (susUser != None):
bestesAntwortDict["msg"] = "Logout erfolgreich"
susUser["tokens"].remove(request.json["token"])
mycol.save(susUser)
bestesAntwortDict["logoutSuccessful"] = True
else:
bestesAntwortDict["msg"] = "Logout nicht erfolgreich"
bestesAntwortDict["logoutSuccessful"] = False
return json.dumps(bestesAntwortDict)
@users.route('/api/users/getUser', methods=["POST"])
def getUserInfobyToken():
    # This function expects a token in the POST body; if it is a valid one, the corresponding user object is returned
bestesAntwortDict = {}
susUser = mycol.find_one({"tokens": request.get_json(force=True)["token"]})
if susUser == None:
        # If susUser is None after this lookup, the token was not a valid one!
bestesAntwortDict["msg"] = "Incorrect token"
bestesAntwortDict["error"] = True
return jsonResponse(json.dumps(bestesAntwortDict))
else:
return jsonResponse(dumps(susUser))
@users.route('/api/users/newPot', methods=["POST"])
def createNewPot():
    # This function receives a login token. It generates a pot token for the new pot, creates the pot and adds it to the user
bestesAntwortDict = {}
susUser = mycol.find_one({"tokens": request.get_json(force=True)["token"]})
if susUser == None:
        # If susUser is None after this lookup, the token was not a valid one!
bestesAntwortDict["msg"] = "Incorrect token"
bestesAntwortDict["error"] = True
return jsonResponse(json.dumps(bestesAntwortDict))
else:
newPot = {}
newToken = generatePotToken()
while (mycol.find_one({"pots.token": newToken}) != None):
newToken = generatePotToken()
newPot["token"] = newToken
newPot["sleepTime"] = 1
newPot["criticalMoisture"] = 0
newPot["waterAmountML"] = 0
susUser["pots"].append(newPot)
mycol.save(susUser)
return 'hopefully successful'
@users.route('/api/users/deletePot', methods=["POST"])
def deletePot():
    # This function receives a login token and a pot token. It deletes the pot with this token
bestesAntwortDict = {}
susUser = mycol.find_one({"tokens": request.json["loginToken"]})
if susUser == None:
        # If susUser is None after this lookup, the token was not a valid one!
bestesAntwortDict["msg"] = "Incorrect token"
bestesAntwortDict["error"] = True
return jsonResponse(json.dumps(bestesAntwortDict))
else:
        # Check whether this user has a pot with this pot token, and if so delete it
wasFoundAndDeleted = False
for pot in susUser["pots"]:
if (pot["token"] == request.json["potToken"]):
susUser["pots"].remove(pot)
wasFoundAndDeleted = True
mycol.save(susUser)
if not wasFoundAndDeleted:
bestesAntwortDict["msg"] = "Pot either not existing or not yours"
bestesAntwortDict["error"] = True
return jsonResponse(json.dumps(bestesAntwortDict))
bestesAntwortDict["msg"] = "Pot deleted"
bestesAntwortDict["error"] = False
return jsonResponse(json.dumps(bestesAntwortDict))
@users.route('/api/users/changePot', methods=["POST"])
def setPotValues():
    # This function receives a login token, a pot token and optionally sleepTime, criticalMoisture and waterAmountML values, and updates the matching pot
bestesAntwortDict = {}
reqJson = request.get_json(force=True)
susUser = mycol.find_one({"tokens": reqJson["token"]})
if susUser == None:
        # If susUser is None after this lookup, the token was not a valid one!
bestesAntwortDict["msg"] = "Incorrect token"
bestesAntwortDict["error"] = True
return jsonResponse(json.dumps(bestesAntwortDict))
else:
        # Check whether this user has a pot with this pot token, and if so update its data
wasFoundAndEdited = False
for pot in susUser["pots"]:
if (pot["token"] == reqJson["potToken"]):
if "sleepTime" in reqJson.keys():
pot["sleepTime"] = reqJson["sleepTime"];
if "criticalMoisture" in reqJson.keys():
pot["criticalMoisture"] = reqJson["criticalMoisture"];
if "waterAmountML" in reqJson.keys():
pot["waterAmountML"] = reqJson["waterAmountML"];
wasFoundAndEdited = True
mycol.save(susUser)
return
if not wasFoundAndEdited:
bestesAntwortDict["msg"] = "Pot either not existing or not yours"
bestesAntwortDict["error"] = True
return jsonResponse(dumps(bestesAntwortDict))
|
rosemaxio/flauraBackend
|
users/Api.py
|
Api.py
|
py
| 10,619 |
python
|
de
|
code
| 0 |
github-code
|
6
|
3835462701
|
#coding: utf-8
import urllib
import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
import webbrowser
import os
from threading import Timer
import base64
from ui import Image
import console
import sys
import io
sys.stderr = io.StringIO()
console.show_activity('Creating images…')
imagefilenames = [os.path.splitext(i)[0] for i in open(os.path.expanduser('~/Pythonista.app/Typicons-M.txt')).readlines()]
imagenames = [i.replace('Typicons96_', '') for i in imagefilenames]
images = {n: Image.named(imagefilenames[i]) for (i, n) in enumerate(imagenames)}
imageurls = {k:'data:image/png;base64,'+base64.b64encode(images[k].to_png()) for k in images}
choosecolorpath = os.path.dirname(sys.argv[0].split('/Documents/',1)[1]) + '/choosecolor.py'
tagtemplate = '<a href="pythonista://' +choosecolorpath+ '?action=run&argv=%s"><img src="%s"></a>'
imagetags = [tagtemplate%(k,imageurls[k]) for k in imagenames]
imagesstring = ''.join(imagetags)
html = '''
<!DOCTYPE html>
<html>
<head>
<style type="text/css">
body {
background:#292929;
text-align:center;
line-height:0;
margin:0;
}
img {
width:48px;
height:48px;
padding:6px;
margin:8px;
background-color:#707070;
background: linear-gradient(#707070, #5a5a5a);
border-radius:14px;
box-shadow:0 2px 4px rgba(0,0,0,0.5);
}
h1 {
font-family:"Avenir Next";
color: white;
padding: 10px;
text-shadow: 0 2px 4px rgba(0,0,0,0.5);
}
</style>
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
</head>
<body>
<h1>Choose an Icon</h1>
%s
</body>
</html>
''' % imagesstring
class RequestHandler (SimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(html)
def log_message(self, format, *args):
pass
serv = BaseHTTPServer.HTTPServer(('', 0), RequestHandler)
port = serv.server_port
Timer(1, webbrowser.open, ('http://localhost:%d'%port,)).start()
console.show_activity('Starting server…')
serv.handle_request()
console.hide_activity()
|
0942v8653/pythonista-homescreen-icon
|
chooseicon.py
|
chooseicon.py
|
py
| 2,087 |
python
|
en
|
code
| 9 |
github-code
|
6
|
39399855904
|
import datetime
import decimal
import logging
import os
import re
from kensu.psycopg2.pghelpers import get_table_schema, get_current_db_info, pg_query_as_dicts
from kensu.utils.kensu_provider import KensuProvider
from kensu.utils.kensu import KensuDatasourceAndSchema
from kensu.utils.dsl.extractors.external_lineage_dtos import GenericComputedInMemDs
PG_CreateTableAs = 'PG_CreateTableAs'
PG_InsertTable = 'PG_InsertTable'
PG_CreateView = 'PG_CreateView'
# FIXME: quoted vs not quoted table names? e.g.:
# "testme"."testme_schema"."my_first_dbt_model" vs testme.testme_schema.my_first_dbt_model
def pg_relation_to_kensu_table_name(rel):
if str(type(rel)) == "<class 'dbt.adapters.postgres.relation.PostgresRelation'>":
return '.'.join([
rel.database,
rel.schema,
rel.identifier
])
return None
def format_relation_name(relation, cur_catalog=None, cur_schema=None):
if isinstance(relation, dict):
catalogname = ('catalogname' in relation and relation["catalogname"]) or None
schemaname = ('schemaname' in relation and relation["schemaname"]) or None
relname = ('relname' in relation and relation["relname"]) or None
else:
catalogname = hasattr(relation, 'catalogname') and relation.catalogname or None
schemaname = hasattr(relation, 'schemaname') and relation.schemaname or None
relname = hasattr(relation, 'relname') and relation.relname or None
if not relname:
return None
else:
parts = [
catalogname or cur_catalog,
schemaname or cur_schema,
relname
]
return '.'.join([n for n in parts if n])
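# e.g. format_relation_name({'schemaname': 'public', 'relname': 'users'}, cur_catalog='mydb') -> 'mydb.public.users'
# (missing parts fall back to the current catalog/schema and empty parts are dropped)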
def fetch_input_output_tables(stmt, cur_catalog, cur_schema):
from pglast import Node
output_tables = []
input_tables = []
for node in Node(stmt.stmt).traverse():
logging.debug("sql tree entry: "+str(node))
is_read_node = str(node) == 'fromClause[0]={RangeVar}'
is_read_from_join_node = str(node) == 'fromClause[0]={JoinExpr}'
is_write_node = 'rel={RangeVar}' in str(node)
if is_read_node or is_write_node:
table_name = format_relation_name(node.ast_node, cur_catalog, cur_schema)
if is_read_node:
input_tables.append(table_name)
elif is_write_node:
output_tables.append(table_name)
if is_read_from_join_node:
for joined_node in [node.ast_node.larg, node.ast_node.rarg]:
table_name = format_relation_name(joined_node, cur_catalog, cur_schema)
if table_name:
input_tables.append(table_name)
return input_tables, output_tables
def parse_pg_query(cursor, sql):
from pglast import parse_sql, ast
from kensu.psycopg2.pghelpers import get_table_schema, get_current_db_info
# fixme: this would be needed only if we support non-fully-qualified table references in SQL
# cur_catalog, cur_schema = get_current_db_info(cursor)
cur_catalog, cur_schema = None, None
parsed_tree = parse_sql(sql)
for stmt in parsed_tree:
stmt_type = None
output_tables = []
input_tables = []
if isinstance(stmt, ast.RawStmt):
if isinstance(stmt.stmt, ast.CreateTableAsStmt):
stmt_type = PG_CreateTableAs
input_tables, output_tables = fetch_input_output_tables(stmt, cur_catalog, cur_schema)
if isinstance(stmt.stmt, ast.InsertStmt):
stmt_type = PG_InsertTable
# TODO... there is probably more cases than insert ... values
table_name = format_relation_name(stmt.stmt.relation(), cur_catalog, cur_schema)
output_tables.append(table_name)
if isinstance(stmt.stmt, ast.ViewStmt):
stmt_type = PG_CreateView
output_table_name = format_relation_name(stmt.stmt.view, cur_catalog, cur_schema)
output_tables = [output_table_name]
input_tables, _ = fetch_input_output_tables(stmt, cur_catalog, cur_schema)
return stmt_type, input_tables, output_tables
def pg_try_get_schema(cursor, tname):
# FIXME: move this to kensu-py
try:
return list([(f.get('field_name') or 'unknown', f.get('field_type') or 'unknown')
for f in get_table_schema(cursor, tname)])
except:
logging.warning(f"failed getting schema for Postgres table {tname}")
def pg_to_kensu_entry(kensu_inst, cursor, tname, compute_stats=True):
# for postgres & mysql, dbt creates temporary tables and rename them later
# we want the final table name in kensu
# FIXME: this renaming might cause issues when fetching schema!!! should it happen here?
cleaned_tname = tname.replace('__dbt_tmp', '')
maybe_schema = pg_try_get_schema(cursor=cursor, tname=tname)
logging.warning(f"pg_schema: {maybe_schema}")
stats_values = None
if compute_stats and maybe_schema:
# FIXME: use a fresh cursor?
stats_values = query_stats(cursor, schema_fields=maybe_schema, orig_tname=tname)
logging.info(f'final Postgres stats values: {stats_values}')
server_info = cursor.connection.info.dsn_parameters
# FIXME: make sure the Postgres URI is consistent among all different collectors
# (e.g. is port always explicit vs default port)
ds_path = f"postgres://{server_info['host']}:{server_info['port']}/{cleaned_tname}" # FIXME?
entry = KensuDatasourceAndSchema.for_path_with_opt_schema(
ksu=kensu_inst,
ds_path=ds_path, # FIXME?
ds_name=cleaned_tname, # FIXME?
format='Postgres table',
# FIXME: ds_path like postgres://localhost:5432/a.b.c seem to cause error in Kensu webui
# categories=['logical::'+ f"postgres :: {server_info['host']}:{server_info['port']} :: {cleaned_tname}"],
categories=['logical::' + f"{cleaned_tname}"],
maybe_schema=maybe_schema,
f_get_stats=lambda: stats_values
) # type: KensuDatasourceAndSchema
return entry
def report_postgres(conn_mngr, cursor, sql, bindings):
if bindings is not None:
        # We substitute the "%s" placeholders with dummy positional values before parsing,
        # because otherwise the pglast parser fails on the raw "%s" markers
num_to_replace = len(re.findall("%.", sql))
sql = sql.replace("%s", "{}").format(*range(0, num_to_replace)) # bindings)
from kensu_reporting import get_kensu_agent
kensu_inst = get_kensu_agent()
stmt_type, input_tables, output_tables = parse_pg_query(cursor=cursor, sql=sql)
# e.g. input_tables=['source_data'], output_tables=['testme.testme_schema.my_first_dbt_model__dbt_tmp']
# input might contain false `table names`, being the subquery aliases inside the SQL `WITH` statement, e.g.:
# WITH source_data as (select 1) select * from source_data
# P.S. for now only fully-qualified table names supported in SQL (e.g. to remove subquery aliases)
convert_valid_tables_and_fetch_stats_fn = lambda tables: [
pg_to_kensu_entry(kensu_inst, cursor, t)
for t in tables if t.count('.') == 2]
if stmt_type == PG_CreateTableAs:
logging.info(f'POSTGRES create table. SQL: {sql}')
all_kensu_inputs = convert_valid_tables_and_fetch_stats_fn(input_tables)
elif stmt_type == PG_InsertTable:
# Considering that we are currently in a model inserting from `seed`
from kensu_reporting import get_current_thread_seeds
seed_inputs = get_current_thread_seeds()
inputs = convert_valid_tables_and_fetch_stats_fn(input_tables)
# TODO probably other cases than just seed_inputs?
all_kensu_inputs = [*seed_inputs, *inputs]
logging.info(f'POSTGRES insert. SQL: {sql}')
elif stmt_type == PG_CreateView:
all_kensu_inputs = convert_valid_tables_and_fetch_stats_fn(input_tables)
logging.debug(f'POSTGRES create view. SQL: {sql}')
else:
logging.info(f"POSTGRES untracked statement: sql={sql}")
return
if all_kensu_inputs and output_tables:
outputs = [pg_to_kensu_entry(kensu_inst, cursor, o)
for o in output_tables if o.count('.') == 2]
for output in outputs:
lineage=GenericComputedInMemDs.for_direct_or_full_mapping(all_inputs=all_kensu_inputs,
out_field_names=output.field_names())
if len(lineage.lineage) <= 0:
continue
lineage.report(
ksu=kensu_inst,
df_result=output,
operation_type='NA',
report_output=True,
register_output_orig_data=True
)
kensu_inst.report_with_mapping()
def query_stats(cursor, schema_fields, orig_tname):
stats_aggs = pg_generate_fallback_stats_queries(schema_fields)
input_filters=None
filters = ''
if input_filters is not None and len(input_filters) > 0:
filters = f"WHERE {' AND '.join(input_filters)}"
selector = ",".join([sql_aggregation + " " + col.replace(".","__ksu__") + "_" + stat_name
for col, stats_for_col in stats_aggs.items()
for stat_name, sql_aggregation in stats_for_col.items()])
stats_sql = f"select {selector}, sum(1) as nrows from {str(orig_tname)} {filters}"
logging.info(f'SQL query to fetch Postgres stats: {stats_sql}')
stats_result = pg_query_as_dicts(cur=cursor, q=stats_sql)
logging.debug(f'Postgres stats: {stats_result}')
r = {}
# FIXME: hmm this logic seem quite shared with BigQuery, extract common parts?
for row in stats_result:
if row.get('nrows'):
r['nrows'] = row['nrows']
# extract column specific stats
for col, stat_names in stats_aggs.items():
for stat_name in stat_names.keys():
result_column = col.replace(".","__ksu__") + "_" + stat_name
# looks like postgres:13 return only lowercase
v = row.get(result_column.lower()) or row.get(result_column)
if v.__class__ in [datetime.date, datetime.datetime, datetime.time]:
v = int(v.strftime("%s") + "000")
if v.__class__ in [decimal.Decimal]:
v = float(v)
if v is None:
# FIXME: this might be misleading actually
v = 0
r[(col + "." + stat_name).replace("__ksu__",".")] = v
break # there should be only one row here
return r
def pg_generate_fallback_stats_queries(schema_fields):
stats_aggs = {}
for field_name, field_type in schema_fields:
field_type = field_type.upper()
# https://www.postgresql.org/docs/9.5/datatype.html
# seem like we need to quote field names which are case-sensitive (upercase)
nullrows_agg = f"""sum(num_nulls("{field_name}"))"""
min_agg = f"""min("{field_name}")"""
max_agg = f"""max("{field_name}")"""
avg_agg = f"""avg("{field_name}")"""
if field_type in ["INTEGER", "INT", "INT4", "DECIMAL", "SMALLINT", "INT2", "FLOAT",
"FLOAT4", "FLOAT8", "FLOAT64", "REAL", "NUMERIC", "BIGINT", "INT8"]:
stats_aggs[field_name] = {"min": min_agg,
"max": max_agg,
"mean": avg_agg,
"nullrows": nullrows_agg}
elif field_type in ["TIMESTAMP", "TIMESTAMPTZ", "DATE", "TIME", "TIMETZ", "DATETIME"]:
stats_aggs[field_name] = {"min": min_agg,
"max": max_agg,
"nullrows": nullrows_agg}
elif field_type in ["BOOLEAN", "BOOL"]:
stats_aggs[field_name] = {"true": f"""sum(case "{field_name}" when true then 1 else 0 end)""",
"nullrows": nullrows_agg}
elif field_type in ["STRING", "TEXT"]:
stats_aggs[field_name] = {"levels": f"""count(distinct "{field_name}")""",
"nullrows": nullrows_agg}
return stats_aggs
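# Illustrative sketch (editor's addition): for schema_fields like [("price", "NUMERIC"), ("label", "TEXT")]
# the function returns per-column SQL aggregations roughly of the form:
#   {"price": {"min": 'min("price")', "max": 'max("price")', "mean": 'avg("price")', "nullrows": 'sum(num_nulls("price"))'},
#    "label": {"levels": 'count(distinct "label")', "nullrows": 'sum(num_nulls("label"))'}}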
|
Fundamentals-of-Data-Observability/handson
|
python_environment/volume/week2/dbt/dbt-do/dbt-ast/kensu_postgres.py
|
kensu_postgres.py
|
py
| 12,258 |
python
|
en
|
code
| 8 |
github-code
|
6
|
30338109347
|
from collections import deque
n, k = map(int, input().split())
graph = [[] for _ in range(k + 1)]
gmap = [[] for _ in range(n + 1)]
for i in range(1, n+1):
arr = list(map(int, input().split()))
gmap[i] = arr
for j, m in zip(arr, range(1, n+1)):
if j != 0:
graph[j].append((i, m))
s, a, b = map(int, input().split())
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
# simulate s seconds; each second the viruses spread in increasing order of their number
for _ in range(s):
    for i in range(1, k + 1):
        queue = deque(graph[i])
        new_cells = []
        while queue:
            x, y = queue.popleft()
            for j in range(4):
                nx = x + dx[j]
                ny = y + dy[j]
                if nx <= 0 or ny <= 0 or nx > n or ny > n:
                    continue
                if gmap[nx][ny - 1] == 0:
                    gmap[nx][ny - 1] = i
                    new_cells.append((nx, ny))
        graph[i] = new_cells
print(gmap[a][b - 1])
|
minju7346/CordingTest
|
bfs3.py
|
bfs3.py
|
py
| 944 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24985299915
|
# Importing libraries
from dash import Dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from random import randint
import plotly.express as px
# Creating dash environment
app = Dash(__name__)
# Constructing the layout
app.layout = html.Div([
# Title
html.Div(dcc.Markdown("Random number plotter"), style={"textAlign":"center", 'font-size':'300%'}),
# Num. of point selection area, our N
html.Div(["Number of points: ", dcc.Input(id='number_of_pts', value=100, type='number', min=1, style={'height': '25px', 'width':'100px'})], style={'padding-left':'80px', 'padding-top':'30px', 'display': 'inline-block', 'font-size':'150%'}),
# Max range selection area, our K
html.Div(["Max range: ", dcc.Input(id='upper_bound', value=100, type='number', min=1, style={'height': '25px', 'width': '100px'})], style={'padding-left': '50px', 'display': 'inline-block', 'font-size':'150%'}),
# Our scatter plot
dcc.Graph(id='random_graph', style={'height':'800px'}),
# Title for the selected data area
html.Div([dcc.Markdown("Selected points: ", style={'padding-left':'80px', 'font-size':'200%'})]),
# Selected data area
html.Div(html.Pre(id='selected_data', style={'border': 'thin lightgrey solid', 'overflowY': 'scroll', 'font-size':'200%'}), style={'width':'90%', 'padding-left':'80px'})])
# Callback function for number of points and range selection
@app.callback(
Output("random_graph", "figure"),
[Input("number_of_pts", "value"),
Input("upper_bound", "value")])
def update_graph(number_of_pts, max_range):
if(number_of_pts and max_range != None): # Check whether arguments are null
A_array = []
for i in range(number_of_pts):
A_array.append(randint(1, max_range))
fig = px.scatter(y=A_array, labels={"x":"index", "y":"value"})
fig.update_layout(showlegend=False)
return fig # return updated scatter plot
return px.scatter() # Return empty scatter plot
# Callback function for graph selection
@app.callback(
Output("selected_data", "children"),
[Input("random_graph", "selectedData")])
def update_selecteData(data):
try:
data = data["points"]
print(data)
points = []
for point in data:
points.append("Index: {}, Value: {} \n".format(point["x"], point["y"])) # Make a string list of selected data, from Indexes and Values
return points
except:
return ""
# Run Dash app
app.run_server()
|
Miautawn/simple-DashBoard-with-Dash
|
random_plotter.py
|
random_plotter.py
|
py
| 2,553 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38751132701
|
import os
import util
import config
scanned_files = 0
for directory, _, files_list in os.walk(config.td):
#directory looks like "c:\user\box\...\phonexx\images"
for ea_filename in files_list:
#ea_filename looks like "ADX-....jpg"
file_path = (directory+"\\"+ea_filename)
#file_path looks like "c:\td\...\[filename].jpg"
relative_dir = directory.replace(config.td, "")
# looks like "\phonexx\images"
file_attributes = ea_filename.split("_")
scanned_files += 1
if ( scanned_files % 1000 ) == 0 : print("Scanned ",scanned_files," files.")
if file_attributes[0] == 'PXL':
dest_dir = config.dd+"\\"+relative_dir
util.move_this_file(file_path,dest_dir,ea_filename)
config.moved_files += 1
if (config.moved_files % 1000) == 0 : print("Moved",config.moved_files,"files.")
else: continue
print("Moved",config.moved_files,"Files.")
|
maravis05/EyeCup-File-Scanner
|
move_pxl.py
|
move_pxl.py
|
py
| 1,001 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17202898458
|
#!/usr/bin/env python
import robot
import time
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
import json
import logging
def send_json(data):
msg = {'action':data}
msg = json.dumps(msg)
return (msg)
def action(client, userdata, message):
data = message.payload.decode()
data = json.loads(data)
data = data['action']
print (data)
if data == 'forward':
print ('forward')
robot.forward()
elif data == 'stop':
print ('stop')
robot.stop()
elif data == 'left':
print ('left')
robot.left()
elif data == 'right':
print ('right')
robot.right()
elif data == 'backward':
print ('backward')
robot.backward()
elif data == 'servo left':
print ('servo left')
robot.servo_left()
elif data == 'servo center':
print ('servo center')
robot.servo_center()
elif data == 'servo right':
print ('servo right')
robot.servo_right()
elif data == 'lights':
print ('lights')
robot.lights()
elif data == 'blinkers':
print ('blinkers')
robot.blinkers()
elif data == 'voltage':
print('voltage')
voltage = robot.voltage()
voltage = send_json(voltage)
myMQTT.publish('from_robot', voltage, 0)
elif data == 'distance':
print('distance')
distance = robot.distance()
distance = send_json(distance)
myMQTT.publish('from_robot', distance, 0)
else:
pass
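# The expected payload on the 'to_robot' topic is JSON of the form {"action": "<command>"},
# e.g. {"action": "forward"} or {"action": "distance"}; replies are published on 'from_robot' via send_json.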
key_dir = '/home/pi/aaa_mikes_gopigo/keys/'
myMQTT = AWSIoTMQTTClient('Leonard')
myMQTT.configureEndpoint('a111amujev1y9r.iot.us-west-2.amazonaws.com', 8883)
myMQTT.configureCredentials(key_dir+'root-CA.crt', key_dir+'Leonard.private.key', key_dir+'Leonard.cert.pem')
myMQTT.configureOfflinePublishQueueing(-1)
myMQTT.configureDrainingFrequency(2)
myMQTT.configureConnectDisconnectTimeout(10)
myMQTT.connect()
myMQTT.subscribe('to_robot', 1, action)
logger = logging.getLogger("AWSIoTPythonSDK.core")
logger.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
print('Waiting for data...')
while True:
    time.sleep(1)  # keep the main thread alive without busy-waiting; MQTT callbacks run in background threads
|
mkyle1121/gopigo
|
web/robot_sock_client2.py
|
robot_sock_client2.py
|
py
| 2,084 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8207780556
|
import base64
from django.shortcuts import render
from django.http import JsonResponse
from django.views.decorators.cache import never_cache
from stolen_wiki_game.models import Article
# Create your views here.
def index(request):
return render(request, 'stolen_wiki_game/index.html', {})
@never_cache
def article(request):
art = Article.objects.order_by('?').first()
encoded = base64.b64encode(art.slug.encode('ascii')).decode('ascii')
return JsonResponse({
'article': encoded,
'redactleIndex': 0,
'token': 'abc',
'yesterday': 'blah'
})
|
Jack-Naughton/homepage
|
stolen_wiki_game/views.py
|
views.py
|
py
| 596 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1443436731
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from .models import Camera
class CameraAddForm(forms.ModelForm):
class Meta:
model = Camera
fields = ['title', 'ip']
labels = {
'title': _('Titel'),
'ip': _('Adresse')
}
|
Thorium0/Security-terminal
|
Terminal/camera/forms.py
|
forms.py
|
py
| 324 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12492560506
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from __future__ import print_function
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report
from sklearn import metrics
from sklearn import tree
import warnings
warnings.filterwarnings('ignore')
#for interactivity
from ipywidgets import interact
from tabulate import tabulate
data=pd.read_csv('crop_recommendation.csv')
print("Shape Of The Data:",data.shape)
print()
data.head()
data.tail()
data.columns
data.dtypes
data['label'].unique()
data['label'].value_counts()
data.isnull().sum()
#summary of dataset
# create a list of variable names
variables = ['N', 'P', 'K', 'temperature', 'humidity', 'ph', 'rainfall']
# loop through the variables and calculate/print their mean values
for var in variables:
print("Average ratio of {0} in the soil: {1:.2f}".format(var, data[var].mean()))
@interact(crops=data['label'].unique())
def summary(crops):
stats = ['mean', 'median', 'min', 'max']
vars = ['N', 'P', 'K', 'temperature', 'humidity', 'ph', 'rainfall']
print("Statistics for crop:", crops)
for var in vars:
print("Statistics of", var)
print("Statistics for crop:", crops)
for stat in stats:
print(stat, var, "required: {:.2f}".format(data[data['label'] == crops][var].agg(stat)))
print() # Add an empty print statement to print a blank line
@interact
def compare(conditions =['N','P','K','temperature','humidity','rainfall','ph']):
crops = ['rice', 'maize', 'jute', 'blackgram', 'banana', 'coconut', 'apple', 'papaya', 'muskmelon', 'grapes', 'watermelon', 'kidneybeans', 'mungbean', 'orange', 'chickpea', 'lentil', 'cotton', 'pigeonpeas', 'mothbeans', 'mango', 'pomegranate', 'coffee']
for crop in crops:
print("{}: {:.2f}".format(crop.title(), data[data['label'] == crop][conditions].mean()))
@interact
def compare(conditions =['N','P','K','temperature','humidity','rainfall','ph']):
print("crops which requires greater than the average",conditions,'\n')
print(data[data[conditions]>data[conditions].mean()]['label'].unique())
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("crops which require less than average ", conditions,'\n')
print(data[data[conditions]<=data[conditions].mean()]['label'].unique())
print("Some interesting facts")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
headers = ['Condition', 'Crops']
data = [
['High Nitrogen content in soil', ', '.join(data[data['N']>120]['label'].unique())],
['High Phosphorus content in soil', ', '.join(data[data['P']>100]['label'].unique())],
['High Potassium content in soil', ', '.join(data[data['K']>200]['label'].unique())],
['High Rainfall', ', '.join(data[data['rainfall']>200]['label'].unique())],
['Low temperature', ', '.join(data[data['temperature']<10]['label'].unique())],
['High temperature', ', '.join(data[data['temperature']>40]['label'].unique())],
['Low humidity', ', '.join(data[data['humidity']<20]['label'].unique())],
['High PH', ', '.join(data[data['ph']>9]['label'].unique())],
['Very low PH', ', '.join(data[data['ph']<4]['label'].unique())]
]
print(tabulate(data, headers=headers))
# In[2]:
import pandas as pd
from sklearn.cluster import KMeans
import warnings
warnings.filterwarnings('ignore')
data=pd.read_csv('crop_recommendation.csv')
x=data.loc[:,['N','P','K','temperature','humidity','rainfall','ph']].values
print(x.shape)
x_data=pd.DataFrame(x)
x_data.head()
plt.rcParams['figure.figsize']=(10,4)
wcss=[]
for i in range(1,11):
km=KMeans(n_clusters=i,init='k-means++',max_iter=300,n_init=10,random_state=0)
km.fit(x)
wcss.append(km.inertia_)
plt.plot(range(1,11),wcss)
plt.title('The Elbow method',fontsize=20)
plt.xlabel('no. of cluster')
plt.ylabel('wcss')
plt.show()
km=KMeans(n_clusters=4,init='k-means++',max_iter=300,n_init=10,random_state=0)
y_means=km.fit_predict(x)
a=data['label']
y_means=pd.DataFrame(y_means)
z=pd.concat([y_means,a],axis=1)
z=z.rename(columns={0:'cluster'})
#check cluster for each group
print("crops in first cluster :",z[z['cluster']==0]['label'].unique())
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("crops in second cluster :",z[z['cluster']==1]['label'].unique())
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("crops in third cluster :",z[z['cluster']==2]['label'].unique())
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("crops in forth cluster :",z[z['cluster']==3]['label'].unique())
# In[3]:
from sklearn.model_selection import train_test_split
import pandas as pd
data=pd.read_csv('crop_recommendation.csv')
x=data.drop(['label'],axis=1)
y=data['label']
print("shape of x",x.shape)
print("shape of y",y.shape)
print()
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=0)
from sklearn.linear_model import LogisticRegression
model=LogisticRegression()
model.fit(x_train,y_train)
y_pred=model.predict(x_test)
from sklearn.metrics import classification_report
cr=classification_report (y_test,y_pred)
print(cr)
Prediction=model.predict((np.array([[90,40,40,20,80,7,200]])))
print("the suggested crop for this given climate condition is ",Prediction)
Prediction=model.predict((np.array([[20,30,10,15,90,7.5,100]])))
print("the suggested crop for this given climate condition is ",Prediction)
print()
# convert any non-numeric columns to numeric
for col in data.columns:
if data[col].dtype == object:
data[col] = pd.to_numeric(data[col], errors='coerce')
sns.heatmap(data.corr(),annot=True)
features = data[['N', 'P','K','temperature', 'humidity', 'ph', 'rainfall']]
target = data['label']
#features = df[['temperature', 'humidity', 'ph', 'rainfall']]
labels = data['label']
# Initialzing empty lists to append all model's name and corresponding name
acc = []
model = []
# In[4]:
#Decision Tree
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score
data = pd.read_csv("crop_recommendation.csv")
X = data.drop(['label'], axis=1)
y = data['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
dt_clf = DecisionTreeClassifier()
dt_clf.fit(X_train, y_train)
y_pred = dt_clf.predict(X_test)
dt_acc = accuracy_score(y_test, y_pred)
print("Accuracy of Decision Tree:", dt_acc*100)
cv_score = cross_val_score(dt_clf, X, y, cv=5)
print("Cross-validation score:", cv_score)
print()
print(classification_report(y_test,y_pred))
#Saving trained Decision Tree model
import pickle
# Dump the trained Decision Tree classifier with Pickle
DT_pkl_filename = 'DecisionTree.pkl'
# Open the file to save as pkl file
DT_Model_pkl = open(DT_pkl_filename, 'wb')
pickle.dump(dt_clf, DT_Model_pkl)
# Close the pickle instances
DT_Model_pkl.close()
# In[5]:
# Guassian Naive Bayes
import pandas as pd
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score
data = pd.read_csv("crop_recommendation.csv")
X = data.drop(['label'], axis=1)
y = data['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
nb_clf = GaussianNB()
nb_clf.fit(X_train, y_train)
y_pred = nb_clf.predict(X_test)
nb_acc = accuracy_score(y_test, y_pred)
print("Accuracy of Naive Bayes:", nb_acc*100)
cv_score = cross_val_score(nb_clf, X, y, cv=5)
print("Cross-validation score:", cv_score)
print()
print(classification_report(y_test,y_pred))
#Saving trained Guassian Naive Bayes model
import pickle
# Dump the trained Naive Bayes classifier with Pickle
NB_pkl_filename = 'NBClassifier.pkl'
# Open the file to save as pkl file
NB_Model_pkl = open(NB_pkl_filename, 'wb')
pickle.dump(nb_clf, NB_Model_pkl)
# Close the pickle instances
NB_Model_pkl.close()
# In[6]:
# Support Vector Machine (SVM)
import pandas as pd
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score
data = pd.read_csv("crop_recommendation.csv")
X = data.drop(['label'], axis=1)
y = data['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
svm_clf = SVC(kernel='linear')
svm_clf.fit(X_train, y_train)
y_pred = svm_clf.predict(X_test)
svm_acc = accuracy_score(y_test, y_pred)
print("Accuracy of SVM:", svm_acc*100)
cv_score = cross_val_score(svm_clf, X, y, cv=5)
print("Cross-validation score:", cv_score)
print()
print(classification_report(y_test,y_pred))
#Saving trained SVM model
import pickle
# Dump the trained SVM classifier with Pickle
SVM_pkl_filename = 'SVMClassifier.pkl'
# Open the file to save as pkl file
SVM_Model_pkl = open(SVM_pkl_filename, 'wb')
pickle.dump(svm_clf, SVM_Model_pkl)
# Close the pickle instances
SVM_Model_pkl.close()
# In[7]:
#Logistic Regression
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score
data = pd.read_csv("Crop_recommendation.csv")
X = data.drop(['label'], axis=1)
y = data['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
lr_clf = LogisticRegression(random_state=42)
lr_clf.fit(X_train, y_train)
y_pred = lr_clf.predict(X_test)
lr_acc = accuracy_score(y_test, y_pred)
print("Accuracy of Logistic Regression:", lr_acc*100)
cv_score = cross_val_score(lr_clf, X, y, cv=5)
print("Cross-validation score:", cv_score)
print()
print(classification_report(y_test,y_pred))
#Saving trained Logistic Regression model
import pickle
# Dump the trained Logistic Regression classifier with Pickle
LR_pkl_filename = 'LogisticRegression.pkl'
# Open the file to save as pkl file
LR_Model_pkl = open(LR_pkl_filename, 'wb')
pickle.dump(lr_clf, LR_Model_pkl)
# Close the pickle instances
LR_Model_pkl.close()
# In[8]:
#Random Forest
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score
data = pd.read_csv("crop_recommendation.csv")
X = data.drop(['label'], axis=1)
y = data['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
rf_clf = RandomForestClassifier(n_estimators=100, random_state=42)
rf_clf.fit(X_train, y_train)
y_pred = rf_clf.predict(X_test)
rf_acc = accuracy_score(y_test, y_pred)
print("Accuracy of Random Forest:", rf_acc*100)
cv_score = cross_val_score(rf_clf, X, y, cv=5)
print("Cross-validation score:", cv_score)
print()
print(classification_report(y_test,y_pred))
#Saving trained Random Forest model
import pickle
# Dump the trained Random Forest classifier with Pickle
RF_pkl_filename = 'RandomForest.pkl'
# Open the file to save as pkl file
RF_Model_pkl = open(RF_pkl_filename, 'wb')
pickle.dump(rf_clf, RF_Model_pkl)
# Close the pickle instances
RF_Model_pkl.close()
# In[9]:
#Accuracy Comparison
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score
models = ['Decision Tree', 'SVM', 'Naive Bayes', 'Logistic Regression', 'Random Forest']
accuracies = [dt_acc, svm_acc, nb_acc, lr_acc, rf_acc]
fig, ax = plt.subplots(figsize=(10,7))
colors = ['cyan', 'pink', 'aquamarine', 'turquoise', 'lavender']
bars = ax.bar(models, accuracies, color=colors)
for bar in bars:
height = bar.get_height()
ax.text(bar.get_x() + bar.get_width() / 2, height, round(height, 4),
ha='center', va='bottom', fontsize=12)
ax.set_xlabel('Model')
ax.set_ylabel('Accuracy')
ax.set_title('Comparison of Accuracy for Different Classifiers')
plt.show()
# In[10]:
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
classifiers = [dt_clf, nb_clf, svm_clf, lr_clf, rf_clf]
scores = []
for clf in classifiers:
cv_scores = cross_val_score(clf, X, y, cv=10)
scores.append(cv_scores)
fig = plt.figure(figsize=(10, 7))
ax = fig.add_subplot(111)
bp = ax.boxplot(scores, patch_artist=True)
plt.setp(bp['boxes'], facecolor='lightblue')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['caps'], color='black')
plt.setp(bp['medians'], color='red')
plt.setp(bp['fliers'], marker='o', markersize=3, markerfacecolor='blue')
ax.set_xticklabels(['Decision Tree', 'Naive Bayes', 'SVM', 'Logistic Regression', 'Random Forest'])
ax.set_ylabel('Cross-validation score')
ax.set_title('Comparison of Cross-Validation Scores for Different Classifiers')
plt.show()
# In[11]:
classifiers = [dt_clf, nb_clf, svm_clf, lr_clf, rf_clf]
acc = [dt_acc, nb_acc, svm_acc, lr_acc, rf_acc]
# Create a dictionary that maps each classifier name to its accuracy score
accuracy_models = dict(zip(classifiers, acc))
# Print the dictionary
for k, v in accuracy_models.items():
print (k, '==>', v)
# In[12]:
import numpy as np
# Define a single data instance to predict the crop label for
data_instance = np.array([[90, 42, 43, 23.6, 50.2, 6.8, 187.2]])
# Use the pre-trained Random Forest classifier to predict the crop label for the data instance
predicted_label = rf_clf.predict(data_instance)
# Print the predicted crop label
print("Predicted crop label:", predicted_label)
# In[13]:
import numpy as np
# Define a single data instance to predict the crop label for
data_instance = np.array([[83, 45, 60, 28, 70.3, 7.0, 150.9]])
# Use the pre-trained Random Forest classifier to predict the crop label for the data instance
predicted_label = rf_clf.predict(data_instance)
# Print the predicted crop label
print("Predicted crop label:", predicted_label)
|
Panktibhatt08/Machine-Learning
|
Machine Learning Final Project.py
|
Machine Learning Final Project.py
|
py
| 14,506 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15366599622
|
from .File import File, WrongFormatError, BrokenFormatError, FileNotFoundError, EmptyFileError
from .FileFormats import FileFormat
# User defined formats
from .FASTInFile import FASTInFile
from .FASTOutFile import FASTOutFile
from .FASTWndFile import FASTWndFile
from .CSVFile import CSVFile
from .HAWC2PCFile import HAWC2PCFile
from .HAWC2AEFile import HAWC2AEFile
from .HAWC2DatFile import HAWC2DatFile
from .FLEXOutFile import FLEXOutFile
#from .NetCDFFile import NetCDFFile
class FormatNotDetectedError(Exception):
pass
def fileFormats():
formats = []
formats.append(FileFormat(CSVFile))
formats.append(FileFormat(FASTInFile))
formats.append(FileFormat(FASTOutFile))
formats.append(FileFormat(FASTWndFile))
formats.append(FileFormat(HAWC2DatFile))
formats.append(FileFormat(HAWC2PCFile))
formats.append(FileFormat(HAWC2AEFile))
formats.append(FileFormat(FLEXOutFile))
#formats.append(FileFormat(NetCDFFile))
return formats
def detectFormat(filename):
""" Detect the file formats by looping through the known list.
The method may simply try to open the file, if that's the case
the read file is returned. """
import os
formats=fileFormats()
ext = os.path.splitext(filename.lower())[1]
detected = False
i = 0
while not detected and i<len(formats):
myformat = formats[i]
if ext in myformat.extensions:
valid, F = myformat.isValid(filename)
if valid:
#print('File detected as :',myformat)
detected=True
return myformat,F
i += 1
if not detected:
raise FormatNotDetectedError('The file format was not detected.')
def read(filename,fileformat=None):
F = None
# Detecting format if necessary
if fileformat is None:
fileformat,F = detectFormat(filename)
# Reading the file with the appropriate class if necessary
if not isinstance(F,fileformat.constructor):
F=fileformat.constructor(filename=filename)
return F
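# Usage sketch (editor's addition; the filename is illustrative):
#   f = read('results.csv')                  # format auto-detected via detectFormat()
#   fmt, f2 = detectFormat('results.csv')    # or detect explicitly and reuse the returned object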
|
rhaghi/welib
|
welib/weio/__init__.py
|
__init__.py
|
py
| 2,069 |
python
|
en
|
code
| null |
github-code
|
6
|
6214826810
|
from json import loads
from bot_core.utils.redis_topics import CMD_COLL_NAME
from bot_core.utils.action_tools import cmd_analysis, pub_sub
def main():
pub_sub.subscripe(CMD_COLL_NAME)
while (True):
msg_data = pub_sub.get_message().get("data", None)
if msg_data:
cmd_analysis(
loads(msg_data)
)
if __name__ == "__main__":
main()
|
KTOALE/tel_bot_coro
|
bot_core/src/main_core.py
|
main_core.py
|
py
| 401 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38763808043
|
import spotipy
from model import *
from enum import Enum
class EmotionsMood(Enum):
"""Creating constants to assign to different moods"""
Calm = 1 #Calm
Energetic = 2
happy = 2
sad = 0
def get_songs_features(sp,ids):
"""Get features of songs to identify tyoe of music"""
meta = sp.track(ids)
features = sp.audio_features(ids)
# meta
name = meta['name']
album = meta['album']['name']
artist = meta['album']['artists'][0]['name']
release_date = meta['album']['release_date']
length = meta['duration_ms']
popularity = meta['popularity']
ids = meta['id']
# features
acousticness = features[0]['acousticness']
danceability = features[0]['danceability']
energy = features[0]['energy']
instrumentalness = features[0]['instrumentalness']
liveness = features[0]['liveness']
valence = features[0]['valence']
loudness = features[0]['loudness']
speechiness = features[0]['speechiness']
tempo = features[0]['tempo']
key = features[0]['key']
time_signature = features[0]['time_signature']
track = [name, album, artist, ids, release_date, popularity, length, danceability, acousticness,
energy, instrumentalness, liveness, valence, loudness, speechiness, tempo, key, time_signature]
columns = ['name','album','artist','id','release_date','popularity','length','danceability','acousticness','energy','instrumentalness',
'liveness','valence','loudness','speechiness','tempo','key','time_signature']
return track,columns
def recommendations(token,emmotion_value):
"""Get actual recommendations based on songs's features"""
playlistIdUrl=[]
track_ids = []
recommended_track=[]
rec_tracks = []
if token:
sp = spotipy.Spotify(auth=token)
else:
print("Can't get token")
return
userId=sp.current_user()['id']
if userId:
all_playlists = sp.current_user_playlists(limit=1,offset=0)
for item in all_playlists['items']:
if item['external_urls']['spotify'] not in playlistIdUrl:
playlistIdUrl.append( item['external_urls']['spotify'])
for playlistId in playlistIdUrl:
Playlist = sp.user_playlist(userId, playlistId)
tracks = Playlist["tracks"]
songs = tracks["items"]
for i in range(0, len(songs)):
if songs[i]['track']['id'] != None and songs[i]['track']['id'] not in track_ids: # Removes the local tracks in your playlist if there is any
track_ids.append(songs[i]['track']['id'])
for id in track_ids:
rec_tracks += sp.recommendations(seed_tracks=[id], seed_genres=['indian, happy, calm, chill'], limit=2, min_valence=0.3, min_popularity=60)['tracks']
for track in rec_tracks:
imageUrl=''
if track['album']['images']:
imageUrl=track['album']['images'][0]['url']
trackUrl=track['external_urls']['spotify']
name=track['name']
features=get_songs_features(sp,track["id"])
mood=predict_mood(features)
# print( mood.upper())
# print(EmotionsMood(emmotion_value).name.upper())
if mood.upper()==EmotionsMood(emmotion_value).name.upper():
if trackUrl not in recommended_track:
recommended_track.append({'imageUrl': imageUrl,'trackUrl':trackUrl,'name':name})
return recommended_track
# playlist_recs = sp.user_playlist_create(username,
# name='Recommended Songs for Playlist by Amit - {}'.format(sourcePlaylist['name']))
# #Add tracks to the new playlist
# for i in rec_array:
# sp.user_playlist_add_tracks(username, playlist_recs['id'], i)
else:
return ("Token expired!!!")
|
nagarro-hackathon-2023/python_ml
|
spotify.py
|
spotify.py
|
py
| 3,947 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16320688822
|
#!/usr/bin/python
from tkinter import *
# create the window
root =Tk()
root.geometry('250x250')
root.title('Cantatan mede in yusuf')
# create the text box
text = Text(root, font=('helvetica 15 bold'), bd=2)
text.focus()
text.pack()
# create the function for the cut command
# operates on the selected text
def cut_text():
text.event_generate(('<<Cut>>'))
# create the copy function
# operates on the selected text
def copy_text():
text.event_generate(('<<Copy>>'))
# create the paste function
def paste_text():
text.event_generate(('<<Paste>>'))
# create the menu bar
menu = Menu(root, tearoff= 0)
menu.add_command(label='Cut', command=cut_text)
menu.add_command(label='Paste', command=paste_text)
menu.add_command(label='Copy', command=copy_text)
menu.add_separator()
menu.add_command(label='Exit', command=root.destroy)
# define the popup function
# context menu opened with the right mouse button
def context_menu(event):
try:
menu.tk_popup(event.x_root, event.y_root)
finally:
menu.grab_release()
# binding right click button to root
root.bind('<Button-3>', context_menu)
root.mainloop()
|
dulimpul/dulimpul
|
main.py
|
main.py
|
py
| 1,088 |
python
|
id
|
code
| 0 |
github-code
|
6
|
29899432213
|
import numpy as np
from absl import app, flags
HEADER_SIZE = 10
RECORD_SIZE = 100
FLAGS = flags.FLAGS
flags.DEFINE_string(
"input_file",
None,
"Path to binary data file.",
short_name="i",
)
flags.DEFINE_string(
"output_file",
None,
"Path to output file (optional).",
short_name="o",
)
flags.DEFINE_bool(
"header_only",
True,
f"If set, only decode the first {HEADER_SIZE} bytes of every {RECORD_SIZE} bytes.",
)
flags.DEFINE_integer(
"count",
-1,
"Only decode this many bytes; if -1, decode all.",
short_name="c",
)
flags.mark_flag_as_required("input_file")
def main(argv):
del argv # Unused.
print("Decoding", FLAGS.input_file)
if FLAGS.count >= 0:
print(f"Decoding the first {FLAGS.count} bytes")
if FLAGS.header_only:
print("Only decoding headers")
arr = np.fromfile(FLAGS.input_file, dtype=np.uint8, count=FLAGS.count)
output_file = FLAGS.output_file or FLAGS.input_file + ".txt"
with open(output_file, "w") as fout:
for (i,), x in np.ndenumerate(arr):
if not (FLAGS.header_only and i % RECORD_SIZE >= HEADER_SIZE):
print(f"{x:02x} ", end="", file=fout)
if i % RECORD_SIZE == RECORD_SIZE - 1:
print("", file=fout)
if __name__ == "__main__":
app.run(main)
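
# Hypothetical example invocation (not from the original repo); the flag names
# follow the absl definitions above:
#   python decode.py --input_file=records.bin --output_file=records.txt --count=4096 --noheader_only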
|
exoshuffle/raysort
|
scripts/misc/decode.py
|
decode.py
|
py
| 1,340 |
python
|
en
|
code
| 14 |
github-code
|
6
|
31086494290
|
import unittest
from midiutil import MIDIFile
from services.midi_creator import MidiCreator
from entities.shaku_part import ShakuPart
from entities.shaku_note import ShakuNote
class TestMidiCreator(unittest.TestCase):
def setUp(self):
self.creator = MidiCreator()
def test_generate_midi_raises_error_if_no_data(self):
self.assertRaises(ValueError, self.creator.generate_midi)
def test_generate_midi_raises_error_on_no_notes(self):
part = ShakuPart(1, 10, 1)
self.creator.create_track(part)
self.assertRaises(ValueError, self.creator.generate_midi)
def test_generate_midi_returns_midi_file(self):
note = ShakuNote(1, (100, 100), 8, True)
part = ShakuPart(1, 10, 1)
part.add_note(note)
self.creator.create_track(part)
self.assertIsInstance(self.creator.generate_midi(), MIDIFile)
|
ElectricShakuhachi/shakunotator
|
src/tests/midi_creator_test.py
|
midi_creator_test.py
|
py
| 883 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21508477562
|
def SieveOfEratosthenes(n):
prime = [True for i in range(n + 1)]
p = 2
while (p * p <= n):
if (prime[p] == True):
for i in range(p * 2, n + 1, p):
prime[i] = False
p += 1
prime[0]= False
prime[1]= False
return prime
if __name__=='__main__':
N = 1000000
prime = SieveOfEratosthenes(N)
t = int(input())
for i in range(0, t):
n = int(input())
prime_sum = 0
for j in range(2, n+1):
if prime[j]:
prime_sum += j
print(prime_sum)
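    # Hedged optimization sketch (not part of the original solution): precompute a
    # prefix sum of the primes once so each query is answered in O(1) instead of
    # re-summing up to n every time.
    # prefix = [0] * (N + 1)
    # for j in range(2, N + 1):
    #     prefix[j] = prefix[j - 1] + (j if prime[j] else 0)
    # ...then each query becomes: print(prefix[n])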
|
hmharshit/hacktoberfest
|
Solutions/summation_of_primes.py
|
summation_of_primes.py
|
py
| 618 |
python
|
en
|
code
| 12 |
github-code
|
6
|
21320836605
|
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import SAGEConv
from torch_geometric.nn import aggr
from src.utils import make_diff_matrix, triu_vector
class Model(nn.Module):
NUM_NODE_FEATURES = 140
NUM_OPCODES = 120
def __init__(self,
is_tile: bool,
is_nlp: bool, # unused for now
is_default: bool, # unused for now
wider_config: bool,
node_config_feat_size: int = 18,
tile_config_feat_size: int = 24,
):
"""
Args:
is_tile (bool): False: layout, True: tile
is_nlp (bool): False: xla, True: nlp
is_default (bool): False: random, True: default
"""
super().__init__()
node_feat_emb_size = 20
node_opcode_emb_size = 12
self.node_feat_embedding = nn.Linear(self.NUM_NODE_FEATURES,
node_feat_emb_size)
self.node_opcode_embedding = nn.Embedding(self.NUM_OPCODES,
node_opcode_emb_size)
config_feat_size = tile_config_feat_size if is_tile else node_config_feat_size
concat_node_feat_size = (node_feat_emb_size +
node_opcode_emb_size +
config_feat_size)
if is_tile or is_nlp or wider_config: # enable wider config for tile and for nlp by default
in_channels = 64
channel_config = [256, 256, 256, 256, 512, 512, 512, 512]
else:
in_channels = 32
channel_config = [64, 64, 128, 128, 256, 256]
assert len(channel_config) > 0
self.add_residuals: bool
if is_nlp:
self.add_residuals = True
else:
self.add_residuals = False
self.input_shaping = nn.Linear(concat_node_feat_size, in_channels)
self.convs = nn.ModuleList()
in_ch = in_channels
for out_ch in channel_config:
conv = SAGEConv(in_ch, out_ch)
self.convs.append(conv)
in_ch = out_ch
REGRESSION_SIZE = 1
self.output_shaping = nn.Linear(channel_config[-1], REGRESSION_SIZE)
self.aggr_sum = aggr.SumAggregation()
def forward(self,
node_feat: torch.Tensor,
node_opcode: torch.Tensor,
batch: torch.Tensor,
ptr: torch.Tensor,
node_config_feat: torch.Tensor,
node_config_ids: torch.Tensor,
node_config_ptr: torch.Tensor,
config_feat: torch.Tensor,
config_feat_ptr: torch.Tensor,
edge_index: torch.Tensor,
ub_size: int, # microbatch_size
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
DataBatch(
node_feat=[525076, 140],
node_opcode=[525076],
batch=[525076],
ptr=[41],
node_config_feat=[35496, 18],
node_config_ids=[35496],
node_config_batch=[35496],
node_config_ptr=[41],
edge_index=[2, 896088],
)
"""
is_tile = config_feat is not None
SCALE_MS_TO_SEC = 1e-3
batch_size = ptr.shape[0] - 1
if batch_size % ub_size != 0:
print(f"Warning: batch size {batch_size} not divisible "
f"by microbatch size {ub_size}. "
f"Fine for val, error for train.")
num_nodes = node_feat.shape[0]
if is_tile:
config_feat_size = config_feat.shape[0] // batch_size
else:
config_feat_size = node_config_feat.shape[1]
node_feat_log = torch.log1p(torch.abs(node_feat)) * torch.sign(node_feat)
node_feat_emb = self.node_feat_embedding(node_feat_log)
node_opcode_emb = self.node_opcode_embedding(node_opcode.long())
if is_tile:
graph_config_list = []
for ib in range(batch_size):
config_slice = slice(config_feat_ptr[ib],
config_feat_ptr[ib+1])
num_nodes_in_graph = ptr[ib+1] - ptr[ib]
graph_config = config_feat[config_slice]
graph_config_tiled = torch.tile(graph_config.unsqueeze(0),
(num_nodes_in_graph, 1))
graph_config_list.append(graph_config_tiled)
config_feat_all = torch.concat(graph_config_list)
else:
config_feat_all = torch.zeros(size=(num_nodes, config_feat_size),
dtype=torch.float32, device=node_feat.device)
for ib in range(batch_size):
config_slice = slice(node_config_ptr[ib],
node_config_ptr[ib+1])
sample_config_ids = node_config_ids[config_slice]
sample_config_feat = node_config_feat[config_slice]
global_config_ids = sample_config_ids + ptr[ib]
config_feat_all[global_config_ids, :] = sample_config_feat
node_feat_all = torch.cat((node_feat_emb,
node_opcode_emb,
config_feat_all), dim=-1)
feat = F.relu(self.input_shaping(node_feat_all))
for conv in self.convs:
feat_out = conv(feat, edge_index)
if self.add_residuals and (feat_out.shape[1] == feat.shape[1]):
                feat = feat_out + feat # residual connection
else:
feat = feat_out
feat = F.relu(feat)
per_node_latencies_unsq = self.output_shaping(feat)
# branch for MAPE
per_graph_latenies_ms = self.aggr_sum(per_node_latencies_unsq, batch)
per_graph_latenies_ms_sq = per_graph_latenies_ms.squeeze(-1)
if is_tile:
per_graph_latenies = per_graph_latenies_ms_sq
else:
per_graph_latenies = SCALE_MS_TO_SEC * per_graph_latenies_ms_sq
# branch for diff matrix
assert batch_size % ub_size == 0
num_microbatches = batch_size // ub_size
diff_triu_vector_list = []
for iub in range(num_microbatches):
ub_slice = slice(iub*ub_size,
(iub+1)*ub_size)
# per_ub_latencies [ub_size]
per_ub_latencies = per_graph_latenies[ub_slice]
# diff_matrix [ub_size, ub_size]
diff_matrix = make_diff_matrix(per_ub_latencies)
# triu_len = ub_size*(ub_size-1)/2. Ex triu_len=6 for ub_size=4.
# diff_triu_vector [triu_len]
diff_triu_vector = triu_vector(diff_matrix)
diff_triu_vector_list.append(diff_triu_vector)
# diff_triu_vector_stack [num_microbatches, triu_len]
diff_triu_vector_stack = torch.stack(diff_triu_vector_list)
return per_graph_latenies, diff_triu_vector_stack
|
Obs01ete/kaggle_latenciaga
|
src/model.py
|
model.py
|
py
| 7,190 |
python
|
en
|
code
| 1 |
github-code
|
6
|
70749170429
|
#Server dependencies
#from gevent.pywsgi import WSGIServer
from threading import Thread
from flask import Flask, request, send_from_directory
from flask_mobility import Mobility
import os
#APIs used
from assistant import sendToAssistant
#File management
#from bs4 import BeautifulSoup
import codecs
#import re
import json
#import logging
#Miscellaneous
from datetime import datetime
#Warnings
#import warnings
#warnings.filterwarnings("ignore")
#Logging configuration set to debug on history.log file
#logging.basicConfig(filename='history.log', level=logging.DEBUG)
#logging.basicConfig(
# format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
def run():
#Flask built in deploy for development (lazy loading)
app.run(host='0.0.0.0',port=8081)
#WSGIServer deploy for production.
#WSGIServer(('', 8081), app).serve_forever()
#Cache reloading method
def cacheWorkaround(file):
date = datetime.today().strftime('%Y-%m-%d')
return file.read().replace('REPLACE', date)
#Open html files
def loadPage(src):
#For cache reloading on load
#return cacheWorkaround(src)
return codecs.open(src, "r", "utf-8").read()
#Designated thread for the server process
def keep_alive():
t = Thread(target=run)
t.start()
#Flask app
app = Flask(__name__)
Mobility(app)
#Disable unwanted dependencies logging
#werkzeugLog = logging.getLogger('werkzeug')
#werkzeugLog.disabled = True
#requestsLog = logging.getLogger("urllib3.connectionpool")
#requestsLog.disabled = True
@app.route('/')
def main():
#Main endpoint corresponds to index.html
site = loadPage("index.html")
return site
@app.route('/favicon.ico')
def favicon():
return send_from_directory(
os.path.join(app.root_path, 'static'),
'favicon.ico',
mimetype='image/vnd.microsoft.icon')
@app.route('/input', methods=['GET'])
@app.route('/demo/input', methods=['GET'])
def web():
#server endpoint for client-watson connection
msg = request.args.get('msg')
if '\n' in msg:
msg = msg.replace('\n', '')
#logging.info('Incoming: ' + msg)
session_id = ''
try:
        #sends the input to Watson for message analysis
response, session_id = sendToAssistant(msg)
#logging.info('Watson: ' + str(response))
except:
        #Critical error: either Watson's response was invalid, or there was a server error.
response = "Error"
#logging.info('Out: ' + response)
return json.dumps(response)+'|'+str(session_id)
if __name__ == '__main__':
keep_alive()
|
Creativity-Hub/chatbot_template
|
main.py
|
main.py
|
py
| 2,540 |
python
|
en
|
code
| 1 |
github-code
|
6
|
12066432298
|
import tensorflow as tf
import numpy as np
import pandas as pd
import ast
import pickle
class TetrisAgent:
def __init__(self):
nx = 3400
ny = 34
self.X = tf.placeholder(dtype=tf.float32, shape=(None, nx + 1), name="X")
theta = tf.Variable(tf.random_uniform([nx + 1, 34], -1.0, 1.0), name="theta")
self.y_pred = tf.matmul(self.X, theta, name="predictions")
init = tf.global_variables_initializer()
saver = tf.train.Saver()
self.sess = tf.Session()
self.sess.run(init)
saver.restore(self.sess, "models/my_model_final.ckpt")
print("model restored", theta.eval(session = self.sess))
print("theta shape", theta.shape)
def play(self, input):
y = self.y_pred.eval(session = self.sess, feed_dict={self.X: input})
return y
def pick(self, placements):
input = np.array(placements).reshape(1, 3400)
input = np.c_[1, input]
y = self.play(input)
picked = max((v, i) for i, v in enumerate(y[0]))[1]
return picked
|
smlgorta/dltetris
|
tetris_agent.py
|
tetris_agent.py
|
py
| 1,069 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2799425380
|
import os.path
import random
import torchvision.transforms as transforms
import torch
from data.base_dataset import BaseDataset, get_params, get_transform, normalize
from data.image_folder import make_dataset
from PIL import Image
import numpy as np
class AlignedDataset(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
### label maps
self.dir_label = os.path.join(opt.dataroot, opt.phase + '_label')
self.label_paths = sorted(make_dataset(self.dir_label))
### real images
if opt.isTrain:
self.dir_image = os.path.join(opt.dataroot, opt.phase + '_img')
self.image_paths = sorted(make_dataset(self.dir_image))
### load face bounding box coordinates size 128x128
if opt.face_discrim or opt.face_generator:
self.dir_facetext = os.path.join(opt.dataroot, opt.phase + '_facetexts128')
print('----------- loading face bounding boxes from %s ----------' % self.dir_facetext)
self.facetext_paths = sorted(make_dataset(self.dir_facetext))
self.dataset_size = len(self.label_paths)
def __getitem__(self, index):
### label maps
paths = self.label_paths
label_path = paths[index]
label = Image.open(label_path).convert('RGB')
params = get_params(self.opt, label.size)
transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
label_tensor = transform_label(label)
original_label_path = label_path
image_tensor = next_label = next_image = face_tensor = 0
### real images
if self.opt.isTrain:
image_path = self.image_paths[index]
image = Image.open(image_path).convert('RGB')
transform_image = get_transform(self.opt, params)
image_tensor = transform_image(image).float()
is_next = index < len(self) - 1
if self.opt.gestures:
is_next = is_next and (index % 64 != 63)
""" Load the next label, image pair """
if is_next:
paths = self.label_paths
label_path = paths[index+1]
label = Image.open(label_path).convert('RGB')
params = get_params(self.opt, label.size)
transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
next_label = transform_label(label).float()
if self.opt.isTrain:
image_path = self.image_paths[index+1]
image = Image.open(image_path).convert('RGB')
transform_image = get_transform(self.opt, params)
next_image = transform_image(image).float()
""" If using the face generator and/or face discriminator """
if self.opt.face_discrim or self.opt.face_generator:
facetxt_path = self.facetext_paths[index]
facetxt = open(facetxt_path, "r")
face_tensor = torch.IntTensor(list([int(coord_str) for coord_str in facetxt.read().split()]))
input_dict = {'label': label_tensor.float(), 'image': image_tensor,
'path': original_label_path, 'face_coords': face_tensor,
'next_label': next_label, 'next_image': next_image }
return input_dict
def __len__(self):
return len(self.label_paths)
def name(self):
return 'AlignedDataset'
|
carolineec/EverybodyDanceNow
|
data/aligned_dataset.py
|
aligned_dataset.py
|
py
| 3,566 |
python
|
en
|
code
| 639 |
github-code
|
6
|
7774997291
|
# state_list   : a two-column matrix where the first column holds the state
#                matrix and the second whether that state still has
#                unexplored moves.
# Move function: receives a state and tries to move a disc from column X
#                to column Y. If that is not possible, or if X = Y,
#                it returns a matrix identical to the one received.
# Depth_Hanoi  : always tries the first possible move, scanning from
#                left to right, in this order:
#
#                left pillar to middle pillar;
#                left pillar to right pillar;
#                middle pillar to left pillar;
#                middle pillar to right pillar;
#                right pillar to left pillar;
#                right pillar to middle pillar;
# Breadth_Hanoi: tries every possible move from a state, marks that state as
#                their "parent", then switches the current state to the next
#                state found, unless the last state found is the goal state.
#                The solution path is recovered by walking back through the parents.
# Greedy_Hanoi : checks the heuristic of every possible move and picks the one
#                with the best heuristic (lowest value). Repeats until the goal is reached.
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def was_visited(matrix,state_list): # checks whether the state has already been visited
for i in range(0, len(state_list)):
if state_list[i][0] == matrix:
return True
return False
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def set_false(matrix,state_list): # marks the state as False in the state list
for x in range(len(state_list) - 1, -1, -1):
if state_list[x][0] == matrix:
state_list[x][1] = False;
break
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def get_last_true(state_list): # gets the last state marked True in the state list
for x in range(len(state_list) - 1, -1, -1):
if state_list[x][1] == True:
return state_list[x][0]
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def position_of(matrix,state_list):
for i in range(len(state_list)):
if state_list[i][0] == matrix:
return i
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def print_result(state_list): # prints the result in an easier-to-read layout
    print("Solution found:")
    for i in range(3): # print the top, then the middle, then the base of the states
        for state in state_list:
            if state[1]: # if the state is marked as True
                print(state[0][i], end = " ") # print the corresponding row
                if(i == 1):
                    print("-->", end = " ") # if it is the middle row, add an arrow ("-->")
                else:
                    print("   ", end = " ")
        print() # new line
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def Move(matrix, state_list, coluna_x, coluna_y): # tries to move from column X to column Y and returns the resulting matrix
    if coluna_x == coluna_y: # it is not possible to move a disc to the position it is already in
        return matrix
    copy = [[0,0,0],[0,0,0],[0,0,0]] # working copy of the matrix
for i in range(0,3):
for j in range(0,3):
copy[i][j] = matrix[i][j]
has_moved = False
for i in range(0,3):
        if copy[i][coluna_x] != 0 and not has_moved: # read the source column from top to bottom and take the top disc
            for j in range(2,-1,-1): # read the destination column from bottom to top
                if(copy[j][coluna_y] == 0 and j == 2): # if the free position is the base
                    copy[j][coluna_y] = copy[i][coluna_x] # destination = disc being moved
                    copy[i][coluna_x] = 0 # source = 0
                    has_moved = True # mark that the move was possible
                    break;
                elif(copy[j][coluna_y] == 0 and copy[j + 1][coluna_y] > copy[i][coluna_x]): # if the free position is not the base, check that the disc
                                                                                            # below is larger than the disc being moved
                    copy[j][coluna_y] = copy[i][coluna_x] # destination = disc being moved
                    copy[i][coluna_x] = 0 # source = 0
                    has_moved = True # mark that the move was possible
break;
return copy;
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def Depth_Hanoi(start_matrix, goal_matrix):
goal_reached = False
    state_list = [[start_matrix[:], True]] # add the initial matrix to the state list
    restart_while = False # helper flag used to restart the while loop
movimentos = 0
while (not goal_reached):
restart_while = False
for i in range(0, 3):
            for j in range(0, 3): # to try every possible move
                attempt = Move(start_matrix, state_list, i, j) # try to move
                if (attempt != start_matrix and not was_visited(attempt, state_list)): # if the move is possible and the new state has not been visited yet
                    state_list.append([attempt[:], True]) # add the new state to the list
                    start_matrix[:] = attempt # it becomes the current state
                    movimentos+=1
                    restart_while = True # the while loop must be restarted to try every possible move from the new state
                    print("move ", movimentos, ": disc from peg ", i+1, " to peg: ", j+1)
print(start_matrix[0])
print(start_matrix[1])
print(start_matrix[2])
                    if (start_matrix == goal_matrix): # if the goal is found, stop everything
goal_reached = True
                    if(restart_while): # break the FOR loops to restart the while
break
if(restart_while):
break
        if(not restart_while): # if every move was tried and none was possible
set_false(start_matrix, state_list)
start_matrix[:] = get_last_true(state_list)
print_result(state_list)
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def Breadth_Hanoi(start_matrix, goal_matrix):
goal_reached = False
    state_list = [[start_matrix[:], True,-1]] # add the initial matrix to the state list
    movimentos = 0
    next = 0 # used to advance to the next state
    while (not goal_reached):
        for i in range(0, 3): # try every possible move for this state
if(not goal_reached):
for j in range(0, 3):
attempt = Move(start_matrix, state_list, i, j)
                    if not was_visited(attempt,state_list): # if the newly generated state has not been visited yet
                        movimentos += 1
                        print("move ", movimentos, ": disc from peg ", i + 1, " to peg: ", j + 1) # print the state on the screen
print(attempt[0])
print(attempt[1])
print(attempt[2])
                        state_list.append([attempt[:], True, position_of(start_matrix,state_list)]) # add the state to the list and record the current state as its parent
                        if (attempt == goal_matrix): # if the move reached the goal state
                            start_matrix = attempt # make it the current state
next = -1
goal_reached = True
next +=1
        start_matrix[:] = state_list[next][0] # move on to the next state
parent = len(state_list)-1
path = []
    while(parent != -1): # walk through state_list collecting each state's parent, to recover the solution path
        path.insert(0,state_list[parent]) # insert at the beginning of the list
parent = state_list[parent][2]
print_result(path)
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def Greedy_Hanoi(start_matrix, goal_matrix, heuristic):
goal_reached = False
    state_list = [[start_matrix[:], True]] # add the initial matrix to the state list
movimentos = 0
path = []
while (not goal_reached):
possible_moves = []
        for i in range(0, 3): # try every possible move for this state
if (not goal_reached):
for j in range(0, 3):
                    attempt = Move(start_matrix, state_list, i, j) # find out what the move is
                    possible_moves.append([attempt[:],heuristic(attempt)]) # add it to the possible moves, together with its heuristic
                    if (attempt == goal_matrix): # if the move reached the goal state
                        start_matrix = attempt # make it the current state
goal_reached = True
        set_false(start_matrix, state_list) # after exhausting the possible moves, mark this state as False
        smallest = 9999
        for i in range(0,len(possible_moves)): # pick the best move (lowest heuristic) that
if(not was_visited(possible_moves[i][0],state_list)):
movimentos += 1
print("movimento ", movimentos) # printa o estado na tela
print(possible_moves[i][0][0])
print(possible_moves[i][0][1])
print(possible_moves[i][0][2])
                if possible_moves[i][1] < smallest: # has not been visited yet
smallest = possible_moves[i][1]
start_matrix[:] = possible_moves[i][0]
        path.append([start_matrix[:],True]) # add this state to the answer
        state_list.append([start_matrix[:], True]) # add the state to the state list
print_result(path)
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
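# Minimal usage sketch (not part of the original file). It assumes the 3x3 state
# encoding used above: matrix[row][pillar] holds a disc size (a larger number is a
# larger disc) and 0 marks an empty slot. The heuristic passed to Greedy_Hanoi is a
# hypothetical one that counts the discs not yet on the rightmost (goal) pillar.
if __name__ == '__main__':
    start = [[1, 0, 0], [2, 0, 0], [3, 0, 0]] # all discs stacked on the left pillar
    goal = [[0, 0, 1], [0, 0, 2], [0, 0, 3]] # all discs stacked on the right pillar

    def misplaced_discs(matrix): # hypothetical heuristic for Greedy_Hanoi
        return sum(1 for row in matrix
                   for pillar, disc in enumerate(row)
                   if disc != 0 and pillar != 2)

    Depth_Hanoi([row[:] for row in start], goal)
    Breadth_Hanoi([row[:] for row in start], goal)
    # Greedy_Hanoi can stall when every neighbouring state was already visited, so it is left commented out here:
    # Greedy_Hanoi([row[:] for row in start], goal, misplaced_discs)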
|
LucasEduardoGiovanini/AI-Implementations
|
Hanoi_Tower_Searches.py
|
Hanoi_Tower_Searches.py
|
py
| 11,949 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
6251031632
|
import matplotlib.pyplot as plt
import eel
eel.init("web")
userid = []
@eel.expose
def login(uid,upass):
get = open("data.txt", "r")
temp = get.readlines()
udata = []
for i in range(len(temp)):
if temp[i].startswith(uid):
udata = temp[i].split("|")
#print(udata)
if udata[2] != upass:
return "false"
else:
if udata[0].startswith("vt"):
return udata,"voter"
elif udata[0].startswith("ca"):
return udata,"candidate"
elif udata[0].startswith("st"):
return udata,"staff"
get.close()
def vsort():
get = open("data.txt", "r")
get.seek(0)
temp = get.readlines()
v = []
c = []
s = []
l = ""
for i in range(len(temp)):
if temp[i].startswith("vt"):
v.append(temp[i])
if temp[i].startswith("st"):
s.append(temp[i])
if temp[i].startswith("ca"):
c.append(temp[i])
if temp[i].startswith("system"):
l = temp[i]
fout = open("data.txt", "w")
v.sort()
c.sort()
s.sort()
for j in range(len(c)):
final = f"{c[j]}"
fout.write(final)
for j in range(len(s)):
final = f"{s[j]}"
fout.write(final)
for j in range(len(v)):
final = f"{v[j]}"
fout.write(final)
fout.write(l)
fout.close()
fin1 = open("data.txt","r")
fin2 = open("index.txt","w")
fin1.seek(0)
details = fin1.readlines()
for i in range(len(details)):
data = details[i].split("|")
fin2.write(f"{i}|{data[0]}|\n")
fin1.close()
fin2.close()
get.close()
@eel.expose
def display_teams():
gets = open("data.txt", "r")
temp = gets.readlines()
clist = []
iget = open("index.txt","r")
index = iget.readlines()
cid=[]
fdata = []
for i in range(len(index)):
data = index[i].split("|")
if data[1].startswith("ca"):
cid.append(data[0])
for i in cid:
i=int(i)
clist.append(temp[i])
for i in range(len(clist)):
data = clist[i].split("|")
fdata.append(f"{data[0]}|{data[1]}|{data[3]}|")
print(fdata)
gets.close()
iget.close()
return fdata
@eel.expose
def display_voter(uid):
gets = open("data.txt", "r")
temp = gets.readlines()
clist = []
iget = open("index.txt","r")
index = iget.readlines()
cid=[]
fdata = []
for i in range(len(index)):
data = index[i].split("|")
if data[1].startswith(uid):
cid.append(data[0])
for i in cid:
i=int(i)
clist.append(temp[i])
for i in range(len(clist)):
data = clist[i].split("|")
fdata = data
gets.close()
iget.close()
return fdata
def system_status():
data = []
get = open("data.txt","r")
temp = get.readlines()
for i in range(len(temp)):
if temp[i].startswith("system"):
data = temp[i].split("|")
if data[1]=="true":
return True
elif data[1]=="false":
return False
get.close()
@eel.expose
def cast_vote(cid,uid):
iget=open("index.txt","r")
index = iget.readlines()
get = open("data.txt","r")
temp = get.readlines()
cindex = ""
vindex = ""
cdata = []
vdata = []
for i in range(len(index)):
data = index[i].split("|")
if data[1].startswith(cid):
cindex = data[0]
if data[1].startswith(uid):
vindex = data[0]
vidx =int(vindex)
cidx =int(cindex)
vdata = temp[vidx].split("|")
cdata = temp[cidx].split("|")
val = system_status()
if val:
if vdata[5] == "false":
vote = int(cdata[4])
vote += 1
cdata[4] = vote
vdata[5] = "true"
cupdate = f"{cdata[0]}|{cdata[1]}|{cdata[2]}|{cdata[3]}|{cdata[4]}|\n"
vupdate = f"{vdata[0]}|{vdata[1]}|{vdata[2]}|{vdata[3]}|{vdata[4]}|{vdata[5]}|\n"
fout = open("data.txt","w")
for i in range(len(temp)):
if temp[i].startswith(vdata[0]):
temp[i] = vupdate
if temp[i].startswith(cdata[0]):
temp[i] = cupdate
for i in range(len(temp)):
fout.write(temp[i])
fout.close()
iget.close()
get.close()
return "voted successfully"
else:
iget.close()
get.close()
return "it seems you have already voted"
else:
return("Vote System is Inactive")
@eel.expose
def profile(vid):
print(vid)
iget = open("index.txt","r")
index = iget.readlines()
get = open("data.txt","r")
temp = get.readlines()
pid = ""
for i in range(len(index)):
data = index[i].split("|")
if data[1] == vid:
pid = data[0]
pid = int(pid)
user_data = temp[pid].split("|")
print(user_data)
get.close()
iget.close()
return user_data,"true"
@eel.expose
def winner():
cid = []
c_teams = []
c_votes = []
iget = open("index.txt","r")
get = open("data.txt","r")
index = iget.readlines()
temp = get.readlines()
for i in range(len(index)):
data = index[i].split("|")
if data[1].startswith("ca"):
cid.append(data[0])
for i in cid:
i=int(i)
data = temp[i].split("|")
c_teams.append(data[3])
c_votes.append(int(data[4]))
num = max(c_votes)
id = c_votes.index(num)
name = c_teams[id]
    plt.bar(c_teams,c_votes)
    plt.title('winner')
    plt.xlabel('teams')
    plt.ylabel('Votes')
    # set the labels before saving so they appear in the exported image
    plt.savefig("web\output1", facecolor='w', bbox_inches="tight",pad_inches=0.3, transparent=True)
get.close()
iget.close()
return name
@eel.expose
def system_mod(uid,override_key):
sid = []
iget = open("index.txt","r")
get = open("data.txt","r")
index = iget.readlines()
temp = get.readlines()
details = temp
flag = False
for i in range(len(index)):
data = index[i].split("|")
if data[1].startswith("st"):
sid.append(data[0])
for i in sid:
i = int(i)
data = temp[i].split("|")
if data[0].startswith(uid):
if data[3].startswith(override_key):
flag = True
if flag:
for i in range(len(details)):
if details[i].startswith("system"):
s_data = details[i].split("|")
if s_data[1] == "true":
s_data[1] = "false"
s_final = f"{s_data[0]}|{s_data[1]}|"
for j in range(len(details)):
if details[j].startswith("system"):
details[j] = s_final
fout = open("data.txt", "r+")
for k in range(len(details)):
op = f"{details[k]}"
fout.write(op)
fout.close()
vsort()
return "System is Inactive"
elif s_data[1] == "false":
s_data[1] = "true"
s_final = f"{s_data[0]}|{s_data[1]}|"
for j in range(len(details)):
if details[j].startswith("system"):
details[j] = ""
details[j] = s_final
fout = open("data.txt", "r+")
for k in range(len(details)):
op = f"{details[k]}"
fout.write(op)
fout.close()
vsort()
return "system is active"
@eel.expose
def reset(uid,override_key):
sid = []
key = str(override_key)
iget = open("index.txt","r")
get = open("data.txt","r")
index = iget.readlines()
temp = get.readlines()
details = temp
flag = False
for i in range(len(index)):
data = index[i].split("|")
if data[1].startswith("st"):
sid.append(data[0])
for i in sid:
i = int(i)
data = temp[i].split("|")
if data[0].startswith(uid):
if data[3].startswith(key):
flag = True
print("pass")
if flag:
for i in range(len(details)):
if details[i].startswith("vt"):
data = details[i].split("|")
data[5] = "false"
vupdate = f"{data[0]}|{data[1]}|{data[2]}|{data[3]}|{data[4]}|{data[5]}|\n"
for i in range(len(details)):
if details[i].startswith(data[0]):
details[i] = vupdate
fout = open("data.txt", "w")
for j in range(len(details)):
op = f"{details[j]}"
fout.write(op)
fout.close()
if details[i].startswith("ca"):
cdata = details[i].split("|")
cdata[4] = 0
cupdate = f"{cdata[0]}|{cdata[1]}|{cdata[2]}|{cdata[3]}|{cdata[4]}|\n"
for i in range(len(details)):
if details[i].startswith(cdata[0]):
details[i] = cupdate
fout = open("data.txt", "w")
for j in range(len(details)):
op = f"{details[j]}"
fout.write(op)
fout.close()
vsort()
return "System Data is been formated"
else:
return "Failed"
@eel.expose
def add_voter(name,password,age,ano):
get = open("data.txt","r")
details = get.readlines()
count = 0
voters = []
for i in range(len(details)):
if details[i].startswith("vt"):
data = details[i].split("|")
if data[4] == ano:
return "person already exsist"
for i in range(len(details)):
if details[i].startswith("vt"):
voters.append(details[i])
temp = voters[-1].split("|")
id = temp[0]
count = id[2:]
b = int(count)
count = b
if count < 9:
count = count+1
vid = "000"+str(count)
elif count<99:
count+=1
vid = "00"+str(count)
elif count<999:
count+=1
vid = "0"+str(count)
else:
vid = str(count)
id = "vt"+vid
status = "false"
final = f"{id}|{name}|{password}|{age}|{ano}|{status}|\n"
details.append(final)
fout = open("data.txt", "w")
for j in range(len(details)):
op = f"{details[j]}"
fout.write(op)
fout.close()
vsort()
get.close()
return "Voter added successfully"
@eel.expose
def remove_voter(uid):
if uid[0:2] == "vt":
data = []
get = open("data.txt","r")
details = get.readlines()
for i in range(len(details)):
if details[i].startswith(uid):
clear = ""
data=details[i].split("|")
details[i] = clear
fout = open("data.txt", "r+")
for j in range(len(details)):
op = f"{details[j]}"
fout.write(op)
fout.close()
vsort()
get.close()
return f"{data[1]}"
else:
return "false"
@eel.expose
def modify_voter(uid,name,password,age,ano):
vid = []
get = open("data.txt","r")
iget = open("index.txt","r")
details=get.readlines()
index = iget.readlines()
for i in range(len(index)):
data = index[i].split("|")
if data[1].startswith(uid):
vid = data[0]
vid = int(vid)
v_data = details[vid].split("|")
v_data[1] = name
v_data[2] = password
v_data[3] = age
v_data[4] = ano
vupdate = f"{v_data[0]}|{v_data[1]}|{v_data[2]}|{v_data[3]}|{v_data[4]}|{v_data[5]}|\n"
for i in range(len(details)):
if details[i].startswith(v_data[0]):
details[i]=vupdate
fout = open("data.txt", "r+")
for j in range(len(details)):
op = f"{details[j]}"
fout.write(op)
fout.close()
get.close()
iget.close()
vsort()
return "data Modified"
@eel.expose
def add_candidate(name,password,teamname):
get = open("data.txt","r")
details = get.readlines()
count = 0
candi=[]
for i in range(len(details)):
if details[i].startswith("ca"):
data = details[i].split("|")
if data[1] == name or data[3] == teamname:
return "team already exsist"
for i in range(len(details)):
if details[i].startswith("ca"):
candi.append(details[i])
temp = candi[-1].split("|")
id = temp[0]
count = id[2:]
b = int(count)
count = b
if count < 9:
count = count+1
vid = "000"+str(count)
elif count<99:
count+=1
vid = "00"+str(count)
elif count<999:
count+=1
vid = "0"+str(count)
else:
vid = str(count)
id = "ca"+vid
votes = 0
final = f"{id}|{name}|{password}|{teamname}|{votes}|\n"
details.append(final)
fout = open("data.txt", "w")
for j in range(len(details)):
op = f"{details[j]}"
fout.write(op)
fout.close()
vsort()
get.close()
return "Team Added Succesfully"
@eel.expose
def remove_candidate(uid):
get = open("data.txt","r")
details = get.readlines()
for i in range(len(details)):
if details[i].startswith(uid):
clear = ""
data = details[i].split("|")
details[i] = clear
fout = open("data.txt", "r+")
for j in range(len(details)):
op = f"{details[j]}"
fout.write(op)
fout.close()
vsort()
get.close()
return data[3]
@eel.expose
def display_candidate(vid):
gets = open("data.txt", "r")
temp = gets.readlines()
clist = []
iget = open("index.txt","r")
index = iget.readlines()
cid=[]
fdata = []
for i in range(len(index)):
data = index[i].split("|")
if data[1].startswith(vid):
cid.append(data[0])
for i in cid:
i=int(i)
clist.append(temp[i])
for i in range(len(clist)):
data = clist[i].split("|")
fdata = data
#print(fdata)
gets.close()
iget.close()
return fdata
@eel.expose
def modify_candidate(uid,name,password,teamname):
vid = []
get = open("data.txt","r")
iget = open("index.txt","r")
details=get.readlines()
index = iget.readlines()
for i in range(len(index)):
data = index[i].split("|")
if data[1].startswith(uid):
vid = data[0]
vid = int(vid)
v_data = details[vid].split("|")
v_data[1] = name
v_data[2] = password
v_data[3] = teamname
vupdate = f"{v_data[0]}|{v_data[1]}|{v_data[2]}|{v_data[3]}|{v_data[4]}|\n"
for i in range(len(details)):
if details[i].startswith(v_data[0]):
details[i]=vupdate
fout = open("data.txt", "r+")
for j in range(len(details)):
op = f"{details[j]}"
fout.write(op)
fout.close()
get.close()
iget.close()
vsort()
return "Modified Successfully"
@eel.expose
def allvoter():
get = open("data.txt","r")
get.seek(0)
temp = get.readlines()
voters = []
for i in range(len(temp)):
if temp[i].startswith("vt"):
voters.append(temp[i])
get.close()
count = len(voters)
return voters,count
@eel.expose
def allcandidate():
get = open("data.txt","r")
get.seek(0)
temp = get.readlines()
candidate = []
for i in range(len(temp)):
if temp[i].startswith("ca"):
candidate.append(temp[i])
get.close()
count = len(candidate)
return candidate,count
eel.start("index.html")
#if __name__ == '__main__':
#allvoter()
#vsort()
#login("vt1001","1001")
#cast_vote("ca1001","vt1008")
#profile("vt1001")
#profile("ca0002")
#profile("st1002")
#display_teams()
# val = display_voter("vt0002")
# print(val)
#system_status()
#val=winner()
#print(val)
#system_mod("st1001","5656")
#val=reset("st0001","5656")
#print(val)
#add_voter("pheonix","20","2101","1010")
#remove_voter("vt0004")
#modify_voter("vt1009","breach","25","1009","3102")
#add_candidate("thanos","1004","genysis")
#remove_candidate("ca0003")
#modify_candidate("ca1003","brimstone","1003","valo")
|
vish0290/Polling-System-File-Structures
|
main.py
|
main.py
|
py
| 17,483 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36312680043
|
import pandas as pd
from xml.etree import ElementTree as ET
import requests
from datetime import datetime
import matplotlib.pyplot as plt
q_range = {'Range': 1}
q_resp_group = {'ResponseGroup': 'History'}
q_url_WHO = {'https://www.who.int/'}
q_date = datetime.date(datetime.now())
url = "https://awis.api.alexa.com/api?" \
"Action=TrafficHistory" \
"&Range=31" \
"&ResponseGroup=History" \
"&Url=https://coronavirus.jhu.edu/map.html"\
"&Start=20200301"\
payload = {}
headers = {
# 'x-api-key': '''
# ### your_api_key_here ###
# '''
}
response = requests.request("GET", url, headers=headers, data=payload)
awis_xml_str = response.text.encode('utf8')
# parse directly from texts instead of file
root = ET.fromstring(awis_xml_str)
column_names = ["Date", "Page_View_Per_Million", "Page_View_Per_User", "Rank", "Reach_Per_Million"]
click_ratio_table = pd.DataFrame(columns=column_names)
for results in root.findall('Results'):
for result in results.findall('Result'):
for alexa in result.findall('Alexa'):
for trafficHistory in alexa.findall('TrafficHistory'):
for historicalData in trafficHistory.findall('HistoricalData'):
historical_data = ET.tostring(historicalData)
root2 = ET.fromstring(historical_data)
for data in root2:
date = data[0].text
ppm = pd.to_numeric(data[1][0].text)
ppu = pd.to_numeric(data[1][1].text)
rank = pd.to_numeric(data[2].text)
rpm = pd.to_numeric(data[3][0].text)
new_row = [date, ppm, ppu, rank, rpm]
click_ratio_table.loc[len(click_ratio_table)] = new_row
click_ratio_table = pd.DataFrame(click_ratio_table)
# plt.cla()
# plt.plot(click_ratio_table.loc[:, 'Date'], click_ratio_table.loc[:, 'Page_View_Per_Million'])
# plt.title('JHU Page_View_Per_Million')
# # click_ratio_table.shape()
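# Hedged simplification sketch (not in the original): ElementTree can reach the
# nested records directly with iter(), avoiding the deeply nested loops above.
# for data in root.iter('HistoricalData'):
#     for entry in data:
#         ...  # same per-entry parsing as in the loop above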
|
alanalien/covid_19_circumstantial_evidences
|
data_processing_funs/click_ratio_data.py
|
click_ratio_data.py
|
py
| 1,858 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7653898910
|
import random
from math import exp, floor, log
from hypergraphx import Hypergraph
import numpy as np
def find_intersection(x, func):
num = floor((x / func[0]) ** (1 / func[1]))
return num
def non_linear_distribution(infected_nodes, param, len_edge):
# TODO: define the non-linear distribution, in order to give some sort of non linearity to the contagion process
# B(n, i) = (gamma * i)**param
# if we use non linear probability
# if i node in the same group try to infect the node j, the probability of success (gamma * i^param)/n
# where n is the number of nodes in the edge, 0 < param < 1, gamma > 0
gamma = 0.2
# using function proposed, little modified, by the paper https://arxiv.org/pdf/2105.07092.pdf
return (gamma * infected_nodes) ** param
# return 0.005
def regular_probability():
# if we use regular probability
# if i node try to infect the node j, the probability of success is 1 - (1 - gamma) ^ i
# where 0 < gamma < 1, i > 0 is the number of infected nodes trying to infect the node j
gamma = 0.01
return gamma
def m_contagion(hypergraph, I_0, T):
# make a documentation for the function
"""
    m_contagion
    Simulates the contagion process on a simplicial hypergraph.
    The process is run for T time steps.
    The initial condition is given by I_0, which is a vector of length equal to the number of nodes in the hypergraph.
    The per-edge infection probability is given by regular_probability() (see above); there is no recovery step in this model.
The output is a vector of length T, where the i-th entry is the fraction of infected nodes at time i.
....
Parameters
----------
hypergraph : hypergraphx.Hypergraph
The hypergraph on which the contagion process is run.
I_0 : numpy.ndarray
The initial condition of the contagion process.
T : int
The number of time steps.
Returns
-------
numpy.ndarray
The fraction of infected nodes at each time step.
"""
file = open("./results/contagion_param.txt", "w")
file.write(
"we calculate probability of infection (gamma * infected_nodes)**param with the following parameters (gamma, param): \n")
file.write("param: " + str(1) + "\n")
file.write("gamma: " + str(0.05) + "\n")
numberInf = np.linspace(0, 0, T)
Infected = np.sum(I_0)
new_Infected = Infected
numberInf[0] = Infected
N = len(I_0)
nodes = hypergraph.get_nodes()
mapping = hypergraph.get_mapping()
I_old = np.copy(I_0)
I_prec = I_old
t = 1
print("start campaign")
while t < T:
new_Infected -= new_Infected
# print (new_Infected)
# create a new vector I_new that is same length as I_old but only contains zeros
I_new = np.zeros(len(I_old))
count = 0
mean = 0
for edge in hypergraph.get_edges():
# find the percentage of the infected nodes in the edge
infected_nodes = 0
for node in edge:
if I_old[node] == 1:
infected_nodes += 1
infected_nodes_perc = infected_nodes / len(edge)
# run the infection process through the edge
for node in edge:
if I_old[node] == 0 and I_new[node] == 0:
# we can chose between a regular probability or a non-linear probability
prob = regular_probability()
if random.random() * 10000 < prob * 10000:
I_new[node] = 1
if t == 1:
s = ""
for node in edge:
if I_old[node] == 0 and I_new[node] == 0:
s += "0"
else:
s += "1"
if s.__contains__("1"):
count += 1
mean += s.__len__()
if t == 1:
print(count)
print(mean / count)
I_prec = I_new
# update the infected nodes
for i in range(0, len(I_old)):
if I_new[i] == 1:
I_old[i] = 1
new_Infected += np.sum(I_new)
Infected += new_Infected
numberInf[t] = Infected
# print("Time step: " + str(t) + " - Infected nodes: " + str(Infected))
t += 1
if new_Infected == 0:
pass
# print("No more infected nodes")
# print("at time step: " + str(t) + " - Infected nodes: " + str(Infected))
return numberInf[:t - 1] / N
def threshold_calc(length_edge, theta_0):
return length_edge * theta_0
def function_len(param):
return np.log2(param)
def SIS_contagion(hypergraph, I_condition, T, lam_in, delta_in, theta_in):
# make a documentation
"""
SIS contagion
model of contagion proposed by
https://arxiv.org/pdf/2103.03709.pdf
-----
parameters
hypergraph:
the hypergraph on which the contagion process is run
I_condition:
the initial condition of the contagion process
T:
the number of time steps
------
returns
the fraction of infected nodes at each time step
"""
t = 0
N = len(I_condition)
I_prec = I_condition
sum = 0
for i in hypergraph.get_nodes():
sum += I_condition[i]
total_infected = [sum]
lam_0 = lam_in
delta_0 = delta_in
theta_0 = theta_in
# define a vector delta_exp that contains the delta exponent for each node
delta_exp = np.zeros(N)
edges = hypergraph.get_edges()
while t < T:
# shuffle the edges
random.shuffle(edges)
for edge in edges:
# find the percentage of the infected nodes in the edge
if len(edge) == 2:
node_1 = edge[0]
node_2 = edge[1]
if (I_prec[node_1] + I_prec[node_2]) >= 1:
# poisson process with parameter lambda = lambda_0 * function(size(edge))
# function is log base 2 of the size of the edge
lam = lam_0 * function_len(len(edge))
rep = 1
poisson_rand = np.random.poisson(lam, rep)
p_rand = np.sum(poisson_rand) / rep
# TAG 3
if random.random() < p_rand:
I_prec[node_1] = 1
I_prec[node_2] = 1
else:
infected_nodes = 0
for node in edge:
if I_prec[node] == 1:
infected_nodes += 1
if infected_nodes > threshold_calc(len(edge), theta_0):
# print("contagion process in group:", edge)
for node in edge:
if I_prec[node] == 0:
# poisson process with parameter lambda = lambda_0 * function(size(edge))
# function is log base 2 of the size of the edge
lam = lam_0 * function_len(len(edge))
rep = 1
poisson_rand = np.random.poisson(lam, rep)
p_rand = np.sum(poisson_rand) / rep
# TAG 2
if random.random() < p_rand:
I_prec[node] = 1
for node in hypergraph.get_nodes():
if I_prec[node] == 1:
rep = 1
# d_exp = delta_exp[node]
poisson_rand = np.random.poisson(delta_0, rep)
p_rand = np.sum(poisson_rand) / rep
# TAG 1
if random.random() < p_rand:
I_prec[node] = 0
pass
pass
sum_infected = 0
for node in hypergraph.get_nodes():
sum_infected += I_prec[node]
total_infected.append(sum_infected)
t += 1
total_infected = np.array(total_infected)
total_infected = total_infected[:t]
return total_infected / N
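
# Minimal usage sketch (not part of the original file). It assumes integer node
# labels 0..N-1 and that hypergraphx's Hypergraph can be built from an edge list;
# the rate parameters below are arbitrary illustration values, not taken from the paper.
if __name__ == "__main__":
    example_edges = [(0, 1), (1, 2), (0, 1, 2), (2, 3, 4), (3, 4)]
    example_hypergraph = Hypergraph(example_edges)
    I_0 = np.zeros(5)
    I_0[0] = 1  # seed a single infected node
    infected_fraction = SIS_contagion(example_hypergraph, I_0, T=50,
                                      lam_in=0.5, delta_in=0.2, theta_in=0.5)
    print(infected_fraction)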
|
francescoboldrin/noise_lib
|
m_contagion.py
|
m_contagion.py
|
py
| 8,449 |
python
|
en
|
code
| 2 |
github-code
|
6
|
2726023308
|
n = int(input())
for _ in range(n):
p = input()
if len(p) == 3:
counter = 0
x = 'one'
for i in range(3):
if p[i] != x[i]:
counter += 1
if counter <= 1:
print(1)
else:
print(2)
else:
print(3)
|
wolney-fo/beecrowd
|
3-STRINGS/python/beecrowd_1332.py
|
beecrowd_1332.py
|
py
| 302 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27082695603
|
import pytest
from httpx import AsyncClient
from fastapi1.main import app
menu_id = ''
def assert_menu_properties(response_json):
if not response_json:
return
assert 'id' in response_json
assert 'title' in response_json
assert 'description' in response_json
@pytest.mark.anyio
async def test_get_menus():
    # Get all menus
async with AsyncClient(app=app, base_url='http://test') as client:
response = await client.get('/api/v1/menus')
assert response.status_code == 200
assert isinstance(response.json(), list)
@pytest.mark.anyio
async def test_create_menus():
    # Test that a menu is created correctly
global menu_id
data = {
'title': 'Menu 1',
'description': 'Description for menu 1'
}
async with AsyncClient(app=app, base_url='http://test') as client:
response = await client.post('/api/v1/menus', json=data)
assert response.status_code == 201
assert_menu_properties(response.json())
created_menu = response.json()
assert created_menu['title'] == 'Menu 1'
assert created_menu['description'] == 'Description for menu 1'
menu_id = created_menu['id']
@pytest.mark.anyio
async def test_update_menu():
    # Test the update
data = {
'title': 'Updated Menu 1',
'description': 'Updated Description 1'
}
async with AsyncClient(app=app, base_url='http://test') as client:
response = await client.patch(f'/api/v1/menus/{menu_id}', json=data)
assert response.status_code == 200
assert_menu_properties(response.json())
updated_menu = response.json()
assert updated_menu['title'] == data['title']
assert updated_menu['description'] == data['description']
@pytest.mark.anyio
async def test_read_menus():
    # Test reading a specific menu
async with AsyncClient(app=app, base_url='http://test') as client:
response = await client.get(f'/api/v1/menus/{menu_id}')
assert response.status_code == 200
assert_menu_properties(response.json())
response = response.json()
assert response != []
@pytest.mark.anyio
async def test_delete_menu():
    # Delete the menu
async with AsyncClient(app=app, base_url='http://test') as client:
response = await client.delete(f'/api/v1/menus/{menu_id}')
assert response.status_code == 200
@pytest.mark.anyio
async def test_menu_empty():
    # Check that the menu list is empty
async with AsyncClient(app=app, base_url='http://test') as client:
response = await client.get('/api/v1/menus')
assert response.status_code == 200
assert response.json() == []
@pytest.mark.anyio
async def test_empty_menu_id():
    # Check that the response is empty
async with AsyncClient(app=app, base_url='http://test') as client:
response = await client.delete(f'/api/v1/menus/{menu_id}')
assert response.status_code == 200
assert response.json() is None
|
puplishe/testproject
|
tests/test_menu.py
|
test_menu.py
|
py
| 3,056 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12050674214
|
"""Define settings for the window"""
from data.options import CUSTOM_SETTINGS_FILENAME
import json
from logger import logger
from os import path
DEFAULT_SETTINGS = {
"width": 1024,
"height": 768
}
def get(_type, data):
"""Used to get value for the settings file
Args:
_type (string)
data (dict)
Returns:
int | str
"""
value = 0
try:
with open(path.join('.', 'assets', 'saved_settings', CUSTOM_SETTINGS_FILENAME), 'r') as _f:
value = json.load(_f)[_type]
    except (OSError, KeyError, TypeError, ValueError):
        # missing or invalid settings file: fall back to the default value
        value = data[_type]
logger.info("Load %s : %s", _type, value)
return value
WIDTH = get("width", DEFAULT_SETTINGS)
HEIGHT = get("height", DEFAULT_SETTINGS)
FPS = 60
TITLE = 'Donjons et Dragons'
TILESIZE = 64
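
# Hypothetical example of the saved settings file read by get() above
# (assets/saved_settings/<CUSTOM_SETTINGS_FILENAME>); both keys are optional:
# {"width": 1280, "height": 720}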
|
Barbapapazes/dungeons-dragons
|
config/window.py
|
window.py
|
py
| 779 |
python
|
en
|
code
| 1 |
github-code
|
6
|
36078929738
|
import requests
from sqlalchemy import or_
from flask import Blueprint
from flask_login import current_user
from flask import redirect, request, render_template, url_for, jsonify
from models import User, MealRequest, Proposal
from dbSession import session
from loginAPIKeyDecorator import require_api_key
from keyHelper import get_foursquare_key_dict, get_mapquest_key_dict
app_endpoints = Blueprint('app_endpoints', __name__)
mapquest_key_dict = get_mapquest_key_dict()
foursquare_key_dict = get_foursquare_key_dict()
@app_endpoints.route('/v1/users', methods = ['GET', 'PUT', 'DELETE'])
@require_api_key
def get_all_users():
if request.method == 'GET':
user_list = session.query(User).all()
if user_list is not None:
return jsonify([user.serialize for user in user_list])
else:
return 'None'
elif request.method == 'PUT':
username = request.json.get('user_name')
new_password = request.json.get('password')
new_token = request.json.get('token')
if username is not None:
current_user = session.query(User).filter_by(user_name=username).first()
else:
current_user = None
if current_user is not None:
if new_password is not None:
current_user.hash_password(new_password)
if new_token is not None:
current_user.api_key = new_token
session.commit()
return jsonify(dict(message="Success, updated user: {}!".format(username))),201
else:
return jsonify(dict(message="ERROR, not all parameter provided!")),404
elif request.method == 'DELETE':
username = request.json.get('user_name')
if username is not None:
current_user = session.query(User).filter_by(user_name=username).first()
else:
current_user = None
if current_user is not None:
session.delete(current_user)
session.commit()
return jsonify(dict(message="Success, deleted user: {}!".format(username))),200
else:
return jsonify(dict(message="ERROR, user not found or not provided!")),404
@app_endpoints.route('/v1/users/<int:id>', methods=['GET'])
@require_api_key
def get_user_with_id(id):
user_search = session.query(User).filter_by(id=id).first()
if user_search is not None:
return jsonify(user_search.serialize),200
else:
return jsonify(dict(message="ERROR, user {} not found!".format(id))),404
def update_meal_request(MealRequest):
response = requests.get('https://api.foursquare.com/v2/venues/search',params={**foursquare_key_dict, 'v':'20180323', 'limit':1,
'near':MealRequest.location_area,
'query':MealRequest.meal_type})
if response.status_code != 200:
return None, response.status_code
MealRequest.location_name = response.json().get('response').get('venues')[0].get('name')
MealRequest.latitude = response.json().get('response').get('geocode').get('feature').get('geometry').get('center').get('lat')
MealRequest.longitude = response.json().get('response').get('geocode').get('feature').get('geometry').get('center').get('lng')
return MealRequest,response.status_code
@app_endpoints.route('/v1/requests', methods = ['GET', 'POST'])
@require_api_key
def show_make_user_requests():
if request.method == 'GET':
meal_requests = session.query(MealRequest).all()
if meal_requests is not None:
return jsonify( [req.serialize for req in meal_requests])
else:
return 'None'
elif request.method == 'POST':
try:
new_meal_request = MealRequest(**request.json,user_id=current_user.id)
new_meal_request,status_code = update_meal_request(new_meal_request)
if status_code == 200:
current_user.meal_requests.append(new_meal_request)
session.commit()
return jsonify(dict(message="Success, created request: {}!".format(new_meal_request.id))),201
else:
return jsonify(dict(message="ERROR, foursquare api not working {}!".format(status_code))),404
except ValueError as value_error:
return jsonify(dict(message=value_error.args)),404
@app_endpoints.route('/v1/requests/<int:id>', methods = ['GET', 'PUT', 'DELETE'])
@require_api_key
def show_make_edit_specific_user_request(id):
request_query = session.query(MealRequest).filter_by(id=id).first()
if request_query == None:
return 'None'
if request.method == 'GET':
return jsonify(request_query.serialize),200
if request.method == 'PUT':
meal_type = request.json.get('meal_type')
location_area = request.json.get('location_area')
appointment_date= request.json.get('appointment_date')
meal_time = request.json.get('meal_time')
if meal_type is not None:
request_query.meal_type=meal_type
if location_area is not None:
request_query.location_area=location_area
if appointment_date is not None:
request_query.appointment_date=appointment_date
if meal_time is not None:
request_query.meal_time=meal_time
request_query, status_code = update_meal_request(request_query)
if status_code == 200:
session.commit()
return jsonify(dict(message="Success, updated request: {}!".format(request_query.id))),201
else:
return jsonify(dict(message="ERROR, foursquare api not working {}!".format(status_code))),404
elif request.method == 'DELETE':
session.delete(request_query)
session.commit()
return jsonify(dict(message="Success, deleted request: {}!".format(request_query.id))),200
@app_endpoints.route('/v1/proposals', methods=['GET', 'POST'])
@require_api_key
def show_and_create_user_porposals():
if request.method == 'GET':
proposals_query=session.query(Proposal).filter(or_(Proposal.user_porposed_to==current_user.user_name, Proposal.user_porposed_from==current_user.user_name)).all()
return jsonify([elements.serialize for elements in proposals_query]),200
elif request.method == 'POST':
proposal_request_id = request.json.get('request_id')
current_meal_request = session.query(MealRequest).filter_by(id=proposal_request_id).first()
if current_meal_request is None:
return jsonify(dict(message="ERROR, request id {} not found".format(proposal_request_id))), 404
meal_request_creater = session.query(User).filter_by(id=current_meal_request.user_id).first()
if session.query(Proposal).filter_by(request_id=proposal_request_id).first() is None:
new_proposal = Proposal(user_porposed_from=current_user.user_name,
user_porposed_to=meal_request_creater.user_name,
meal_request=current_meal_request)
session.add(new_proposal)
session.commit()
return jsonify(dict(message="Success, created proposal: {}!".format(new_proposal.request_id))),201
else:
return jsonify(dict(message="ERROR, request id {} does already exist".format(proposal_request_id))), 404
@app_endpoints.route('/v1/proposals/<int:id>', methods=['GET', 'POST'])
@require_api_key
def show_modify_delete_specific_proposal(id):
proposal_query = session.query(Proposal).filter(
or_(Proposal.user_porposed_from == current_user.user_name,
Proposal.user_porposed_to == current_user.user_name)).first()
if proposal_query == []:
print('asd')
|
NaPiZip/Online-course-notes
|
Designing_RESTful_APIs/Exercices/L5/appEndpoints.py
|
appEndpoints.py
|
py
| 7,871 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74606056828
|
from __future__ import unicode_literals
import errno
import json
import logging
import os
__author__ = 'Jakub Plichta <[email protected]>'
logger = logging.getLogger(__name__)
class DashboardExporter(object):
def process_dashboard(self, project_name, dashboard_name, dashboard_data):
pass
class ProjectProcessor(object):
def __init__(self, dashboard_processors):
"""
:type dashboard_processors: list[grafana_dashboards.builder.DashboardExporter]
"""
super(ProjectProcessor, self).__init__()
self._dashboard_processors = dashboard_processors
def process_projects(self, projects, parent_context=None):
"""
:type projects: list[grafana_dashboards.components.projects.Project]
:type parent_context: dict
"""
for project in projects:
logger.info("Processing project '%s'", project.name)
for context in project.get_contexts(parent_context):
for dashboard in project.get_dashboards():
json_obj = dashboard.gen_json(context)
dashboard_name = context.expand_placeholders(dashboard.name)
for processor in self._dashboard_processors:
processor.process_dashboard(project.name, dashboard_name, json_obj)
class FileExporter(DashboardExporter):
def __init__(self, output_folder):
super(FileExporter, self).__init__()
self._output_folder = output_folder
if not os.path.exists(self._output_folder):
os.makedirs(self._output_folder)
if not os.path.isdir(self._output_folder):
raise Exception("'{0}' must be a directory".format(self._output_folder))
def process_dashboard(self, project_name, dashboard_name, dashboard_data):
super(FileExporter, self).process_dashboard(project_name, dashboard_name, dashboard_data)
dirname = os.path.join(self._output_folder, project_name)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
dashboard_path = os.path.join(dirname, dashboard_name + '.json')
logger.info("Saving dashboard '%s' to '%s'", dashboard_name, os.path.abspath(dashboard_path))
with open(dashboard_path, 'w') as f:
json.dump(dashboard_data, f, sort_keys=True, indent=2, separators=(',', ': '))
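
# Minimal usage sketch (not part of the original module): export one dashboard
# JSON to ./out/<project>/<dashboard>.json with the FileExporter defined above.
# The project name, dashboard name and payload are hypothetical illustration values.
if __name__ == '__main__':
    exporter = FileExporter('out')
    exporter.process_dashboard('example-project', 'example-dashboard', {'title': 'example-dashboard'})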
|
jakubplichta/grafana-dashboard-builder
|
grafana_dashboards/exporter.py
|
exporter.py
|
py
| 2,430 |
python
|
en
|
code
| 141 |
github-code
|
6
|
12940641997
|
import os
import csv
#Load file path
csvpath = os.path.join('Resources','election.csv')
#Placeholders for votes and candidates
total_votes = 0
vote_total = []
candidates_name = []
each_vote = []
percent_of_vote = []
#open file and read in results
with open(csvpath) as election_csv:
csvreader = csv.reader(election_csv, delimiter=',')
csv_header =next(csvreader)
#add vote count
for row in csvreader:
total_votes += 1
#if candidates name not found skip if it is found add to vote count
if row[2] not in candidates_name:
candidates_name.append(row[2])
index = candidates_name.index(row[2])
each_vote.append(1)
else:
index = candidates_name.index(row[2])
each_vote[index] = each_vote[index] + 1
#calculate vote percentage and format
for votes in each_vote:
percentage = (votes/total_votes)
percentage = "{:.3%}".format(percentage)
percent_of_vote.append(percentage)
candidate = max(each_vote)
index = each_vote.index(candidate)
winning_candidate = candidates_name[index]
#show candidates results in output
print("Election Results")
print("--------------------------")
print("Total Votes: " + str(total_votes))
print("--------------------------")
for i in range(len(candidates_name)):
print(candidates_name[i] + ": " + (str(percent_of_vote[i])) + " (" + (str(each_vote[i])) + ")")
print("--------------------------")
print("Winner: " + winning_candidate)
print("--------------------------")
#output election results to csv
output_path = os.path.join("analysis", "election_results.txt")
with open (output_path,"w") as file:
file.write ("Election Results")
file.write ("\n")
file.write ("----------------------------")
file.write ("\n")
file.write (f"Total Votes: " + str(total_votes))
file.write ("\n")
file.write ("--------------------------")
file.write ("\n")
for i in range(len(candidates_name)):
line = ((candidates_name[i] + ": " + (str(percent_of_vote[i])) + " (" + (str(each_vote[i])) + ")"))
file.write('{}\n'.format(line))
file.write ("--------------------------")
file.write ("\n")
file.write (f"Winner: " + winning_candidate)
file.write ("\n")
file.write ("--------------------------")
|
Anastefanski/Python_Challenge
|
PyPoll/main.py
|
main.py
|
py
| 2,398 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19654989478
|
"""
Exercise 1: convert a score out of 100 into a letter grade.
Requirements: if the input score is 90 or above (90 included), output A;
80 to 90 (90 excluded), output B;
70 to 80 (80 excluded), output C;
60 to 70 (70 excluded), output D;
below 60, output E.
"""
score = float(input("请输入百分制成绩:"))
if (score >= 90):
print("A")
elif (score >= 80):
print("B")
elif (score >= 70):
print("C")
elif (score >= 60):
print("D")
else:
print("E")
"""
Read two positive integers and compute their greatest common divisor and least common multiple.
"""
x = int(input('x = '))
y = int(input('y = '))
if x > y:
x, y = y, x
for factor in range(x, 0, -1):
if (x % factor == 0 and y % factor == 0):
print("x和y的最大公约数为:%d" % factor)
print("x和y的最大公倍数为: %d" % (x * y // factor))
break
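# Illustrative example (values made up): for x = 12 and y = 18 the loop stops at
# factor = 6, printing a greatest common divisor of 6 and a least common multiple
# of 12 * 18 // 6 = 36.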
|
StreamAzure/PythonPractice
|
0x01-basic/practices2.py
|
practices2.py
|
py
| 850 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
9379980340
|
# 2022.05.02
# Solving time:   min   sec
# Judge result: time limit exceeded
# Time complexity: O(N)
# Problem link: https://www.acmicpc.net/problem/3955
import sys
input = sys.stdin.readline
t = int(input())
for _ in range(t):
k, c = map(int, input().split())
possible = False
people = 0
for i in range(int(1e9)):
if (k * i + 1) % c == 0:
possible = True
people = i
break
if possible:
print((k * people + 1) // c)
else:
print('IMPOSSIBLE')
|
Source-Machine-Ent/Algorithm-class
|
ningpop/3955.py
|
3955.py
|
py
| 519 |
python
|
ko
|
code
| 2 |
github-code
|
6
|
9199268826
|
from imageai.Detection import VideoObjectDetection
import os
execution_path = os.getcwd()
detector = VideoObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(os.path.join(execution_path, "resnet50_coco_best_v2.1.0.h5"))
detector.loadModel()
detections = detector.detectObjectsFromVideo(input_file_path=os.path.join(execution_path, "walk.mp4"),
output_file_path=os.path.join(execution_path, "resultat"))
for eachObject in detections:
print(eachObject["name"], " : ", eachObject["percentage_probability"])
|
cegepmatane/projet-graphique-ai-tracking
|
poc/tensorflow_video/demo.py
|
demo.py
|
py
| 577 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12866551280
|
import random
array = [random.randint(0,50) for i in range (9) ]
print (array)
def quickSort(arr):
if len(arr) < 1:
return arr
pivot_index = random.randint(0, len(arr) - 1)
left = []
mid = [arr[pivot_index]]
right = []
for i in range(len(arr)):
if i != pivot_index:
if arr[pivot_index] > arr[i]:
left.append(arr[i])
elif arr[pivot_index] < arr[i]:
right.append(arr[i])
else:
mid.append(arr[i])
return quickSort(left) + mid + quickSort(right)
print (quickSort(array))
|
tyao117/AlgorithmPractice
|
QuickSort.py
|
QuickSort.py
|
py
| 595 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18992770732
|
"""
Write a function that finds the longest common prefix among an array of strings.
If there is no common prefix, return the empty string "".
Example 1:
Input: strs = ["flower","flow","flight"]
Output: "fl"
Example 2:
Input: strs = ["dog","racecar","car"]
Output: ""
Explanation: the inputs share no common prefix.
"""
import typing
class Solution:
def longestCommonPrefix(self, strs)->str:
l = len(strs)
if l <= 0:
return ""
if l <= 1:
return strs[0]
check = strs[0]
start = 0
s = len(check)
for i in range(s):
for j in range(l):
if j == 0:
continue
if len(strs[j]) <= i or check[i] != strs[j][i]:
return check[start: i]
return check
if __name__ == "__main__":
print(Solution().longestCommonPrefix(["flower", "flow", "flight"]))
print(Solution().longestCommonPrefix(["dog","racecar","car"]))
|
coolzyz/leetcode
|
14.py
|
14.py
|
py
| 988 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10888914316
|
import random
from django.template.loader import render_to_string
from .naver import 블로그_검색, 상한가_크롤링, 테마별_시세_크롤링
def search(search_engine, keyword):
if search_engine == '네이버 블로그':
post_list = 블로그_검색(keyword)
response_text = render_to_string('dialogflow/naver_blog_search_result.txt', {
'post_list': post_list[:3],
})
else:
response_text = '{}는 지원하지 않습니다.'.format(search_engine)
return {'fulfillmentText': response_text}
def stock_search(stock_search_term):
if stock_search_term == '상한가 종목':
response_text = 상한가_크롤링()
elif stock_search_term == '테마별 시세':
response_text = 테마별_시세_크롤링()
else:
response_text = '{}는 지원하지 않습니다.'.format(stock_search_term)
return {'fulfillmentText': response_text}
|
allieus-archives/demo-20180805-startup-dev
|
dialogflow/actions.py
|
actions.py
|
py
| 937 |
python
|
ko
|
code
| 16 |
github-code
|
6
|
74199623228
|
from tkinter import messagebox
from PyPDF2 import PdfReader, PdfWriter
import os
class TrabajarPDF:
def divide_pdf(self, rutaPDF, rutaGuardar, num_pages):
pdf_reader = PdfReader(rutaPDF)
total_pages = len(pdf_reader.pages)
for i in range(0, total_pages, num_pages):
pdf_writer = PdfWriter()
for j in range(i, min(i + num_pages, total_pages)):
pdf_writer.add_page(pdf_reader.pages[j])
output_filename = os.path.join(rutaGuardar, f"{os.path.basename(rutaPDF)}_{i // num_pages + 1}.pdf")
with open(output_filename, "wb") as output_file:
pdf_writer.write(output_file)
messagebox.showinfo("Éxito", "El PDF se ha dividido correctamente.")
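        # Illustrative sketch (file name and page count are made up): splitting a
        # 10-page "informe.pdf" with num_pages=4 writes three files into rutaGuardar:
        # informe.pdf_1.pdf (pages 1-4), informe.pdf_2.pdf (pages 5-8) and
        # informe.pdf_3.pdf (pages 9-10).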
|
JhostinR/emergia_projects
|
dividir_unir_pdf/controller/dividir_functions.py
|
dividir_functions.py
|
py
| 753 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24428391900
|
#!/usr/bin/python3
""" sends a post request to the URL and displays the body
"""
import requests
from sys import argv
if __name__ == "__main__":
url = argv[1]
r = requests.get(url)
if r.status_code == 200:
print(r.text)
else:
print("Error code: {}".format(r.status_code))
|
Isaiah-peter/alx-higher_level_programming
|
0x11-python-network_1/7-error_code.py
|
7-error_code.py
|
py
| 305 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70508505789
|
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
from matplotlib import pyplot as plt
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
# progress bar
from tqdm.auto import tqdm
# hyper parameter
train_batch_size = 64
test_batch_size = 1000
epochs = 15
lr = 0.0003
gamma = 0.7
myseed = 1220
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
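        # Why 9216 inputs: a 28x28 MNIST image becomes 26x26 after the first 3x3
        # conv, 24x24 after the second, then 12x12 after the 2x2 max-pool in
        # forward(), and 64 channels * 12 * 12 = 9216 flattened features.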
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = torch.flatten(x, end_dim=0)
return output
def run():
# CUDA, macOS GPU or CPU ?
if torch.cuda.is_available():
device = torch.device("cuda")
print('Using cuda')
elif torch.backends.mps.is_available():
device = torch.device("mps")
print('Using mps')
else:
device = torch.device("cpu")
print('Using cpu')
# add random seed
seed = myseed
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
# download data
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
dataset1 = datasets.MNIST('./data', train=True, download=True, transform=transform)
dataset2 = datasets.MNIST('./data', train=False, transform=transform)
train_loader = DataLoader(dataset1, batch_size=train_batch_size, shuffle=True, num_workers=1, pin_memory=True)
test_loader = DataLoader(dataset2, batch_size=test_batch_size, shuffle=True, num_workers=1, pin_memory=True)
model = Net().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=1e-5)
criterion = nn.CrossEntropyLoss()
scheduler = StepLR(optimizer, step_size=1, gamma=gamma)
# record the performance
epoches = []
training_accuracies = []
training_loss = []
testing_accuracies = []
testing_loss = []
training_accuracies.append('Training accuracies')
training_loss.append('Training loss')
testing_accuracies.append('Testing accuracies')
testing_loss.append('Testing loss')
# ---------- train and validate ----------
for epoch in range(1, epochs + 1):
total_train_loss, total_train_acc = 0.0, 0.0
total_test_loss, total_test_acc = 0.0, 0.0
train_batch_idx, test_batch_idx = 0, 0
tqdm.write("[ epoch " + str(epoch) + " ]")
epoches.append(epoch)
# ---------- train ----------
model.train()
for batch in tqdm(train_loader, file=sys.stdout):
train_batch_idx += 1
if train_batch_idx == 1:
tqdm.write("Training")
imgs, labels = batch
outputs = model(imgs.to(device))
loss = criterion(outputs, labels.to(device))
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), max_norm=10)
optimizer.step()
acc = (outputs.argmax(dim=-1) == labels.to(device)).float().mean()
total_train_loss += loss.item()
total_train_acc += acc.item()
train_loss = total_train_loss / train_batch_idx
train_acc = total_train_acc / train_batch_idx
# ---------- validate ----------
model.eval()
for batch in tqdm(test_loader, file=sys.stdout):
if test_batch_idx == 1:
tqdm.write("Testing")
test_batch_idx += 1
imgs, labels = batch
with torch.no_grad():
outputs = model(imgs.to(device))
loss = criterion(outputs, labels.to(device))
acc = (outputs.argmax(dim=-1) == labels.to(device)).float().mean()
total_test_loss += loss.item()
total_test_acc += acc.item()
test_loss = total_test_loss / test_batch_idx
test_acc = total_test_acc / test_batch_idx
training_accuracies.append(train_acc)
training_loss.append(train_loss)
testing_accuracies.append(test_acc)
testing_loss.append(test_loss)
print('Training Loss:', training_loss[epoch], 'Training Accuracy:', (100 * training_accuracies[epoch]),
'%')
print('Testing Loss:', testing_loss[epoch], 'Testing Accuracy:', 100 * testing_accuracies[epoch], '%')
print('')
scheduler.step()
plot(epoches, training_loss)
plot(epoches, training_accuracies)
plot(epoches, testing_accuracies)
plot(epoches, testing_loss)
def plot(epoches, performance):
label = performance.pop(0)
plt.title(label)
plt.plot(epoches, performance, label=label)
plt.xlabel('epoches')
plt.legend()
plt.savefig(label + '.jpg')
plt.show()
if __name__ == '__main__':
run()
|
WangShengqing122090536/CSC1004-image-classification-modified
|
main.py
|
main.py
|
py
| 5,477 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27682702748
|
########################################################################
# BF3Events
#
# There should be a class in here for every possible event in the log:
# PlayerKilled
# PlayerJoin
# PlayerLeave
# PlayerSuicide
# PlayerSwitchedTeams
# PlayerSwitchedSquads
########################################################################
class BF3BaseEvent:
def toCsv(self):
#Standard toCsv function for events with only an eventDate/Time/playerName.
return "NULL,\"%s\",\"%s\",\"%s\"\n" % (self.eventDate, self.eventTime, self.playerName)
class PlayerJoinEvent(BF3BaseEvent):
#eventDate: date of event
#eventTime: time of event
#playerName: player name
def __init__(self, type, eventData):
self.type = type
self.tableName = 'playerjoin'
splitLine = eventData.split("\t")
splitTime = splitLine[1].split(" ")
#get the time
self.eventDate = logDateToSqlDate(splitTime[0])
self.eventTime = splitTime[1]
self.playerName = splitLine[4].split(" ")[0].rstrip("\n")
class PlayerLeaveEvent(BF3BaseEvent):
#eventDate: date of event
#eventTime: time of event
#playerName: player name
def __init__(self, type, eventData):
self.type = type
self.tableName = 'playerleave'
splitLine = eventData.split("\t")
splitTime = splitLine[1].split(" ")
#get the time
self.eventDate = logDateToSqlDate(splitTime[0])
self.eventTime = splitTime[1]
self.playerName = splitLine[4].split(" ")[0].rstrip("\n")
class PlayerSuicideEvent(BF3BaseEvent):
#eventDate: date of event
#eventTime: time of event
#playerName: player name
def __init__(self, type, eventData):
self.type = type
self.tableName = 'playersuicide'
splitLine = eventData.split("\t")
splitTime = splitLine[1].split(" ")
#get the time
self.eventDate = logDateToSqlDate(splitTime[0])
self.eventTime = splitTime[1]
self.playerName = splitLine[4].split(" ")[0].rstrip("\n")
class PlayerSwitchedTeamsEvent(BF3BaseEvent):
#eventDate: date of event
#eventTime: time of event
#playerName: player name
#oldTeam: old team
#newTeam: new team
def __init__(self, type, eventData):
self.type = type
self.tableName = 'playerswitchedteams'
splitLine = eventData.split("\t")
splitTime = splitLine[1].split(" ")
#get the time
self.eventDate = logDateToSqlDate(splitTime[0])
self.eventTime = splitTime[1]
extData = splitLine[4].split(" ")
self.playerName = extData[0]
if extData[4] == "Neutral":
self.oldTeam = "Neutral"
self.newTeam = extData[6] + " " + extData[7]
else:
self.oldTeam = extData[4] + " " + extData[5]
self.newTeam = extData[7] + " " + extData[8]
def toCsv(self):
return "NULL,\"%s\",\"%s\",\"%s\",\"%s\",\"%s\"" %\
(self.eventDate, self.eventTime, self.playerName, self.oldTeam, self.newTeam)
class PlayerSwitchedSquadsEvent(BF3BaseEvent):
#eventDate: date of event
#eventTime: time of event
#playerName: player name
#oldSquad: old team
#newSquad: new team
def __init__(self, type, eventData):
self.type = type
self.tableName = 'playerswitchedsquads'
splitLine = eventData.split("\t")
splitTime = splitLine[1].split(" ")
#get the time
self.eventDate = logDateToSqlDate(splitTime[0])
self.eventTime = splitTime[1]
extData = splitLine[4].split(" ")
self.playerName = extData[0]
self.oldSquad = extData[4]
self.newSquad = extData[6]
def toCsv(self):
return "NULL,\"%s\",\"%s\",\"%s\",\"%s\",\"%s\"" %\
(self.eventDate, self.eventTime, self.playerName, self.oldSquad, self.newSquad)
class PlayerKilledEvent(BF3BaseEvent):
#A PlayerKilled event consists of the following data:
#eventDate: date of event
#eventTime: time of event
#playerName: killer name
#victim: victim name
#weapon: the murder weapon
#headshot: was it a headshot true/false?
#Input is a de-nulled event line from the log file.
def __init__(self, type, deathLine):
#set the type
self.type = type
self.tableName = 'playerkilled'
splitLine = deathLine.split("\t")
splitTime = splitLine[1].split(" ")
self.eventDate = logDateToSqlDate(splitTime[0])
self.eventTime = splitTime[1]
#is it a headshot?
if 'headshot' in splitLine[4]:
self.headshot = True
else:
self.headshot = False
#now get killer/victim/weapon out of splitLine[4]
#splitLine[4] looks like:
# Capped1 killed IncredulousDylan [{MISSING: global.Weapons.svd} | -HEADSHOT-]\n
#or
# Scinon killed Sosnitoonsa [Roadkill]
splitKill = splitLine[4].split(" ")
self.playerName = splitKill[0]
self.victim = splitKill[2]
#special cases because DICE is inconsistent :dice:
if 'Roadkill' in splitLine[4]:
self.weapon = "Roadkill"
elif 'Assault' in splitLine[4]:
self.weapon = "F2000 Assault"
elif 'LMG' in splitLine[4]:
self.weapon = "M60 LMG"
elif 'SAW' in splitLine[4]:
self.weapon = "M249 SAW"
elif 'Snayperskaya' in splitLine[4]:
self.weapon = "SV98 Snayperskaya"
elif 'Combat' in splitLine[4]:
self.weapon = "870 Combat"
elif 'M1911' in splitLine[4]:
self.weapon = "WWII M1911 .45"
elif 'Pistol' in splitLine[4]:
self.weapon = "M9 Pistol"
else:
self.weapon = splitKill[4].rstrip("}]\n")
def toCsv(self):
#format: date,time,killer,victim,weapon,headshot
return "NULL,\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\"\n" \
% (self.eventDate, self.eventTime, self.playerName, self.victim, self.weapon, self.headshot)
def writeCsv(eventList):
type = eventList[0].type #fetch the type of event list we've been sent and use that as the CSV name
outputFile = open((type + ".csv"), "a")
for event in eventList:
csvData = event.toCsv()
outputFile.write(csvData)
outputFile.close()
def sqlInsertEvents(db, eventList):
type = eventList[0].type #fetch the type of event list we've been sent and use that as the CSV name
outputFile = open((type + ".csv"), "a")
if eventList[0].tableName != 'playerkilled':
for event in eventList:
db.query("INSERT INTO %s VALUES(NULL, '%s', '%s', '%s')" % (event.tableName, event.eventDate, event.eventTime, db.escape_string(event.playerName)))
else:
for event in eventList:
db.query("INSERT INTO playerkilled VALUES(NULL, '%s', '%s', '%s', '%s', '%s', '%s')" % (event.eventDate, event.eventTime, db.escape_string(event.playerName), db.escape_string(event.victimName), db.escape_string(event.weapon), event.headshot))
outputFile.close()
def logDateToSqlDate(date):
sqlDate = (date[6:10] + "-" + date[0:2] + "-" + date[3:5])
return sqlDate
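# Illustrative example (the MM/DD/YYYY input format is inferred from the slicing
# above): logDateToSqlDate("10/14/2011") -> "2011-10-14"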
|
Luigi30/WookParser
|
BF3Events.py
|
BF3Events.py
|
py
| 7,284 |
python
|
en
|
code
| 1 |
github-code
|
6
|
44399481784
|
import gym
from collections import deque
import numpy as np
import time
import torch
torch.manual_seed(0) # set random seed
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from policy import Policy
from gym.wrappers.monitoring.video_recorder import VideoRecorder
env = gym.make('Acrobot-v1')
env.seed(0)
print('observation space:', env.observation_space)
print('action space:', env.action_space)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
policy = Policy().to(device)
optimizer = optim.Adam(policy.parameters(), lr=0.001)
def reinforce(n_episodes=5000, max_t=1000, gamma=1.0, print_every=100):
scores_deque = deque(maxlen=100)
scores = []
for i_episode in range(1, n_episodes+1):
saved_log_probs = []
rewards = []
state = env.reset()
for t in range(max_t):
action, log_prob = policy.act(state)
saved_log_probs.append(log_prob)
state, reward, done, _ = env.step(action)
rewards.append(reward)
if done:
break
scores_deque.append(sum(rewards))
scores.append(sum(rewards))
discounts = [gamma**i for i in range(len(rewards)+1)]
R = sum([a*b for a,b in zip(discounts, rewards)])
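        # For rewards [r0, r1, r2] this is r0 + gamma*r1 + gamma**2*r2; with the
        # default gamma=1.0 it reduces to the plain episode return. The same
        # full-episode return R weights every log-probability below, which is the
        # basic REINFORCE estimator.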
policy_loss = []
for log_prob in saved_log_probs:
policy_loss.append(-log_prob * R)
policy_loss = torch.cat(policy_loss).sum()
optimizer.zero_grad()
policy_loss.backward()
optimizer.step()
if i_episode % print_every == 0:
torch.save(policy.state_dict(), 'checkpoint.pth')
print('Episode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))
return scores
scores = reinforce()
|
david-wb/acrobot-v1
|
train.py
|
train.py
|
py
| 1,870 |
python
|
en
|
code
| 2 |
github-code
|
6
|
855819574
|
#!/usr/bin/env python
#
# This example creates a polygonal model of a cone, and then renders it to
# the screen. It will rotate the cone 360 degrees and then exit. The basic
# setup of source -> mapper -> actor -> renderer -> renderwindow is
# typical of most VTK programs.
#
#
# First we include the VTK Python packages that will make available
# all of the VTK commands to Python.
#
import vtk
import time
#
# Next we create an instance of vtkConeSource and set some of its
# properties. The instance of vtkConeSource "cone" is part of a visualization
# pipeline (it is a source process object); it produces data (output type is
# vtkPolyData) which other filters may process.
#
cone = vtk.vtkConeSource()
cone.SetHeight( 3.0 )
cone.SetRadius( 1.0 )
cone.SetResolution( 10 )
#
# In this example we terminate the pipeline with a mapper process object.
# (Intermediate filters such as vtkShrinkPolyData could be inserted in
# between the source and the mapper.) We create an instance of
# vtkPolyDataMapper to map the polygonal data into graphics primitives. We
# connect the output of the cone source to the input of this mapper.
#
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection( cone.GetOutputPort() )
#
# Create an actor to represent the cone. The actor orchestrates rendering of
# the mapper's graphics primitives. An actor also refers to properties via a
# vtkProperty instance, and includes an internal transformation matrix. We
# set this actor's mapper to be coneMapper which we created above.
#
coneActor = vtk.vtkActor()
coneActor.SetMapper( coneMapper )
#
# Create the Renderer and assign actors to it. A renderer is like a
# viewport. It is part or all of a window on the screen and it is
# responsible for drawing the actors it has. We also set the background
# color here
#
ren1= vtk.vtkRenderer()
ren1.AddActor( coneActor )
ren1.SetBackground( 0.1, 0.2, 0.4 )
#
# Finally we create the render window which will show up on the screen
# We put our renderer into the render window using AddRenderer. We also
# set the size to be 300 pixels by 300
#
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer( ren1 )
renWin.SetSize( 300, 300 )
#
# now we loop over 360 degrees and render the cone each time
#
# for i in range(0,360):
# time.sleep(0.03)
# renWin.Render()
# ren1.GetActiveCamera().Azimuth( 1 )
|
VisTrails/VisTrails
|
examples/vtk_examples/Tutorial/Step1/Cone.py
|
Cone.py
|
py
| 2,362 |
python
|
en
|
code
| 100 |
github-code
|
6
|
24502025231
|
# Handle options
"""
Use the concept of optionspec from bup.
Get a optionspec from subcmd, use it to parse a string of options
An example of optionspec:
'scrive cmd [ -o ]
---
o,option=: description
,no_short: no short name
'
"""
import getopt
from libscrive.helpers import log
class OptDict:
"""
options =
{
option1: value1
option2: True
}
aliases =
{
o1: option1
o2: option2
}
"""
def __init__(self, aliases):
self._options = {}
self._aliases = aliases
def __setitem__(self, option, value):
option = self._unalias(option)
self._options[option] = value
def __getitem__(self, option):
option = self._unalias(option)
return self._options.get(option, None)
def _unalias(self, option):
return self._aliases.get(option, option) # if option is not an alias, return option
class Options:
"""Init from a option spec, can be used to parse option string"""
def __init__(self, optionspec):
"""Construct an option string"""
self._spec = optionspec
self._aliases = {}
self._short_opts = ''
self._long_opts = []
self._usagestr = ''
self._parse_spec() # set self._usagestr, self._options and self._aliases
def _parse_spec(self):
"""
Parse the optionspec, generate usagestr and a set of OptDict
"""
lines = [ s.strip() for s in self._spec.strip().split('\n')]
helplines = []
while lines:
l = lines.pop() # In reverse order
if l == '---': break # The rest is usage string
helplines.append(self._parse_spec_line(l))
helplines.reverse()
lines.reverse()
self._usagestr = '\n'.join(lines + helplines)
def _parse_spec_line(self, line):
"""Parse a option line in an optionspec"""
opts, desc = line.split(": ") # Get option's short/long names and its description
opts = opts.split(",")
has_short = True
if opts[0] == '':
has_short = False
del opts[0]
if opts[-1].endswith('='): # Takes a value
has_value = True
opts[-1]=opts[-1][:-1]
else:
has_value = False
if has_short:
self._short_opts += opts[0] + (':' if has_value else '')
else:
self._long_opts.append(opts[0] + ('=' if has_value else ''))
if len(opts) > 1: # Has aliases
for o in opts[1:]:
if o in self._aliases:
raise KeyError("Option '{}': alias {} already set to {}".format(opts[0], o, self._aliases[o]))
self._aliases[o] = opts[0]
self._long_opts.append(o + ('=' if has_value else ''))
return("{:<10} {}".format("|".join(opts), desc))
def parse(self, arglist):
"""Parse an argument list according to option spec"""
raw_opts, remains = getopt.getopt(arglist, self._short_opts, self._long_opts)
opts = OptDict(self._aliases)
for k,v in raw_opts:
if v == '': v = True
opts[k.lstrip('-')] = v
return opts, remains
def usage(self, msg=''):
log(self._usagestr)
if msg:
log(msg)
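# A minimal usage sketch (not part of the original module): the spec string and
# argument list below are made-up examples of the optionspec format described in
# the module docstring; running it standalone still needs libscrive importable.
if __name__ == '__main__':
    example_spec = """scrive demo [-o VALUE] [--no_short] ARGS
    ---
    o,option=: an option that takes a value
    ,no_short: a long-only flag
    """
    opts, remains = Options(example_spec).parse(['-o', 'value', '--no_short', 'rest'])
    print(opts['option'], opts['no_short'], remains)  # -> value True ['rest']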
|
wenxin-wang/scrive
|
libscrive/options.py
|
options.py
|
py
| 3,293 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43721480783
|
import numpy as np
from config import *
from KnowledgeSent import KnowledgeSentence
from transformers import BertTokenizer, BertModel
import torch
hops = FLAGS.hops
string_year = str(FLAGS.year)
number = 0
start = FLAGS.start
end = FLAGS.end
if string_year == '2015':
number = 3864
elif string_year == '2016':
number = 5640
class Embeddings(object):
'''
For each input sentence:
- Token embedding of each token
- Segment embedding
- Position embedding
- Visibility matrix
'''
def __init__(self, sofpos=True, vm=True):
self.sentences = [] # list of list of all tokens for a sentence
self.visible_matrices = []
self.vm_tensors = [] # a list of visible matrices for each sentence in tensor-shape of 1*token_numb*token_numb.
self.soft_positions = [] # list of list of softpositions for each sentence
self.segments = [] # list of list of segments for each sentence
self.embeddings = [] # a list with intial embeddings for each token in tensor-shape of 1*token_numb*768
self.hidden_states = [] # list of hidden states
self.token_hidden_states = [] # list with for each token, the token and the hidden states
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
self.model = BertModel.from_pretrained('bert-base-uncased', output_hidden_states=True)
self.model.eval()
self.sofpos = sofpos
self.vm = vm
def makeEmbeddings(self):
count = 0
with open('data/externalData/' + 'raw_data' + string_year + '.txt', 'r') as raw_data:
line_list = raw_data.readlines()
for i in range(start*3, min(number, end*3)):
if i % 600 == 0:
print('another 200, ({}/{})'.format(i, min(number, end*3)))
if count % 3 == 0 : # if it is a sentence line
# add CLS and SEP and remove target sign
sentence = "[CLS] " + line_list[i].replace('$T$', line_list[i+1].replace('\n', '')) + " [SEP]"
# add no knowledge
sent = KnowledgeSentence(sentence, hops, self.tokenizer, include_knowledge=False)
self.sentences.append(sent.sentence)
self.soft_positions.append(sent.soft_positions)
self.segments.append(sent.segments)
self.visible_matrices.append(sent.visibility_matrix)
else: # if it is a target line or sentiment line
pass
count += 1
# append the raw test data and add knowledge
for i in range(max(number, start*3), end*3):
if i % 600 == 0:
print('another 200 ({}/{})'.format(i, end*3))
if count % 3 == 0: # if it is a sentence line
# add CLS and SEP and replace $T$-token with target-token
sentence = "[CLS] " + line_list[i].replace('$T$', line_list[i+1].replace('\n', '')) + " [SEP]"
# add knowledge to sentence
know_sent = KnowledgeSentence(sentence, hops, self.tokenizer, include_knowledge=True)
self.sentences.append(know_sent.sentence)
self.soft_positions.append(know_sent.soft_positions)
self.segments.append(know_sent.segments)
if self.vm:
self.visible_matrices.append(know_sent.visibility_matrix)
else:
self.visible_matrices.append(np.ones((len(know_sent.sentence), len(know_sent.sentence))))
else: # if it is a target line or sentiment line
pass
count += 1
print('Creating embeddings...')
for i in range(len(self.sentences)):
token_tensor = torch.tensor([self.tokenizer.convert_tokens_to_ids(self.sentences[i])])
segment_tensor = torch.tensor([self.segments[i]])
pos_tensor = torch.tensor([self.soft_positions[i]])
if self.sofpos:
output = self.model(token_tensor, None, segment_tensor, pos_tensor)
else:
output = self.model(token_tensor, None, segment_tensor, None)
            tensor = output.hidden_states[0]
self.embeddings.append(tensor)
self.vm_tensors.append(torch.tensor(self.visible_matrices[i]))
print('Embeddings created!')
|
Felix0161/KnowledgeEnhancedABSA
|
Embeddings.py
|
Embeddings.py
|
py
| 4,594 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4715617180
|
from square.configuration import Configuration
from square.client import Client
from django.contrib.auth.models import User
from django.conf import settings
import datetime
from pytz import utc as utc
from django.utils import timezone
from dateutil.relativedelta import relativedelta
import uuid
ACCESS_TOKEN = settings.SQUARE_ACCESS_TOKEN
ENV = settings.SQUARE_ENV
LOCATION = settings.SQUARE_LOCATION
BASE_ADDR = settings.BASE_URL
sq = Client(access_token=ACCESS_TOKEN, environment=ENV)
#To create a new customer. Returns None if fails
def createCustomer(userID):
user = User.objects.get(id=userID)
newCust = sq.customers.create_customer(
{
"given_name": user.first_name,
"family_name": user.last_name,
"email_address": user.email,
"reference_id": user.id
}
)
if newCust.is_success():
return newCust.body["customer"]["id"]
else:
return None
#To get a link for the user to use to pay
def createCheckout(userID, subID, plan):
"""Plans: 0=Free, 1=Silver, 2=Gold"""
user = User.objects.get(id=userID)
planName = "LabLineup"
planPrice = 0
if plan == 1:
planName = "LabLineup Silver"
planPrice = 2000
elif plan == 2:
planName = "LabLineup Gold"
planPrice = 3000
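    # Square money amounts are integers in the smallest currency unit, so the
    # 2000 / 3000 above correspond to $20.00 / $30.00 USD.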
result = sq.checkout.create_checkout(
location_id = LOCATION,
body = {
"idempotency_key": uuid.uuid4().hex,
"order": {
"order": {
"location_id": LOCATION,
"reference_id": str(subID),
"line_items": [
{
"name": planName,
"quantity": "1",
"base_price_money": {
"amount": planPrice,
"currency": "USD"
}
},
],
},
},
"ask_for_shipping_address": False,
"merchant_support_email": "[email protected]",
"pre_populate_buyer_email": str(user.email),
"redirect_url": (BASE_ADDR + "/subscriptionConfirmation/")
}
)
if result.is_success():
return result.body["checkout"]["checkout_page_url"]
elif result.is_error():
return None
else:
return None
#To find the most recent payment for a subscription
def findRecentPayment(subID):
"""Returns a product name and order/transaction ID"""
now = datetime.datetime.now(utc)
startDate = (now - relativedelta(years=1, days=1)).isoformat()
endDate = (now + relativedelta(days=1)).isoformat()
result = sq.orders.search_orders(
body = {
"location_ids": [LOCATION],
"query": {
"filter": {
"date_time_filter": {
"created_at": {
"start_at": str(startDate),
"end_at": str(endDate)
}
},
"state_filter": {
"states": [
"COMPLETED"
]
}
},
"sort": {
"sort_field": "CREATED_AT",
"sort_order": "DESC"
}
}
}
)
if result.is_success() and result.body != {}:
for order in result.body["orders"]:
if order["reference_id"] == str(subID):
orderAmt = order["tenders"][0]["amount_money"]["amount"]
if orderAmt != 0:
return (order["line_items"][0]["name"], order["id"])
return (None, None)
else:
return (None, None)
#To find which product was ordered by order/transaction ID
def findProductOrder(orderID):
"""Returns a product name and order/transaction ID"""
result = sq.orders.search_orders(
body = {
"location_ids": [LOCATION],
"query": {
"filter": {
"state_filter": {
"states": [
"COMPLETED"
]
}
}
}
}
)
if result.is_success() and result.body != {}:
for order in result.body["orders"]:
if order["id"] == orderID:
orderAmt = order["tenders"][0]["amount_money"]["amount"]
if orderAmt != 0:
return (order["line_items"][0]["name"])
return None
else:
return None
|
BryceTant/LabLineup
|
LLenv/LabLineup/app/Payment.py
|
Payment.py
|
py
| 4,739 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35031048423
|
from celery import Celery
from celery.schedules import crontab
app = Celery(
"vivaldi",
broker_url="redis://localhost:6379/0",
result_backend="redis://localhost:6379/0",
imports=["tasks"],
task_serializer="json",
result_serializer="json",
accept_content=["json"],
timezone="Europe/Lisbon",
)
app.conf.beat_schedule = {
"set_default": {
"task": "tasks.default_browser",
"schedule": crontab(minute=0, hour=1),
"args": (),
},
}
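# A typical way to run this schedule (illustrative; the -A module path is an
# assumption based on this package layout) is one worker plus one beat process,
# e.g. "celery -A celery_and_friends.vivaldi worker" and
# "celery -A celery_and_friends.vivaldi beat".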
if __name__ == "__main__":
app.start()
|
miccaldas/celery_and_friends
|
celery_and_friends/vivaldi/__init__.py
|
__init__.py
|
py
| 525 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22457592991
|
import torch
from transformers import BertTokenizer, BertModel, BertConfig, AdamW, BertForMaskedLM
from tokenizers import ByteLevelBPETokenizer
from tokenizers.implementations import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing
from sumerian_data import SumerianDataset
from typing import List, Tuple
import numpy as np
from tqdm import tqdm
import os
DEVICE = torch.device("cuda")
DATA_PATH = r"../data/sumerian_document_set.atf"
VOCAB_PATH = r"../data/sumerian_vocab_list"
def save_corpus(save_path: str, dataset: torch.utils.data.Dataset):
try:
os.mkdir(save_path)
except FileExistsError:
print("Directory path already exists.")
docs = dataset.get_corpus()
processed_doc = [" ".join(doc) for doc in docs]
with open(os.path.join(save_path, r"processed_corpus.txt"), "w") as fp:
fp.write("\n".join(processed_doc))
class SumarianBERTDataset(torch.utils.data.Dataset):
def __init__(self, tokenizer: ByteLevelBPETokenizer, data_path: str, evaluate: bool = False):
self.evaluate = evaluate
self.tokenizer = tokenizer
self.tokenizer._tokenizer.post_processor = BertProcessing(
("</s>", tokenizer.token_to_id("</s>")),
("<s>", tokenizer.token_to_id("<s>")),
)
self.tokenizer.enable_truncation(max_length=512)
self.training_labels, self.test_labels, self.training_mask, self.test_mask = self.__get_split(data_path)
def __get_split(self, data_path: str) -> Tuple[list, list]:
with open(data_path, "r") as file:
lines = file.read().split("\n")
lines_tokens = [line for line in self.tokenizer.encode_batch(lines)]
mask = [x.attention_mask for x in lines_tokens]
labels = [line.ids for line in lines_tokens]
indices = np.random.permutation(len(labels))
split = int(len(labels) * 0.8)
training_idxs, test_idxs = indices[:split], indices[split:]
training_labels, test_labels = [], []
training_mask, test_mask = [], []
for train_idx in training_idxs:
training_labels.append(labels[train_idx])
training_mask.append(mask[train_idx])
for test_idx in test_idxs:
test_labels.append(labels[test_idx])
test_mask.append(mask[test_idx])
return training_labels, test_labels, training_mask, test_mask
def __len__(self):
if self.evaluate:
return len(self.test_labels)
else:
return len(self.training_labels)
def __getitem__(self, i):
if self.evaluate:
return (
torch.tensor(self.test_labels[i]).type(torch.float),
torch.tensor(self.test_mask[i]).type(torch.float)
)
else:
return (
torch.tensor(self.training_labels[i]).type(torch.float),
torch.tensor(self.training_mask[i]).type(torch.float)
)
def collate_fn_padd(batch):
'''
    Pads a batch of variable-length sequences.
    Note: tensors are assembled manually here because the ToTensor transform
    assumes image inputs rather than arbitrary tensors.
'''
labels = [item[0] for item in batch]
att_mask = [item[1] for item in batch]
## get sequence lengths
lengths = torch.tensor([ t.shape[0] for t in labels ]).to(DEVICE)
## padd
labels = [ torch.Tensor(t).to(DEVICE) for t in labels ]
labels = torch.nn.utils.rnn.pad_sequence(labels)
att_mask = [ torch.Tensor(t).to(DEVICE) for t in att_mask ]
att_mask = torch.nn.utils.rnn.pad_sequence(att_mask)
## compute mask
mask = (labels != 0).to(DEVICE)
input_ids = labels.detach().clone()
rand = torch.rand(input_ids.shape)
mask_arr = (rand < .15) * (input_ids != 0) * (input_ids != 1) * (input_ids != 2)
for i in range(input_ids.shape[0]):
selection = torch.flatten(mask_arr[i].nonzero()).tolist()
input_ids[i, selection] = 3
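    # Note on the constants above (an inference, not stated by the original author):
    # with the special tokens passed in training order in main() below, ids 0, 1 and 2
    # are <s>, <pad> and </s>, so those positions are excluded before roughly 15% of
    # the remaining tokens are overwritten with id 3.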
return labels.T, att_mask.T, input_ids.T, lengths.T, mask.T
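# Illustrative call (made-up tensors): for a batch of two (labels, attention_mask)
# pairs of lengths 3 and 5, e.g.
#   batch = [(torch.tensor([2., 5., 3.]), torch.tensor([1., 1., 1.])),
#            (torch.tensor([2., 7., 9., 4., 3.]), torch.ones(5))]
#   labels, att_mask, input_ids, lengths, mask = collate_fn_padd(batch)
# the padded labels, att_mask and input_ids all come back with shape (2, 5) and
# lengths is tensor([3, 5]); this assumes the module-level CUDA DEVICE is available.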
# model = BertModel.from_pretrained("bert-base-multilingual-cased")
# text = "Replace me by any text you'd like."
# encoded_input = tokenizer(text, return_tensors='pt')
# output = model(**encoded_input)
def main():
SAVE_CORPUS=False
MAKE_TOKENIZER=False
dataset = SumerianDataset(DATA_PATH, VOCAB_PATH)
save_path=r"../data/processed_data/"
if SAVE_CORPUS:
save_corpus(save_path, dataset)
if MAKE_TOKENIZER:
vocab_size = dataset.get_vocab_size()
tokenizer = ByteLevelBPETokenizer()
tokenizer.train(
files=os.path.join(save_path, r"processed_corpus.txt"),
vocab_size=vocab_size,
min_frequency=2,
special_tokens=[
"<s>",
"<pad>",
"</s>",
"<unk>",
"<mask>",
])
try:
os.mkdir(r"../tokenizer/")
except FileExistsError:
print("Tokenizer directory path already exists.")
tokenizer.save_model(r"../tokenizer/")
tokenizer = ByteLevelBPETokenizer(
"../tokenizer/vocab.json",
"../tokenizer/merges.txt",
)
BERT_dataset = SumarianBERTDataset(
tokenizer,
os.path.join(save_path, r"processed_corpus.txt"),
evaluate=False)
BERT_train_loader = torch.utils.data.DataLoader(
BERT_dataset,
batch_size=16,
shuffle=True,
collate_fn=collate_fn_padd)
config = BertConfig(
vocab_size=dataset.get_vocab_size(),
max_position_embeddings=512,
hidden_size=768,
num_attention_heads=12,
num_hidden_layers=12,
type_vocab_size=2
)
# config = BertConfig(
# vocab_size=dataset.get_vocab_size(),
# max_position_embeddings=512,
# hidden_size=768,
# num_attention_heads=4,
# num_hidden_layers=4,
# type_vocab_size=1
# )
model = BertForMaskedLM(config)
multling_model = BertModel.from_pretrained("bert-base-multilingual-cased")
multling_params = multling_model.state_dict()
# Remove params that are a mismatch with current model.
del multling_params["embeddings.position_ids"]
del multling_params['embeddings.word_embeddings.weight']
del multling_params['embeddings.position_embeddings.weight']
del multling_params['embeddings.token_type_embeddings.weight']
del multling_params['embeddings.LayerNorm.weight']
del multling_params['embeddings.LayerNorm.bias']
model.load_state_dict(multling_params, strict=False)
model.to(DEVICE)
print("Number of parameters: ", end="")
print(model.num_parameters())
model.train()
optim = AdamW(model.parameters(), lr=1e-4)
epochs = 2
for epoch in range(epochs):
# setup loop with TQDM and dataloader
loop = tqdm(BERT_train_loader, leave=True)
for batch in loop:
optim.zero_grad()
labels, attention_mask, input_ids, lengths, mask = batch
input_ids.to(DEVICE)
attention_mask.to(DEVICE)
labels.to(DEVICE)
outputs = model(input_ids.long(), attention_mask=attention_mask, labels=labels.long().contiguous())
loss = outputs.loss
loss.backward()
optim.step()
loop.set_description(f'Epoch {epoch}')
loop.set_postfix(loss=loss.item())
if __name__=="__main__":
main()
|
sethbassetti/sumerian_embeddings
|
src/BERTmodel.py
|
BERTmodel.py
|
py
| 7,800 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34712009040
|
# Coding Math Episode 9b
# Add the acceleration and observe how it impacts velocity (speed + direction)
# on the fireworks
__author__ = "piquesel"
import pygame
import math
import random
from ep7 import Vector as Vector
class Particle:
'Represents a particle defined by its position, velocity and direction'
def __init__(self, speed, direction, x=0, y=0, gravity=0):
'Initialize the particle'
self.x = x
self.y = y
self.position = Vector(x, y)
self.velocity = Vector(0, 0)
self.velocity.set_length(speed)
self.velocity.set_angle(direction)
self.gravity = Vector(0, gravity)
def accelerate(self, accel):
self.velocity.add_to(accel)
def update(self):
self.velocity.add_to(self.gravity)
self.position.add_to(self.velocity)
pygame.init()
NB_LINES = 100
RED = pygame.color.THECOLORS['red']
screen = pygame.display.set_mode((800, 600))
screen_rect = screen.get_rect()
pygame.display.set_caption("Episode 9b")
particles = []
NUM_PARTICLES = 300
gravity = Vector(0, 0.1)
for i in range(0, NUM_PARTICLES):
particles.append(Particle(random.random() * 5 + 2,
random.random() * math.pi * 2,
screen_rect.width/2,
screen_rect.height/3,
0.01))
main_loop = True
while main_loop:
pygame.time.delay(10)
for event in pygame.event.get():
if (event.type == pygame.QUIT
or event.type == pygame.KEYDOWN
and event.key == pygame.K_ESCAPE):
main_loop = False
screen.fill((0, 0, 0))
for i in range(0, NUM_PARTICLES):
p = particles[i]
p.accelerate(gravity)
p.update()
pygame.draw.circle(screen, RED, (round(p.position.get_x()),
round(p.position.get_y())), 5)
pygame.display.update()
pygame.quit()
|
piquesel/coding-math
|
ep9b.py
|
ep9b.py
|
py
| 1,954 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4546238710
|
import cv2
from matplotlib.pyplot import contour
import numpy as np
import matplotlib.pyplot as plt
from random import randint
import matplotlib.pyplot as plt
def DetectPositionMaxSkin(filename,x, y, w, h, lower, upper):
#y=y+50
Image = cv2.VideoCapture(filename)
#Image = cv2.VideoCapture('t8.mp4')
success, frame = Image.read()
while success :
success, frame = Image.read()
#cv2.imshow('Imagem Original', frame)
if success:
cropeedIMAGE = frame[y:y+h, x:x+w]
converted = cv2.cvtColor(cropeedIMAGE, cv2.COLOR_BGR2HSV)
#cv2.imshow('convertedHSV',converted)
skinMask = cv2.inRange(converted, lower, upper)
#cv2.imshow('skin',skinMask)
# apply a series of erosions and dilations to the mask
# using an elliptical kernel
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (12, 12))
skinMask = cv2.erode(skinMask, kernel, iterations=3)
skinMask = cv2.dilate(skinMask, kernel, iterations=3)
# blur the mask to help remove noise, then apply the
# mask to the frame
skinMask = cv2.GaussianBlur(skinMask, (11, 11), 5)
#cv2.imshow('skinMask',skinMask)
skin = cv2.bitwise_and(cropeedIMAGE, cropeedIMAGE, mask=skinMask)
#cv2.imshow('skin',skin)
########################################################
#lowerFinger =np.array([8, 15, 110], dtype="uint8")
#upperFinger = np.array([8, 15, 110], dtype="uint8")
hsv_img = cv2.cvtColor(skin, cv2.COLOR_BGR2HSV)
#hsv_img = cv2.inRange(hsv_img, lowerFinger, upperFinger)
#cv2.imshow('hsv_img', hsv_img)
# Extracting Saturation channel on which we will work
img_s = hsv_img[:, :, 1]
#img_s = skin[:, :, 1]
#cv2.imshow('img_s', img_s)
# smoothing before applying threshold
img_s_blur = cv2.GaussianBlur(img_s, (7,7), 2)
#img_s_blur = cv2.medianBlur(skin,5)
#cv2.imshow('img_s_blur', img_s_blur)
img_s_binary = cv2.threshold(img_s_blur, 200, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1] # Thresholding to generate binary image (ROI detection)
#cv2.imshow('img_s_binary1', img_s_binary1)
# reduce some noise
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 4))
img_s_binary = cv2.morphologyEx(img_s_binary, cv2.MORPH_OPEN, kernel, iterations=4)
#cv2.imshow('img_s_binary1', img_s_binary)
# ROI only image extraction & contrast enhancement, you can crop this region
#img_croped = cv2.bitwise_and(img_s, img_s_binary) * 10
#cv2.imshow('img_croped', img_croped)
# eliminate
kernel = np.ones((5, 5), np.float32)/25
processedImage = cv2.filter2D(img_s_binary, -1, kernel)
img_s_binary[processedImage > 250] = 0
#cv2.imshow('img_s_binary2', img_s_binary)
edges = cv2.threshold(img_s_binary, 100, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
#th3 = cv2.adaptiveThreshold(img_s_blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
#_,edges = cv2.threshold(img_croped, 160, 255, cv2.THRESH_BINARY_INV)
#cv2.imshow('edges', edges)
#https://docs.opencv.org/3.4/da/d0c/tutorial_bounding_rects_circles.html
contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#print("Number of contours =" + str(len(contours)))
#print("Number of hierarchy =" + str(len(hierarchy)))
#print(np.argmax(hierarchy))
contours_poly = [None]*len(contours)
centers = [None]*len(contours)
radius = [None]*len(contours)
#area= [None]*len(contours)
#drawing = np.zeros((edges.shape[0], edges.shape[1], 3), dtype=np.uint8)
for i, c in enumerate(contours):
contours_poly[i] = cv2.approxPolyDP(c, 0.02, True)
#boundRect[i] = cv2.boundingRect(contours_poly[i])
centers[i], radius[i] = cv2.minEnclosingCircle(contours_poly[i])
#area[i] = cv2.contourArea(contours[i])
#print("area: %s" % area)
#if i>=6 and cv2.contourArea(contours[i]) >= 100:
                if 5000 <= cv2.contourArea(contours[i]) <= 7600 and radius[i] < 50:
#cv2.drawContours(skin, contours_poly, i, (255,0,0))
cv2.circle(skin, (int(centers[i][0]), int(centers[i][1])), int(radius[i]), (0,0,255), 2)
#cv2.imshow('Contours', skin)
cv2.imshow('skin', skin)
#print((centers[i][0]))S
#xe=np.arange(1,121)
#print(len(xe))
#plt.plot(x,centers[i][0],'ro')
#plt.ylabel('some numbers')
#plt.show()
#cv2.imshow('Skin Mask', skinMask)
#cv2.imshow('Skin', skin)
#vcat = cv2.hconcat((skinMask, skin))
#cv2.imshow('vcat', vcat)
#cv2.imshow('hsv_img', hsv_img)
#cv2.imshow('Extracting Saturation', img_s)
#cv2.imshow('img_s_binary1', img_s_binary1)
#cv2.imshow('img_croped', img_croped)
#cv2.imshow('edges', edges)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
cv2.destroyAllWindows()
#xc,yc,wc,hc,skin,skinMask,hsv_img,img_s_blur,img_s_binary1,img_croped,edges,cropeedIMAGE
|
raquelpantojo/Detectskin
|
DetectNail.py
|
DetectNail.py
|
py
| 6,004 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27828726752
|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import re
import arcpy
import sys
import traceback
import tempfile
import os
__author__ = "Coen Jonker"
class ColumnParser(object):
def __init__(self):
self.non_alpha_pattern = re.compile(r'[^A-Za-z0-9_]')
self.ending = re.compile(r'_$')
self.beginning = re.compile(r'^_')
self.doubles = re.compile(r'__')
self.next_number = {}
def parseColname(self, name_in):
temp = re.sub(self.doubles, '_',
re.sub(self.ending, '',
re.sub(self.beginning, '',
re.sub(self.non_alpha_pattern, '_', name_in)
)
)
)
if temp[0] in '0123456789':
temp = "N" + temp
if temp in self.next_number:
nn = self.next_number[temp]
temp += "_{0}".format(nn)
nn += 1
else:
nn = 1
self.next_number[temp] = nn
return temp
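    # Illustrative examples (made-up column names): parseColname("Project (2019) nr.")
    # returns "Project_2019_nr", and a second column that cleans to the same name
    # comes back as "Project_2019_nr_1".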
if __name__ == "__main__":
input_xlsx = arcpy.GetParameterAsText(0)
sheetname = arcpy.GetParameterAsText(1)
workspace = arcpy.GetParameterAsText(3)
input_fc = arcpy.GetParameterAsText(2)
arcpy.env.overwriteOutput = True
arcpy.env.workspace = workspace
temppath = tempfile.mkdtemp()
output_csv = os.path.join(temppath, "tactisch_plan_schiphol_parsed.csv")
try:
        # Load the data
data = pd.read_excel(input_xlsx, sheetname=sheetname, skiprows=1, header=0)
        # Instantiate the parser
parser = ColumnParser()
        # Parse the column names
colnames = [parser.parseColname(x) for x in data.columns]
        # Assign the new column names to the dataframe
data.columns = colnames
# data["Projectnummer_str"] = data["Projectnummer"].astype(str).apply(lambda x: x.split('.')[0])
        # OLD CODE, AN ATTEMPT TO WRITE THE TABLE DIRECTLY
"""
n_array = np.array(np.rec.fromrecords(data.values))
names = data.dtypes.index.tolist()
n_array.dtype.names = tuple(names)
arcpy.AddMessage(names)
arcpy.da.NumPyArrayToTable(n_array, "Tactischplan")
"""
        # Write the CSV file
data.to_csv(output_csv, index=False, encoding='utf-8')
arcpy.TableToTable_conversion(output_csv, workspace, "Tactischplan")
arcpy.AddField_management("Tactischplan", "ProjectNR_STR", "TEXT")
arcpy.CalculateField_management("Tactischplan", "ProjectNR_STR", "str(int(!Projectnummer!))")
arcpy.CopyFeatures_management(input_fc, "ingetekendTactischplan")
arcpy.JoinField_management("ingetekendTactischplan", "PROJECTNUMMER", "Tactischplan", "ProjectNR_STR")
except:
# Get the traceback object
#
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
#
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages(2) + "\n"
# Return python error messages for use in script tool or Python Window
#
arcpy.AddError(pymsg)
arcpy.AddError(msgs)
# Print Python error messages for use in Python / Python Window
#
print(pymsg)
print(msgs)
|
wagem007/asset-management-geo
|
tactisch_plan_xlsx_to_feature_class.py
|
tactisch_plan_xlsx_to_feature_class.py
|
py
| 3,474 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71775260987
|
"""
Read an integer value in seconds and print it as hours, minutes and seconds.
"""
t = int(input('Enter a number of seconds: '))
h = t // 3600
resto = t % 3600
m = resto // 60
s = resto % 60
print(f'{h} hours, {m} minutes and {s} seconds.')
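# Worked example (made-up input): 3661 seconds -> 1 hour, 1 minute and 1 second,
# since 3661 // 3600 = 1, 3661 % 3600 = 61, 61 // 60 = 1 and 61 % 60 = 1.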
|
Hugolimaslv/guppe
|
guppe/Seção 04/48.py
|
48.py
|
py
| 258 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
30353259361
|
from os import path
import os
import sys
from os.path import join, dirname
# Enthought library imports.
from pyface.action.api import Action
from traitsui.api import auto_close_message
# Local imports
import mayavi.api
from mayavi.core.common import error
from mayavi.preferences.api import preference_manager
# To find the html documentation directory, first look under the
# standard place. If that directory doesn't exist, assume you
# are running from the source.
local_dir = dirname(mayavi.api.__file__)
HTML_DIR = join(local_dir, 'html')
if not path.exists(HTML_DIR):
HTML_DIR = join(dirname(dirname(local_dir)),
'build', 'docs', 'html', 'mayavi')
if not path.exists(HTML_DIR):
HTML_DIR = None
def browser_open(url):
if sys.platform == 'darwin':
os.system('open %s &' % url)
else:
import webbrowser
webbrowser.open(url, autoraise=1)
def open_help_index(*args):
""" Open the mayavi user manual index in a browser.
"""
# If the HTML_DIR was found, bring up the documentation in a
# web browser. Otherwise, bring up an error message.
if HTML_DIR:
auto_close_message("Opening help in web browser...")
browser_open(join(HTML_DIR, 'index.html'))
else:
browser_open('https://docs.enthought.com/mayavi/mayavi/')
def open_tvtk_docs(*args):
""" Open the TVTK class browser.
"""
from tvtk.tools.tvtk_doc import TVTKClassChooser
TVTKClassChooser().edit_traits()
######################################################################
# `HelpIndex` class.
######################################################################
class HelpIndex(Action):
""" An action that pop up the help in a browser. """
tooltip = "The Mayavi2 user guide"
description = "The Mayavi2 user guide"
###########################################################################
# 'Action' interface.
###########################################################################
def perform(self, event):
""" Performs the action. """
open_help_index()
######################################################################
# `TVTKClassBrowser` class.
######################################################################
class TVTKClassBrowser(Action):
""" An action that opens the tvtk interactive class browser. """
tooltip = "The TVTK interactive class browser"
description = "The TVTK interactive class browser"
###########################################################################
# 'Action' interface.
###########################################################################
def perform(self, event):
""" Performs the action. """
open_tvtk_docs()
|
enthought/mayavi
|
mayavi/action/help.py
|
help.py
|
py
| 2,807 |
python
|
en
|
code
| 1,177 |
github-code
|
6
|
37005580941
|
#coding: utf-8
import tornado.web
from basehandler import BaseHandler
from lib.lrclib import LrcLib
from lib.songinfo import SongInfo
from models import MusicModel, UserModel
import json
import re
import logging
log = logging.getLogger("index")
log.setLevel(logging.DEBUG)
rm_regex = r"/(\([^\)]*\))|(\[[^\]]*\])|(([^)]*))|(【[^】]*】)|((-|\/|&).*)/g"
def simplify(string):
return re.sub(rm_regex, "", string)
class IndexHandler(BaseHandler):
def __init__(self, application, request, **kwargs):
self._lrclib = LrcLib()
self._info = SongInfo()
self._music = MusicModel()
self._users = UserModel()
super(IndexHandler, self).__init__(application, request, **kwargs)
def get(self):
user_id = self.current_user
log.debug("user_id is {0}".format(user_id))
# if not user_id:
# return self.render('login.html')
account = ""
if user_id:
user = self._users.get_one(user_id)
log.debug(user)
if user:
account = user["account"]
return self.render('index.html', current_user = account, domain=self.request.full_url())
def on_error(self):
return self.write(json.dumps({
"code": 1
}))
def post(self):
request = json.loads(self.request.body)
song = dict(
sid = request.get("songId"),
artist = request.get("artist"),
title = request.get("title"),
channel = request.get("channel"),
share_url = request.get("shareUrl"),
album_img = request.get("albumImgUrl"),
start_time = request.get("startTime")
)
lrc, song_info = self._music.get_rep(song["sid"])
if lrc and song_info:
lrc.update({"startTime": song["start_time"]})
return self.write(json.dumps({
"code": 0,
"lyricsInfo": lrc,
"songInfo": song_info
}))
lrc = self._lrclib.getlrc(simplify(song["title"]), simplify(song["artist"]))
if not song_info:
info_res = self._info.get_info(song["share_url"])
if not info_res:
return self.on_error()
song_info = info_res["song"][0]
song_info = {
"album": song_info["albumtitle"],
"albumId": song_info["aid"],
"albumImgUrl": song_info["picture"],
"albumUrl": song_info["album"],
"artist": song_info["artist"],
"company": song_info["company"],
"duration": song_info["length"],
"mp3Url": song_info["url"],
"rating": song_info["rating_avg"],
"releaseYear": song_info["public_time"],
"songId": song_info["sid"],
"ssid": song_info["ssid"],
"startToken": song_info["start_token"],
"title": song_info["title"],
"shareUrl": song["share_url"]
}
response = json.dumps({
"code": 0,
"lyricsInfo": {
"lyrics": lrc,
"offset": 0,
"startTime": song["start_time"]
},
"songInfo": song_info
})
self._music.set_rep(song["sid"], dict(lyrics=lrc,offset=0), song_info)
return self.write(response)
|
mgbaozi/ruankusb
|
handlers/index.py
|
index.py
|
py
| 3,525 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15697493759
|
import cv2 as cv
# Load the image and convert it to grayscale
img_original = cv.imread("imgs/Cuadrados.jpg")
img_bnw = cv.cvtColor(img_original, cv.COLOR_BGR2GRAY)
# Apply the corner detection function
maxCorners = 20
esquinas = cv.goodFeaturesToTrack(img_bnw, maxCorners, 0.01, 10)
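# goodFeaturesToTrack arguments: at most maxCorners corners, a quality level of
# 0.01 relative to the strongest corner response, and a minimum distance of 10 px
# between accepted corners.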
# Define the stopping criterion for sub-pixel accuracy and apply it
criterio_stop = (cv.TERM_CRITERIA_MAX_ITER + cv.TERM_CRITERIA_EPS, maxCorners, 0.0001)
esquinas_final = cv.cornerSubPix(img_bnw, esquinas, (5,5), (2,2), criterio_stop)
# Draw the detected corners on the image
for esquina in esquinas_final:
x, y = esquina.ravel()
cv.circle(img_original, (int(x), int(y)), 2, (0,0,0), -1)
cv.namedWindow("original", cv.WINDOW_NORMAL)
cv.imshow("original", img_original)
cv.waitKey()
cv.destroyAllWindows()
|
FadedGuy/Universidad
|
L3/visionComputador/tp5/cuestiones/8.py
|
8.py
|
py
| 816 |
python
|
es
|
code
| 2 |
github-code
|
6
|
39425077344
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# -*- coding: utf-8 -*-
#
# Last modified: Fri, 14 Oct 2022 02:22:56 +0900
import numpy as np
import pandas as pd
import os
import sys
from .measurePhenotypes import measurePhenotypes
from ..util.isNotebook import isnotebook
if isnotebook():
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
def annotateLineageIdx(**kwargs):
if len({"matFilePath", "segImgsPath", "rawImgsPath"} & set(kwargs.keys())) > 2:
originFrame = 0
if "originFrame" in kwargs.keys():
originFrame = kwargs["originFrame"]
return annotateSchnitz(
kwargs["matFilePath"],
kwargs["segImgsPath"],
kwargs["rawImgsPath"],
originFrame,
)
elif "tanauchi" in kwargs.keys():
cutOff = None
if "cutOff" in kwargs.keys():
cutOff = kwargs["cutOff"]
return annotateTanauchi(kwargs["tanauchi"], cutOff)
elif "wang" in kwargs.keys():
cutOff = None
if "cutOff" in kwargs.keys():
cutOff = kwargs["cutOff"]
return annotateWang(kwargs["wang"], cutOff)
elif "wakamoto" in kwargs.keys():
cutOff = None
if "cutOff" in kwargs.keys():
cutOff = kwargs["cutOff"]
return annotateHashimoto(kwargs["wakamoto"], cutOff)
else:
print("Error")
sys.exit(-1)
def annotateWang(WangDir, cutOff=None, fileTag=".dat"):
coord = sorted(
[
os.path.join(WangDir, d)
for d in os.listdir(WangDir)
if os.path.isdir(os.path.join(WangDir, d)) and "xy" in d
]
)
linIdx = 0
ID = []
motherID = []
uID = 0
Z = []
intensity = []
area = []
daughter1ID = []
daughter2ID = []
cenX = []
cenY = []
linIdx = []
lineages = []
if cutOff is not None:
x, y = cutOff
for dName in coord:
xy = int(dName[-2:])
if xy <= x:
lins = sorted(
[
os.path.join(dName, f)
for f in os.listdir(dName)
if os.path.isfile(os.path.join(dName, f)) and fileTag in f
]
)
lineages += lins[:y]
else:
for dName in coord:
lineages += [
os.path.join(dName, f)
for f in os.listdir(dName)
if os.path.isfile(os.path.join(dName, f)) and fileTag in f
]
for lin in tqdm(lineages):
cellNo = 0
with open(lin, "r") as data:
next(data)
for line in data:
if cellNo == 0:
motheruID = -1
else:
daughter1ID.append(uID)
motheruID = uID - 1
motherID.append(motheruID)
ID.append(cellNo)
aa = line.split(" ")
cenX.append(float(aa[6]))
cenY.append(float(aa[7]))
Z.append(int(aa[0]))
intensity.append(float(aa[5]))
                area.append(float(aa[4]))  # Actually length, not area
if int(aa[1]) == 1:
daughter2ID.append(-3)
else:
daughter2ID.append(-1)
cellNo += 1
uID += 1
linIdx.append(lineages.index(lin))
daughter1ID.append(-1)
cellDict = {
"ID": np.array(ID),
"uID": np.array(range(uID)),
"motherID": np.array(motherID),
"daughter1ID": np.array(daughter1ID),
"daughter2ID": np.array(daughter2ID),
"cenX": np.array(cenX),
"cenY": np.array(cenY),
"Z": np.array(Z),
"cellNo": np.array(ID),
"intensity": np.array(intensity),
"area": np.array(area),
"linIdx": np.array(linIdx),
}
# for key in cellDict.keys():
# print(key,cellDict[key])
CellDfWP = pd.DataFrame(cellDict)
return CellDfWP
def annotateTanauchi(TanauchiDir, cutOff=None):
files = os.listdir(TanauchiDir)
linIdx = 0
ID = []
motherID = []
uID = 0
Z = []
intensity = []
area = []
daughter1ID = []
daughter2ID = []
cenX = []
cenY = []
linIdx = []
lineages = []
if cutOff is not None:
for lin in files:
x, y = cutOff
coord = lin[2:].split(".")[0].split("_")
if int(coord[0]) - 1 < x and int(coord[1]) - 1 < y:
lineages.append(lin)
else:
lineages = files
for lin in tqdm(lineages):
cellNo = 0
coord = lin[2:].split(".")[0].split("_")
with open(os.path.join(TanauchiDir, lin), "r") as data:
for line in data:
cenX.append(int(coord[0]))
cenY.append(int(coord[1]))
if cellNo == 0:
motheruID = -1
else:
daughter1ID.append(uID)
motheruID = uID - 1
motherID.append(motheruID)
ID.append(cellNo)
aa = line.split(",")
Z.append(int(aa[0]) - 1)
intensity.append(float(aa[4]))
                area.append(float(aa[2]))  # Actually length, not area
if int(aa[1]) == 1:
daughter2ID.append(-3)
else:
daughter2ID.append(-1)
cellNo += 1
uID += 1
linIdx.append(lineages.index(lin))
daughter1ID.append(-1)
cellDict = {
"ID": np.array(ID),
"uID": np.array(range(uID)),
"motherID": np.array(motherID),
"daughter1ID": np.array(daughter1ID),
"daughter2ID": np.array(daughter2ID),
"cenX": np.array(cenX),
"cenY": np.array(cenY),
"Z": np.array(Z),
"cellNo": np.array(ID),
"intensity": np.array(intensity),
"area": np.array(area),
"linIdx": np.array(linIdx),
}
# for key in cellDict.keys():
# print(key,cellDict[key])
CellDfWP = pd.DataFrame(cellDict)
return CellDfWP
def annotateHashimoto(HashimotoDir, cutOff=None):
linIdx = 0
ID = []
motherID = []
uID = 0
Z = []
intensity = []
area = []
daughter1ID = []
daughter2ID = []
cenX = []
cenY = []
lineages = []
columnName = [
"ID",
"uID",
"motherID",
"daughter1ID",
"daughter2ID",
"cenX",
"cenY",
"Z",
"cellNo",
"intensity",
"area",
"linIdx",
]
CellDfWP = pd.DataFrame(columns=columnName)
data = pd.read_table(HashimotoDir, index_col=0)
lastCell = data[data["LastIndex"] == 1].sort_index()
if cutOff is not None:
lastCell = lastCell.iloc[: int(cutOff)]
for lCell in tqdm(lastCell.iterrows()):
progenyId, lCell = lCell
daughter1ID = -1
motherId = int(lCell["PreviousCell"])
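        # Walk backwards along the "PreviousCell" chain from each terminal cell,
        # appending one row per ancestor; stop early once the mother is already in
        # the table, since that part of the lineage has been annotated before.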
while motherId >= 0:
info = [
progenyId,
motherId,
daughter1ID,
-1,
float(lCell["XM"]),
float(lCell["YM"]),
lCell["Slice"] - 1,
float(lCell["Mean"]) - float(lCell["Background"]),
float(lCell["Area"]),
linIdx,
] # Add row with this info
columns = columnName[1:8] + columnName[-3:] # skip over ID and cellNo
df = dict(zip(columns, info))
if motherId in list(CellDfWP["uID"]) and motherId != 0:
mask = CellDfWP["uID"] == motherId
CellDfWP.loc[mask, "daughter2ID"] = progenyId
d1 = CellDfWP.loc[mask, "daughter1ID"]
d2 = CellDfWP.loc[mask, "daughter2ID"]
if int(d1) > int(d2):
tmp = int(d1)
d1 = int(d2)
d2 = tmp
maskLin = CellDfWP["linIdx"] == linIdx
linIdx -= 1
CellDfWP.loc[maskLin, "linIdx"] = int(CellDfWP.loc[mask, "linIdx"])
CellDfWP = CellDfWP.append(df, ignore_index=True)
break
# CellDfWP[CellDfWP['uID'] == motherId]['daughter2ID'] = progenyId
daughter1ID = progenyId
progenyId = motherId
if motherId == 0:
motherId = -1
df["motherID"] = motherId
else:
lCell = data.iloc[motherId - 1]
motherId = int(lCell["PreviousCell"])
CellDfWP = CellDfWP.append(df, ignore_index=True)
linIdx += 1
CellDfWP = CellDfWP.sort_values(by="Z", ascending=True)
CellDfWP["ID"] = list(range(len(CellDfWP)))
CellDfWP["cellNo"] = list(range(len(CellDfWP)))
# CellDfWP['uID'] = CellDfWP['uID'].astype(int)
# CellDfWP['motherID'] = CellDfWP['motherID'].astype(int)
# CellDfWP['daughter1ID'] = CellDfWP['daughter1ID'].astype(int)
# CellDfWP['daughter2ID'] = CellDfWP['daughter2ID'].astype(int)
# CellDfWP['Z'] = CellDfWP['Z'].astype(int)
# CellDfWP['linIdx'] = CellDfWP['linIdx'].astype(int)
return CellDfWP
def annotateSchnitz(matFilePath, segImgsPath, rawImgsPath, originFrame=0):
"""
    Annotate lineage indices according to the result of cell tracking.
Parameters
----------
    matFilePath : string
        A path to the MAT file created by Schnitzcells
    segImgsPath : string
        A path to the directory containing the segmented images
        produced by Schnitzcells
    rawImgsPath : string
        A path to the directory containing the raw images
        required by Schnitzcells
Returns
-------
    cellDfWPL : pandas.core.frame.DataFrame
        A pandas DataFrame which includes the tracking result,
        the phenotypes of each cell, and lineage indices.
        Its name is an abbreviation for cell DataFrame
        With Phenotypes and Lineage indices.
Column indices are like below
- ID
- uID
- motherID
- daughter1ID
- daughter2ID
- cenX
- cenY
- Z
- cellNo
- intensity
- area
- linIdx
"""
cellDfWP = measurePhenotypes(matFilePath, segImgsPath, rawImgsPath, originFrame)
numOfLin = 0
for uID in cellDfWP["uID"]:
daughter1ID = cellDfWP["daughter1ID"][uID]
daughter2ID = cellDfWP["daughter2ID"][uID]
if daughter1ID < 0 and daughter2ID < 0:
numOfLin += 1
linIdx = 0
linList = np.zeros(len(cellDfWP["uID"]))
for uID in cellDfWP["uID"]:
motherID = cellDfWP["motherID"][uID]
if motherID == -1:
linList[uID] = linIdx
linIdx += 1
else:
sister1ID = cellDfWP["daughter1ID"][motherID]
sister2ID = cellDfWP["daughter2ID"][motherID]
            if sister1ID > 0 and sister2ID > 0:  # if the parent cell has divided
if sister1ID == uID:
linList[uID] = linList[motherID]
else:
linList[uID] = linIdx
linIdx += 1
            else:  # if the parent cell has not divided
linList[uID] = linList[motherID]
linIdx = 0
for uID in cellDfWP["uID"]:
motherID = cellDfWP["motherID"][uID]
if motherID == -1:
linList[uID] = linIdx
linIdx += 1
else:
sister1ID = cellDfWP["daughter1ID"][motherID]
sister2ID = cellDfWP["daughter2ID"][motherID]
            if sister1ID > 0 and sister2ID > 0:  # if the parent cell has divided
if sister1ID == uID:
linList[uID] = linList[motherID]
else:
linList[uID] = linIdx
linIdx += 1
            else:  # if the parent cell has not divided
linList[uID] = linList[motherID]
linList = linList.astype(np.int64)
cellDfWP["linIdx"] = linList
return cellDfWP
if __name__ == "__main__":
matFilePath = (
"/Users/itabashi/Research/Analysis/Schnitzcells/"
"9999-99-99/488/data/488_lin.mat"
)
segImgsPath = (
"/Users/itabashi/Research/Analysis/Schnitzcells/" "9999-99-99/488/segmentation/"
)
rawImgsPath = (
"/Users/itabashi/Research/Experiment/microscope/"
"2018/08/28/ECTC_8/Pos0/forAnalysis/488FS/"
)
    cellDfWPL = annotateLineageIdx(
        matFilePath=matFilePath, segImgsPath=segImgsPath, rawImgsPath=rawImgsPath
    )
print(cellDfWPL)
|
funalab/pyCellLineage
|
lineageIO/annotateLineageIdx.py
|
annotateLineageIdx.py
|
py
| 12,848 |
python
|
en
|
code
| 1 |
github-code
|
6
|
30087901686
|
#importing tkinter module
from tkinter import *
from tkinter import ttk
#Creating object - root of Tk()
root = Tk()
#this will make a screen size
root.geometry("500x500")
root.configure(background="lightgreen")
#Providing title to the form
root.title('Registration form')
#this creates 'Label' widget for Registration Form and uses place() method.
label_0 =Label(root,text="Registration form", width=15,font=("bold",20))
#this method is used to place the widget at a specific position
label_0.place(x=145,y=60)
label_1 =Label(root,text="FullName", width=10,font=("bold",10))
label_1.place(x=70,y=130)
entry_1=Entry(root)
entry_1.place(x=240,y=130)
label_3 =Label(root,text="Email", width=10,font=("bold",10))
label_3.place(x=68,y=180)
entry_3=Entry(root)
entry_3.place(x=240,y=180)
label_4 =Label(root,text="Gender", width=10,font=("bold",10))
label_4.place(x=70,y=230)
#the variable 'var' mentioned here holds Integer Value, by default 0
var=IntVar()
#this creates 'Radio button' widget and uses place() method
Radiobutton(root,text="Male",padx= 5, variable= var, value=1).place(x=235,y=230)
Radiobutton(root,text="Female",padx= 20, variable= var, value=2).place(x=290,y=230)
label_5=Label(root,text="Country",width=10,font=("bold",10))
label_5.place(x=70,y=280)
#this creates list of countries available in the dropdownlist.
list_of_country=[ 'India' ,'US' , 'UK' ,'Germany' ,'Austria']
#the variable 'c' mentioned here holds String Value, by default ""
c=StringVar()
droplist=OptionMenu(root,c, *list_of_country)
droplist.config(width=15)
c.set('Select your Country')
droplist.place(x=240,y=280)
# Creating Check Box
#the variable 'var1' mentioned here holds Integer Value, by default 0
var1=IntVar()
#this creates Checkbutton widget and uses place() method.
Checkbutton(root,text="Accept Terms and Condition", variable=var1).place(x=230,y=330)
#this creates button for submitting the details provides by the user
Button(root, text='Submit' , width=20,bg="black",fg='white',font=("bold",10)).place(x=70,y=380)
#this will run the mainloop.
root.mainloop()
|
nithish19bcs064/Best_Enlist_Assignments
|
day3.py
|
day3.py
|
py
| 2,132 |
python
|
en
|
code
| 1 |
github-code
|
6
|
3177305776
|
from flipperzero_cli import CONFIG, load_config, show_config, \
read_until_prompt, print_until_prompt, check_file_presence, \
flipper_init, main, \
storage_read, save_file, download_from_flipper, \
upload_to_flipper, check_local_md5, compare_md5
import builtins
import pytest
from unittest.mock import patch, mock_open
from .mock_serial import Serial
DEFAULT_CONFIG = {"filename": None,
"port": None,
"show_banner": 0,
"hide_command": False,
"show_config": False}
DEFAULT_COMMAND = ["help"]
# Helpers
def updated_config(data):
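    # Return a fresh copy of DEFAULT_CONFIG with the supplied keys overridden.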
new_config = DEFAULT_CONFIG.copy()
for k, v in data.items():
new_config[k] = v
return new_config
def call_with(m, parameters=[], new_env={}):
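    # Clear any pre-existing FLIPPER_ZERO_* variables, apply the requested
    # environment, and patch sys.argv so load_config() sees the given parameters.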
for k in [
"FLIPPER_ZERO_SHOW_BANNER",
"FLIPPER_ZERO_HIDE_COMMAND",
"FLIPPER_ZERO_PORT",
"FLIPPER_ZERO_FILENAME",
]:
if k not in new_env:
m.delenv(k, raising=False)
for k, v in new_env.items():
m.setenv(k, v)
m.setattr("sys.argv", ["cli.py"] + parameters)
# Tests
def test_load_config(monkeypatch):
with monkeypatch.context() as m:
# Test without env variable and command line parameters
call_with(m, [])
assert load_config() == DEFAULT_COMMAND
assert CONFIG == DEFAULT_CONFIG
# Only test with env parameters
call_with(m, [], {"FLIPPER_ZERO_SHOW_BANNER": "1"})
load_config()
assert CONFIG == updated_config({"show_banner": True})
call_with(m, [], {"FLIPPER_ZERO_HIDE_COMMAND": "1"})
load_config()
assert CONFIG == updated_config({"hide_command": True})
call_with(m, [], {"FLIPPER_ZERO_PORT": "/dev/flipper0"})
load_config()
assert CONFIG == updated_config({"port": "/dev/flipper0"})
call_with(m, [], {"FLIPPER_ZERO_FILENAME": "/home/flipper/dolpin.txt"})
load_config()
assert CONFIG == updated_config({"filename":
"/home/flipper/dolpin.txt"})
call_with(m, [], {
"FLIPPER_ZERO_SHOW_BANNER": "1",
"FLIPPER_ZERO_HIDE_COMMAND": "0",
"FLIPPER_ZERO_PORT": "/dev/flipper0",
"FLIPPER_ZERO_FILENAME": "/home/flipper/dolpin.txt",
})
load_config()
assert CONFIG == updated_config({
"show_banner": True,
"hide_command": False,
"port": "/dev/flipper0",
"filename": "/home/flipper/dolpin.txt",
})
# Test with command line parameters
# -p --port
call_with(m, ["-p", "/dev/flipper0"])
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"port": "/dev/flipper0"})
call_with(m, ["--port", "/dev/flipper0"])
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"port": "/dev/flipper0"})
call_with(m, ["--port", "/dev/flipper1"],
{"FLIPPER_ZERO_PORT": "/dev/flipper0"})
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"port": "/dev/flipper1"})
# -f --filename
call_with(m, ["-f", "/home/flipper/dolpin1.txt"])
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"filename":
"/home/flipper/dolpin1.txt"})
        call_with(m, ["--filename", "/home/flipper/dolpin2.txt"])
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"filename":
"/home/flipper/dolpin2.txt"})
call_with(m, ["-f", "/home/flipper/dolpin3.txt"],
{"FLIPPER_ZERO_FILENAME": "/home/flipper/dolpin.txt"})
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"filename":
"/home/flipper/dolpin3.txt"})
# --show-banner
call_with(m, ["--show-banner"])
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"show_banner": True})
call_with(m, ["--show-banner"], {"FLIPPER_ZERO_SHOW_BANNER": "1"})
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"show_banner": True})
# --hide-command
call_with(m, ["--hide-command"])
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"hide_command": True})
call_with(m, ["--hide-command"], {"FLIPPER_ZERO_HIDE_COMMAND": "1"})
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"hide_command": True})
# --show-config
call_with(m, ["--show-config"])
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"show_config": True})
# Test different values for FLIPPER_ZERO_SHOW_BANNER
for v in ["1", "true", "True"]:
call_with(m, [], {"FLIPPER_ZERO_SHOW_BANNER": v})
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"show_banner": True})
for v in ["false", "False"]:
call_with(m, [], {"FLIPPER_ZERO_SHOW_BANNER": v})
assert load_config() == DEFAULT_COMMAND
assert CONFIG == updated_config({"show_banner": False})
# Test if argparse leave "garbage" in parsing
flipper_command = ["storage", "info", "/ext"]
call_with(m, flipper_command)
assert load_config() == flipper_command
assert CONFIG == DEFAULT_CONFIG
call_with(m, ["--port", "/dev/flipper0"]+flipper_command)
assert load_config() == flipper_command
assert CONFIG == updated_config({"port": "/dev/flipper0"})
call_with(m, flipper_command+["--port", "/dev/flipper0"])
assert load_config() == flipper_command
assert CONFIG == updated_config({"port": "/dev/flipper0"})
def test_show_config(monkeypatch, capsys):
with monkeypatch.context() as m:
call_with(m, ["--port", "/dev/flipper0"])
load_config()
show_config()
captured = capsys.readouterr()
assert captured.out == "show_banner: 0\nhide_command: 0\nport: /dev/flipper0\n"
call_with(m, ["--port", "/dev/flipper1", "--hide-command"])
load_config()
show_config()
captured = capsys.readouterr()
assert captured.out == "show_banner: 0\nhide_command: True\nport: /dev/flipper1\n"
call_with(m, ["--show-banner", "--port", "/dev/flipper1"])
load_config()
show_config()
captured = capsys.readouterr()
assert captured.out == "show_banner: True\nhide_command: 0\nport: /dev/flipper1\n"
def test_read_until_prompt():
f0 = Serial()
simple_prompt = b"Text before\nFlipper prompt\n>: "
f0._out_buffer = simple_prompt
assert read_until_prompt(f0) == simple_prompt.decode()
FLIPPER_SD_INFO_PRINT = """Label: FLIPPER SD
Type: FAT32
3886080KB total
3841024KB free
"""
FLIPPER_SD_INFO = FLIPPER_SD_INFO_PRINT.encode() + b"""
>: ."""
def test_print_until_prompt(capsys):
f0 = Serial()
simple_prompt = b"Text before\nFlipper prompt\n>: "
f0._out_buffer = simple_prompt
print_until_prompt(f0, show_prompt=True)
captured = capsys.readouterr()
assert captured.out == simple_prompt.decode()+"\n"
f0._out_buffer = FLIPPER_SD_INFO
print_until_prompt(f0, show_prompt=True)
captured = capsys.readouterr()
assert captured.out == FLIPPER_SD_INFO.decode()[:-1]+"\n"
f0._out_buffer = FLIPPER_SD_INFO
print_until_prompt(f0, show_prompt=False)
captured = capsys.readouterr()
assert captured.out == FLIPPER_SD_INFO_PRINT
@patch("os.path.exists")
def test_check_file_presence(patch_exists):
# Test missing file
patch_exists.return_value = False
with pytest.raises(SystemExit) as e:
check_file_presence("/tmp/missing_file")
assert e.type == SystemExit
assert e.value.code == 1
# Test existing file
patch_exists.return_value = True
assert check_file_presence("/tmp/existing_file") == True
def test_flipper_init(monkeypatch, capsys):
with pytest.raises(SystemExit) as e:
(command, f0) = flipper_init()
assert e.type == SystemExit
assert e.value.code == 1
captured = capsys.readouterr()
assert captured.out == "Please configure flipper zero serial port\n"
with monkeypatch.context() as m:
call_with(m, [], {"FLIPPER_ZERO_PORT": "/dev/flipper0"})
(command, f0) = flipper_init(s=Serial)
assert command == "help"
STORAGE_READ_01_HEADER = b"""Size: 164
"""
STORAGE_READ_01_CONTENT = b"""In faucibus dignissim ullamcorper.
Nulla quis molestie lacus.
Pellentesque congue dui et felis pharetra eleifend.
Integer magna eros. efficitur sed porta sit amet.
"""
STORAGE_READ_01_FOOTER = b"""
>: ."""
STORAGE_READ_01_RAW = STORAGE_READ_01_HEADER + \
STORAGE_READ_01_CONTENT + \
STORAGE_READ_01_FOOTER
def test_storage_read():
f0 = Serial()
f0._out_buffer = STORAGE_READ_01_RAW
(size, data) = storage_read(f0)
assert size == 164
assert data == STORAGE_READ_01_CONTENT.decode()
def test_save_file(capsys):
mock_write = mock_open()
with patch.object(builtins, "open", mock_write, create=True) as patched_open:
save_file(STORAGE_READ_01_CONTENT.decode(),
"/tmp/file_2_save.txt",
output=False)
captured = capsys.readouterr()
assert captured.out == "Save to /tmp/file_2_save.txt\n"
assert patched_open.mock_calls[2][1][0] == STORAGE_READ_01_CONTENT.decode()
save_file(STORAGE_READ_01_CONTENT.decode(),
"/tmp/file_2_save.txt",
output=True)
captured = capsys.readouterr()
assert captured.out == "Save to /tmp/file_2_save.txt\n" + \
STORAGE_READ_01_CONTENT.decode() + "\n"
def test_download_from_flipper(capsys):
f0 = Serial()
f0._out_buffer = STORAGE_READ_01_RAW
mock_write = mock_open()
with patch.object(builtins, "open", mock_write, create=True) as patched_open:
download_from_flipper(f0, "/tmp/file_2_save.txt", output=False)
captured = capsys.readouterr()
assert captured.out == "Save to /tmp/file_2_save.txt\n"
STORAGE_WRITE_01_HEADER = b"Just write your text data. New line by Ctrl+Enter, exit by Ctrl+C.\n\n"
STORAGE_WRITE_01_FOOTER = b"""
>: """
STORAGE_WRITE_01_OUT = STORAGE_WRITE_01_HEADER + \
STORAGE_READ_01_CONTENT
STORAGE_WRITE_01_RAW = STORAGE_WRITE_01_OUT + \
STORAGE_WRITE_01_FOOTER
@patch("os.path.exists")
def test_upload_to_flipper(patch_exists, capsys):
f0 = Serial()
f0._out_buffer = STORAGE_WRITE_01_RAW
patch_exists.return_value = True
with patch("builtins.open", mock_open(read_data=STORAGE_READ_01_CONTENT)) as mock_file:
upload_to_flipper(f0, "/tmp/file_2_upload.txt")
mock_file.assert_called_with("/tmp/file_2_upload.txt", "rb")
captured = capsys.readouterr()
assert captured.out == STORAGE_WRITE_01_OUT.decode()
def test_check_local_md5():
with patch("builtins.open", mock_open(read_data=STORAGE_READ_01_CONTENT)) as mock_file:
localhash = check_local_md5("/tmp/local_filename.txt")
mock_file.assert_called_with("/tmp/local_filename.txt", "rb")
assert localhash == "9cb4a477cbbf515f7dffb459f1e05594"
GOD_HASH = "9cb4a477cbbf515f7dffb459f1e05594"
BAD_HASH = "a7b073ead2a733491a4a407e777b2e59"
@patch("os.path.exists")
@patch("flipperzero_cli.check_local_md5")
def test_compare_md5(patch_check_local_md5, patch_exists, capsys):
f0 = Serial()
f0._out_buffer = f"{GOD_HASH}\n\n>: ".encode()
patch_exists.return_value = True
patch_check_local_md5.return_value = GOD_HASH
compare_md5(f0, "/tmp/local_filename.txt")
captured = capsys.readouterr()
assert captured.out == f"OK, same hash ({GOD_HASH})\n"
f0 = Serial()
f0._out_buffer = f"{GOD_HASH}\n\n>: ".encode()
patch_exists.return_value = True
patch_check_local_md5.return_value = BAD_HASH
compare_md5(f0, "/tmp/local_filename.txt")
captured = capsys.readouterr()
assert captured.out == f"""KO different hashes:
local: '{BAD_HASH}'
remote: '{GOD_HASH}'
"""
def test_main(monkeypatch, capsys):
with pytest.raises(SystemExit) as e:
main()
assert e.type == SystemExit
assert e.value.code == 1
captured = capsys.readouterr()
assert captured.out == "Please configure flipper zero serial port\n"
with monkeypatch.context() as m:
call_with(m, [], {"FLIPPER_ZERO_PORT": "/dev/flipper0"})
main(s=Serial)
captured = capsys.readouterr()
assert captured.out == "Command: help\n\n"
with monkeypatch.context() as m:
call_with(m, ["--show-config"], {"FLIPPER_ZERO_PORT": "/dev/flipper0"})
main(s=Serial)
captured = capsys.readouterr()
assert captured.out == """show_banner: 0
hide_command: 0
port: /dev/flipper0
Command: help
"""
with monkeypatch.context() as m:
call_with(m, ["--show-banner"], {"FLIPPER_ZERO_PORT": "/dev/flipper0"})
main(s=Serial)
captured = capsys.readouterr()
assert captured.out == "Command: help\n\n\n"
with monkeypatch.context() as m:
mock_write = mock_open()
with patch.object(builtins, "open", mock_write, create=True) as patched_open:
call_with(m, ["--filename=/tmp/to_save.txt",
"storage", "read", "/ext/badusb/demo_macos.txt"],
{"FLIPPER_ZERO_PORT": "/dev/flipper0"})
with pytest.raises(SystemExit) as e:
main(s=Serial)
assert e.type == SystemExit
assert e.value.code == 1
captured = capsys.readouterr()
assert captured.out == "Command: storage read /ext/badusb/demo_macos.txt\nError in storage read\n"
call_with(m, ["--filename=/tmp/to_write.txt",
"storage", "write", "/ext/badusb/demo_macos.txt"],
{"FLIPPER_ZERO_PORT": "/dev/flipper0"})
with pytest.raises(SystemExit) as e:
main(s=Serial)
assert e.type == SystemExit
assert e.value.code == 1
captured = capsys.readouterr()
assert captured.out == "Command: storage write /ext/badusb/demo_macos.txt\n/tmp/to_write.txt is missing.\n"
with monkeypatch.context() as m:
call_with(m, [
"--filename=/tmp/to_md5.txt", "storage", "md5",
"/ext/badusb/demo_macos.txt"],
{"FLIPPER_ZERO_PORT": "/dev/flipper0"})
with pytest.raises(SystemExit) as e:
main(s=Serial)
captured = capsys.readouterr()
assert captured.out == \
"""Command: storage md5 /ext/badusb/demo_macos.txt
/tmp/to_md5.txt is missing.
"""
|
nledez/flipperzero-cli
|
tests/test_cli.py
|
test_cli.py
|
py
| 14,895 |
python
|
en
|
code
| 6 |
github-code
|
6
|
10031616812
|
from PyQt4 import QtCore, QtGui
from ui_fwabar import Ui_Fwabar_Dialog
# create the dialog
class fwabarDialog(QtGui.QDialog):
def __init__(self, parent):
QtGui.QDialog.__init__(self, parent)
self.ui = Ui_Fwabar_Dialog()
self.ui.setupUi(self)
|
IGNF/SIGOPT
|
sigopt-mecadepi/flood_waste_assessment/tools/fwabardialog.py
|
fwabardialog.py
|
py
| 260 |
python
|
en
|
code
| 1 |
github-code
|
6
|
37314756203
|
import pandas as pd
import sklearn
import matplotlib.pyplot as plt
import seaborn
import load_data
train = load_data.get_data()[0]
testX = load_data.get_data()[1]
inv_labels = load_data.get_inverted_labels()
trainX = train.drop(['SalePrice'], axis = 1)
trainY = train['SalePrice']
def info_discrete(column):
scatter = seaborn.stripplot(x = column, y = train['SalePrice'], data = train)
if column in inv_labels.keys():
scatter.set_xticklabels(inv_labels[column].values())
else:
pass
plt.show()
def unsignificant_deletion():
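    # Collect the columns whose correlation with SalePrice is weak (|corr| < 0.35)
    # so they can be dropped from both the train and test feature sets.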
correlation = train.corrwith(train.SalePrice)
to_delete = list()
for col_name, corr_value in correlation.items():
if corr_value < 0.35 and corr_value > -0.35:
to_delete.append(col_name)
return to_delete
trainX.drop(unsignificant_deletion(), axis = 1, inplace = True)
testX.drop(unsignificant_deletion(), axis = 1, inplace = True)
trainX['Alley'].fillna(2, inplace = True)
trainX['GarageType'].fillna(6, inplace = True)
trainX['GarageYrBlt'].fillna(0, inplace = True)
trainX['GarageFinish'].fillna(3, inplace = True)
trainX['BsmtQual'].fillna(4, inplace = True)
trainX['MasVnrArea'].fillna(0, inplace = True)
trainX['LotFrontage'].fillna(0, inplace = True)
trainX['PoolQC'].fillna(3, inplace = True)
# testX['Alley'].fillna(2, inplace = True)
# testX['GarageType'].fillna(6, inplace = True)
# testX['GarageYrBlt'].fillna(0, inplace = True)
# testX['GarageFinish'].fillna(3, inplace = True)
# testX['BsmtQual'].fillna(4, inplace = True)
# testX['MasVnrArea'].fillna(0, inplace = True)
# testX['LotFrontage'].fillna(0, inplace = True)
# testX['PoolQC'].fillna(3, inplace = True)
# print(trainX['Alley'].unique())
print(testX.info())
print(testX['KitchenQual'].unique())
|
jantar44/reg_house_prices
|
house_prices/house_prices.py
|
house_prices.py
|
py
| 1,771 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14790213249
|
# Given the head of a sorted linked list, delete all duplicates such that each element
# appears only once. Return the linked list sorted as well.
# input: head = [1,1,2]
# output = [1,2]
class ListNode:
def __init__(self, val, next):
self.val = val
self.next = next
listNode4 = ListNode(4, None)
listNode3 = ListNode(2, listNode4)
listNode2 = ListNode(2, listNode3)
listNode1 = ListNode(2, listNode2)
root = ListNode(1, listNode1)
# 1 -> 2 -> 2 -> 2 -> 4
def removeDup(root):
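    # Single pass over the sorted list: for each node, skip any following nodes
    # carrying the same value before advancing, so each value is kept exactly once.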
    if root is None:
        return None
cur = root
while cur != None:
while cur.next != None and cur.val == cur.next.val:
cur.next = cur.next.next
cur = cur.next
return root
root1 = removeDup(root)
while root1 != None:
print(root1.val)
root1 = root1.next
|
SonMichael/algorithm
|
linked_list_remove_duplicate_from_sorted_list.py
|
linked_list_remove_duplicate_from_sorted_list.py
|
py
| 807 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17541028772
|
#!/usr/bin/env python
from pwn import *
from Crypto.Util.number import bytes_to_long, long_to_bytes
from Crypto.Cipher import AES
import hashlib
import os
import base64
from gmpy2 import is_prime
class Rng:
def __init__(self, seed):
self.seed = seed
self.generated = b""
self.num = 0
def more_bytes(self):
self.generated += hashlib.sha256(self.seed).digest()
# increase seed by 1 and ensure the seed is 32 bytes long (prepend with NULL bytes)
self.seed = long_to_bytes(bytes_to_long(self.seed) + 1, 32)
self.num += 256
def getbits(self, num=64):
while (self.num < num):
self.more_bytes()
x = bytes_to_long(self.generated)
self.num -= num
self.generated = b""
# this is not called for our primes
if self.num > 0:
self.generated = long_to_bytes(x >> num, self.num // 8)
# ANDs with 0xffff...ffff to ensure only a NUM length number is returned
return x & ((1 << num) - 1)
class DiffieHellman:
def gen_prime(self):
prime = self.rng.getbits(512)
iter = 0
while not is_prime(prime):
iter += 1
prime = self.rng.getbits(512)
print("Generated after", iter, "iterations")
return prime
def __init__(self, seed, prime=None):
self.rng = Rng(seed)
if prime is None:
prime = self.gen_prime()
self.prime = prime
self.my_secret = self.rng.getbits()
self.my_number = pow(5, self.my_secret, prime)
self.shared = 1337
def set_other(self, x):
self.shared ^= pow(x, self.my_secret, self.prime)
def pad32(x):
return (b"\x00"*32+x)[-32:]
def xor32(a, b):
return bytes(x^y for x, y in zip(pad32(a), pad32(b)))
def bit_flip(x):
print("bit-flip str:")
inputstr = b'BA=='
#inputstr = input().strip()
flip_str = base64.b64decode(inputstr)
return xor32(flip_str, x)
def get_values(conn, value):
# receive the bit-flip str:
conn.recvline()
# send the value
conn.send(value + b'\n')
# recv num iterations
num_iter = str(conn.recvline())
bob_num = conn.recvline()
iv_num = conn.recvline()
flag_num = conn.recvline()
results = [
int(num_iter.split(" ")[2:3][0]),
int(bob_num.decode('ascii').split(" ")[2:3][0]),
base64.b64decode(iv_num.decode('ascii')),
base64.b64decode(flag_num.decode('ascii'))
]
return results
def get_num_iter(conn, value):
return get_values(conn, value)[0]
def get_seed(conn):
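    # The service reports how many candidate primes were rejected before one was
    # found, and that count depends on the (bit-flipped) seed. By sending pairs of
    # flip strings built from the bits recovered so far and comparing the reported
    # iteration counts (iter_n vs iter_m below), each further seed bit is recovered.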
# loop over each bit of 64-bit number
sol = 0
maxnum = 128 # 128
for i in range(1, maxnum):
# get two values one with and without the ith bit set
n = sol ^ ((2 ** i) - 2)
m = sol | (1 << i)
# base64 encode values
basen = base64.b64encode(bytes(long_to_bytes(n)))
basem = base64.b64encode(bytes(long_to_bytes(m)))
iter_n = get_num_iter(conn, basen)
iter_m = get_num_iter(conn, basem)
print("N: %s [%d], M: %s [%d]" % (basen, iter_n, basem, iter_m))
if(iter_n != iter_m + 1):
sol = sol | (1 << i)
print("SOL:" + " "*(135-maxnum) + bin(sol)[2:])
return long_to_bytes(sol, 16)
def main(conn):
# compute alice_seed
alice_seed = get_seed(conn)
print(alice_seed)
# perform one iteration with arbitrary input to get a sample of values
results = get_values(conn, b'BA==')
bobnum = results[1]
iv = results[2]
ciphertext = results[3]
# compute the encryption/decryption key
alice = DiffieHellman(bit_flip(alice_seed))
alice.set_other(bobnum)
# decrypt the ciphertext
cipher = AES.new(long_to_bytes(alice.shared, 16)[:16], AES.MODE_CBC, IV=iv)
plaintext = cipher.decrypt(ciphertext)
print(plaintext)
if __name__ == '__main__':
HOST = "127.0.0.1"
PORT = 1337
conn = remote(HOST, PORT)
main(conn)
|
cybernatedwizards/CybernatedWizardsCTF
|
2020/DragonCTF_2020/bitflip1/sol.py
|
sol.py
|
py
| 3,818 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30543622706
|
import matplotlib.pyplot as plt
import numpy as np
plt.ion()
plt.rcParams['axes.labelsize'] = 18
plt.rcParams['axes.titlesize'] = 20
plt.rcParams['font.size'] = 16
plt.rcParams['lines.linewidth'] = 2.0
plt.rcParams['lines.markersize'] = 8
plt.rcParams['legend.fontsize'] = 14
class Simulator:
eps = 1e-16
def __init__(self, num_agents = 15, max_iterations = 1000, step_size = None, \
convergence_tol = 0.001, x_bounds = (0,1), y_bounds = (0, 1)):
# convergence_tol : % of dimensions of the room
self.convergence_tol = convergence_tol
# Dimensions of the room
self.x_bounds = x_bounds
self.y_bounds = y_bounds
self.step_size = step_size
self.num_agents = num_agents
self.max_iterations = max_iterations
self.iteration = 0
self.converged_at_iteration = None
self.mean_step = []
self.__initialize_positions()
self.__choose_attractors()
def __find_third_vertex(self, first_vertex, second_vertex):
""" Returns both possible options for the third vertex that makes an
equilateral triangle with two given points"""
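        # The third vertex lies on the perpendicular bisector of the segment joining
        # the two given vertices, at distance sqrt(3)/2 * side length from their
        # midpoint; solving the resulting quadratic in x yields the two mirror-image
        # candidates.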
# Midpoint:
mid_x, mid_y = 0.5*(first_vertex[0] + second_vertex[0]), 0.5*(first_vertex[1] + second_vertex[1])
# Squared length of side of equilateral triangle:
D2 = (first_vertex[0] - second_vertex[0])**2 + (first_vertex[1] - second_vertex[1])**2
y_diff = first_vertex[1] - second_vertex[1]
if y_diff < Simulator.eps:
# avoid division by zero
y_diff += Simulator.eps
# Negative Reciprocal slope of line joining first and second vertex:
slope = -(first_vertex[0] - second_vertex[0]) / y_diff
# Intercept of perpendicular bisector line between first and second vertex:
intercept = mid_y - slope * mid_x
# For the quadratic formula:
A = 1
B = -2 * mid_x
C = mid_x**2 - (3/4) * D2 /(slope**2 + 1)
Rx = np.roots([A, B, C])
Ry = slope*Rx + intercept
vertex_options = (Rx, Ry)
return vertex_options
def __find_projections(self, target_location_x, target_location_y, current_x, current_y):
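        # Scale the displacement towards the target by step_size in each axis, so the
        # agent covers only a fraction of the remaining distance per iteration.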
R_vect = np.array([target_location_x - current_x, target_location_y - current_y])
Rx_vect = np.array([target_location_x - current_x, 0])
Ry_vect = np.array([0, target_location_y - current_y])
# Make the distance travelled a proportion of R_vect
x_projection = self.step_size * np.dot(Rx_vect, R_vect) / (np.linalg.norm(Rx_vect) + Simulator.eps)
y_projection = self.step_size * np.dot(Ry_vect, R_vect) / (np.linalg.norm(Ry_vect) + Simulator.eps)
signed_projection = np.sign(R_vect) * np.array([x_projection, y_projection])
return (signed_projection[0], signed_projection[1])
def __initialize_positions(self):
# Container for the whole simulation:
self.X = np.zeros((self.num_agents, self.max_iterations + 1))
self.Y = np.zeros((self.num_agents, self.max_iterations + 1))
# Initialize first positions:
self.X[:,0] = np.random.rand(self.num_agents,)
self.Y[:,0] = np.random.rand(self.num_agents,)
def __choose_attractors(self):
if self.num_agents < 3:
raise Exception('The number of agents must be at least 3')
# Populate the options for each agent to follow, anyone but herself
options = np.arange(self.num_agents)
options = np.tile(options,(len(options),1))
options = options[~np.eye(options.shape[0],dtype=bool)].reshape(options.shape[0],-1)
# Pick two random indices to options for two people to follow
# (scale the random number by the range and round.)
# Actually will need to loop here for the second agent because have to make sure not
# choosing same two people:
# Initialize
follows = np.zeros((self.num_agents, 2))
# First attractor:
follows[:, 0, np.newaxis] = np.round( (options.shape[1] - 1) * np.random.rand(self.num_agents, 1) ).astype(int)
# Second attractor:
for agent in range(self.num_agents):
firstDraw = follows[agent,0]
# Initialize:
secondDraw = firstDraw
while secondDraw == firstDraw:
# Want a different random draw from the options
secondDraw = np.round( (options.shape[1] - 1) * np.random.rand() ).astype(int)
follows[agent,1] = secondDraw
follows=follows.astype(int)
self.first_attractor = options[np.arange(options.shape[0]), follows[:,0], np.newaxis]
self.second_attractor = options[np.arange(options.shape[0]), follows[:,1], np.newaxis]
def _update_positions(self):
"""
This allows each agent to jump directly to the third vertex that would create an equilateral triangle
with the agent and the agent's two targets. However, everyone is jumping at the same time so these
triangles are not likely to be formed until later in the simulation (if ever)
"""
if self.step_size is not None:
if self.step_size > 1:
raise Exception('The step size should be less than 1')
for agent in range(self.num_agents):
# Find the points where you want to go to complete the triangle
first_vertex = (self.X.item((self.first_attractor.item(agent), self.iteration)), \
self.Y.item(self.first_attractor.item(agent), self.iteration))
second_vertex = (self.X.item((self.second_attractor.item(agent), self.iteration)), \
self.Y.item(self.second_attractor.item(agent), self.iteration))
options_x, options_y = self.__find_third_vertex(first_vertex, second_vertex)
# Find the closest of the two vertices to your current position, or the one that is inside the room:
# For now, just don't update position if both are out of bounds
out_of_bounds = (options_x > self.x_bounds[1]) | (options_x < self.x_bounds[0]) | \
(options_y > self.y_bounds[1]) | (options_y < self.y_bounds[0])
options_x = options_x[~out_of_bounds]
options_y = options_y[~out_of_bounds]
current_x = self.X[agent, self.iteration]
current_y = self.Y[agent, self.iteration]
# Update the next position
if len(options_x) > 1:
# Distance to first & second options:
D1 = ( (options_x[0] - current_x)**2 + (options_y[0] - current_y)**2 )**0.5
D2 = ( (options_x[1] - current_x)**2 + (options_y[1] - current_y)**2 )**0.5
closest_ind = np.argmin([D1, D2])
if self.step_size is not None:
x_projection, y_projection = self.__find_projections(options_x.item(closest_ind), \
options_y.item(closest_ind), current_x, current_y)
self.X[agent, self.iteration + 1] = current_x + x_projection
self.Y[agent, self.iteration + 1] = current_y + y_projection
else:
self.X[agent, self.iteration + 1] = options_x[closest_ind]
self.Y[agent, self.iteration + 1] = options_y[closest_ind]
elif len(options_x) == 1:
if self.step_size is not None:
x_projection, y_projection = self.__find_projections(options_x.item(0), \
options_y.item(0), current_x, current_y)
self.X[agent, self.iteration + 1] = current_x + x_projection
self.Y[agent, self.iteration + 1] = current_y + y_projection
else:
self.X[agent, self.iteration + 1] = options_x
self.Y[agent, self.iteration + 1] = options_y
else: # Don't change position
self.X[agent, self.iteration + 1] = current_x
self.Y[agent, self.iteration + 1] = current_y
def plot_positions(self, initialize_plot, plot_sides = False, zoom = False):
if initialize_plot:
# Setting the x and y data explictly for dynamic plot update only works for plot, not scatter:
# Going to follow the first attractor with a different color
self.ax1.plot(self.X[0, self.iteration], self.Y[0, self.iteration], 'r.')
self.ax1.plot(self.X[self.first_attractor.item(0), self.iteration], \
self.Y[self.first_attractor.item(0), self.iteration],'r+')
self.ax1.plot(self.X[self.second_attractor.item(0), self.iteration], \
self.Y[self.second_attractor.item(0), self.iteration],'r+')
self.ax1.plot(self.X[1:, self.iteration], self.Y[1:, self.iteration],'b.')
self.ax1.set_aspect('equal')
self.ax1.set_xlim(self.x_bounds[0], self.x_bounds[1])
self.ax1.set_ylim(self.y_bounds[0], self.y_bounds[1])
self.ax1.set_ylabel("Y")
self.ax1.set_xlabel("X")
self.ax1.set_title("Position of Agents")
else:
# Plot the new position
self.ax1.set_title("Iteration = {}".format(self.iteration))
for lin_num, line in enumerate(self.ax1.lines):
if lin_num==0:
line.set_xdata(self.X[0, self.iteration])
line.set_ydata(self.Y[0, self.iteration])
elif lin_num==1:
line.set_xdata(self.X[self.first_attractor.item(0), self.iteration - 1])
line.set_ydata(self.Y[self.first_attractor.item(0), self.iteration - 1])
elif lin_num==2:
line.set_xdata(self.X[self.second_attractor.item(0), self.iteration - 1])
line.set_ydata(self.Y[self.second_attractor.item(0), self.iteration - 1])
else:
line.set_xdata(self.X[1:, self.iteration])
line.set_ydata(self.Y[1:, self.iteration])
self.fig.canvas.draw()
# This is crucial for viewing the plots from the command line:
try:
plt.pause(0.5)
except Exception:
pass
if plot_sides:
for agent in range(self.num_agents):
# Grab the positions for the attractors of each agent & plot the triangle in green at the end
X_triangle = np.hstack((self.X[agent, self.iteration], \
self.X[self.first_attractor.item(agent), self.iteration], \
self.X[self.second_attractor.item(agent), self.iteration], \
self.X[agent, self.iteration]))
Y_triangle = np.hstack((self.Y[agent, self.iteration], \
self.Y[self.first_attractor.item(agent), self.iteration], \
self.Y[self.second_attractor.item(agent), self.iteration], \
self.Y[agent, self.iteration]))
self.ax1.plot(X_triangle, Y_triangle, '-g')
if zoom:
# Zoom In on the final positions
self.ax1.set_xlim(0.9 * min(self.X[:, self.iteration]), 1.1 * max(self.X[:, self.iteration]))
self.ax1.set_ylim(0.9 * min(self.Y[:, self.iteration]), 1.1 * max(self.Y[:, self.iteration]))
self.ax1.set_aspect('equal')
def run(self, plot_trajectories = True, plot_convergence = True):
if plot_trajectories:
self.fig, self.ax1 = plt.subplots(nrows = 1, ncols = 1, figsize=(8, 8)) # two axes on figure
self.plot_positions(initialize_plot = True)
while self.iteration < self.max_iterations:
# Check for convergence using mean step size for all agents:
self.mean_step.append(np.mean( ( (self.X[:, self.iteration, np.newaxis] \
- self.X[:, self.iteration - 1, np.newaxis] )**2 \
+ (self.Y[:, self.iteration, np.newaxis] \
- self.Y[:, self.iteration - 1, np.newaxis] )**2 )**0.5 ) )
# Define convergence as once the mean step size has dropped below the threshold for 100 iterations
# Stop the simulation once converged.
if self.iteration > 100: # Don't bother with convergence rules unless dealing with a significant simulation
if all( ms <= self.convergence_tol for ms in self.mean_step[self.iteration - 100: self.iteration + 1] ):
self.converged_at_iteration = self.iteration
self.X = self.X[:, np.arange(self.iteration+1)]
self.Y = self.Y[:, np.arange(self.iteration+1)]
break
self._update_positions()
# Update
self.iteration += 1
if plot_trajectories:
self.plot_positions(initialize_plot = False)
if plot_convergence:
# Plot the end positions of the agents, even if we weren't plotting
# their trajectories throughout, along with the sides of the
# triangles and the convergence graph
plot_sides = True
if not plot_trajectories:
self.fig, self.ax1 = plt.subplots(nrows = 1, ncols = 1, figsize=(8, 8))
initialize = True
else:
initialize = False
#if self.step_size is not None:
# zoom = True
#else:
# zoom = False
self.plot_positions(initialize, plot_sides)
self.fig2, self.ax2 = plt.subplots(nrows = 1, ncols = 1, figsize=(8, 4))
self.ax2.plot(self.mean_step)
self.ax2.set_ylabel("Mean Step Size")
self.ax2.set_xlabel("Iteration")
self.ax2.set_title("Motion of Agents")
|
liminal-learner/Chaos
|
Simulator.py
|
Simulator.py
|
py
| 14,112 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32822098386
|
# coding:utf-8
from flask import request
from flask import Flask, render_template
from controller import search, getPage, get_suggestion
import sys
import json
# reload(sys)
# sys.setdefaultencoding('utf-8')
app = Flask(__name__)
@app.route('/')
def index():
return "the server is running!"
# return render_template('/result.html',name = 'zhangsan',)
@app.route('/search', methods=['GET'])
def do_search():
print(request.args)
params = {
'query': request.args.get('query'),
'method': request.args.get('method')
}
res = search(params)
return json.dumps({
'status': 1,
'result': res['result'],
'time': res['time']
}, ensure_ascii=False)
@app.route('/page', methods=['GET'])
def page():
docId = request.args.get('id')
res = getPage(docId)
return json.dumps({
'status': 1,
'page': res
}, ensure_ascii=False)
@app.route('/suggestion', methods=['GET'])
def suggestion():
query =request.args.get('query')
res = get_suggestion(query)
return json.dumps({
'status':1,
'queries':res
}, ensure_ascii=False)
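# Example request (hypothetical values; the accepted "method" strings depend on
# controller.search, which is not shown here):
#   GET http://localhost:5000/search?query=wiki&method=tfidf
#   -> {"status": 1, "result": [...], "time": ...}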
if __name__ == '__main__':
app.run(host="0.0.0.0", port=5000, debug=False)
|
QimingZheng/WSM-Project-WikiSearch
|
app/index.py
|
index.py
|
py
| 1,221 |
python
|
en
|
code
| 4 |
github-code
|
6
|