seq_id (string, length 7–11) | text (string, length 156–1.7M) | repo_name (string, length 7–125) | sub_path (string, length 4–132) | file_name (string, length 4–77) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, may be null) | dataset (string, 1 class) | pt (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
18537038929
|
# prob_link: https://www.codingninjas.com/codestudio/problems/find-duplicate-in-array_8230816?challengeSlug=striver-sde-challenge&leftPanelTab=0
def findDuplicate(arr:list, n:int):
# Write your code here.
# Returns an integer.
p = [0]*(n+1)
for x in arr:
p[x]+=1
if p[x]>1:
return x
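# A quick illustration (hypothetical input; values are assumed to lie in 1..n):
# findDuplicate([2, 5, 3, 2, 1], 5) counts occurrences left to right and
# returns 2, the first value seen a second time.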
|
Red-Pillow/Strivers-SDE-Sheet-Challenge
|
P10_Find_Duplicate in_Array.py
|
P10_Find_Duplicate in_Array.py
|
py
| 336 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21397302549
|
#!/usr/bin/env python3
import argparse
import os
import re
import dataclasses
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, List
"""
Supports following cases:
1. Master version x.y.z needs to be bumped to x.y.z when preparing for official release:
git checkout cluster-test
git merge master
# version = x.y.z
version_bumper.py
# version = x.y.z+1
version_bumper.py --part=minor
# version = x.y+1.0
version_bumper.py --part=major
# version = x+1.0.0
2. Master version x.y.z needs to be bumped to x.y.z-mr-1 when making dev release from feature branch:
git co 123-my-branch
# version = x.y.z
version_bumper.py --mr 123
# version = x.y.z-123-1
And then another call should just bump the dev-version:
version_bumper.py --mr 123
# version = x.y.z-123-2
"""
@dataclass
class Version:
major: int
minor: int
patch: int
mr: int # merge request id
dev: int # sequentially increasing number
def __str__(self):
mr = f"-{self.mr}" if self.mr > 0 else ''
dev = f"-{self.dev}" if self.dev > 0 else ''
return f'{self.major}.{self.minor}.{self.patch}{mr}{dev}'
def bump(self, part: str):
self.__dict__[part] += 1
if part == 'major':
self.minor = self.patch = 0
if part == 'minor':
self.patch = 0
def clone(self) -> 'Version':
return dataclasses.replace(self)
def read_current_version(filepath: Path) -> Version:
for line in filepath.read_text().splitlines():
ver = parse_version(line)
if ver is not None:
return ver
raise RuntimeError('version could not be parsed from ' + str(filepath))
# match X.Y.Z or X.Y.Z-W
# broken down at https://regex101.com/r/IAccOs/3
main_regex = r'TAG\s*\?=\s*?(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)[\-\.]?(?P<details>[\-\w]+)?'
main_pattern = re.compile(main_regex)
def parse_version(line: str) -> Optional[Version]:
match = main_pattern.match(line)
if not match:
return None
ver = Version(major=int(match.group('major')),
minor=int(match.group('minor')),
patch=int(match.group('patch')),
mr=0,
dev=0)
details = match.group('details')
if details is not None:
parse_details(details, ver)
return ver
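# Round-trip example (hypothetical Makefile line, not taken from the repo):
# parse_version('TAG ?= 2.7.1-45-3') -> Version(major=2, minor=7, patch=1, mr=45, dev=3)
# and str() of that Version gives back '2.7.1-45-3'.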
# match X-Y
# broken down at https://regex101.com/r/jtlQ54/3
details_regex = r'(?P<mr>\d+)[\-](?P<dev>\d+)'
details_pattern = re.compile(details_regex)
def parse_details(details: str, ver: Version):
details_match = details_pattern.match(details)
if details_match:
ver.mr = int(details_match.group('mr'))
ver.dev = int(details_match.group('dev'))
def replace_in_files(curr_ver: Version, new_ver: Version, files: List[Path]):
for path in files:
replace_in_file(path, curr_ver, new_ver)
def replace_in_file(filepath: Path, curr_ver: Version, new_ver: Version):
content = filepath.read_text()
new_content = content.replace(str(curr_ver), str(new_ver))
if content != new_content:
filepath.write_text(new_content)
print(f'Version bumped {curr_ver} -> {new_ver} in {filepath}')
else:
raise RuntimeError(f'Version "{curr_ver}" not found in {filepath}')
def project_root() -> Path:
"""Return Racetrack root dir"""
return Path(os.path.abspath(__file__)).parent.parent.absolute()
def bump_version_in_files(version_path: Path, _args, files: List[Path], prod_files: List[Path]):
orig_version = read_current_version(version_path)
if _args.current:
print(orig_version)
return
new_version = orig_version.clone()
if _args.mr and int(_args.mr) != 0:
new_version.mr = int(_args.mr)
if new_version.mr != 0:
new_version.bump('dev')
else:
new_version.bump(_args.part)
files += prod_files
replace_in_files(orig_version, new_version, files)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--current', action='store_true', help='print current version')
parser.add_argument('--mr', help='set merge request number')
parser.add_argument('--part', help='defines which part to bump: major, minor, patch, dev', default="patch")
files_with_version = [
project_root() / 'Makefile',
]
# files bumped in official (non-dev) releases only
prod_files_with_version = [
project_root() / 'racetrack_client/racetrack_client/__init__.py',
]
args = parser.parse_args()
path = project_root() / 'Makefile'
bump_version_in_files(path, args, files_with_version, prod_files_with_version)
|
TheRacetrack/racetrack
|
utils/version_bumper.py
|
version_bumper.py
|
py
| 4,748 |
python
|
en
|
code
| 27 |
github-code
|
6
|
26297662140
|
import numpy as np
import csv
from Perceptron import Perceptron
# Create a Perceptron object
perceptron_and = Perceptron(4, 100, 0.01)
inputs = np.array([[0,0],[0,1],[1,0],[1,1]])
outputs = np.array([0,0,0,1])
perceptron_and.train(inputs, outputs)
with open('poids.csv', 'w', newline='') as csvfile:
fieldnames = ['w0', 'w1', 'w2']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
writer.writerow({'w0': perceptron_and.get_w0(), 'w1': perceptron_and.get_w1(), 'w2': perceptron_and.get_w2()})
|
BaptistePeyrard/python
|
td2/and.py
|
and.py
|
py
| 545 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6315981642
|
from flask import Blueprint, render_template, flash, request, redirect, url_for, jsonify, abort
from app.extensions import cache, pages
from app.tasks import long_task
import flam3, io, base64, struct
from PIL import Image
main = Blueprint('main', __name__)
@main.route('/')
@cache.cached(timeout=1000)
def home():
return render_template('index.html')
@main.route('/task', methods=['GET', 'POST'])
def index():
return render_template("longtask.html")
@main.route('/adder')
def adder():
return render_template("adder.html")
@main.route('/api/add_numbers')
def add_numbers():
a = request.args.get('a', 0, type=int)
b = request.args.get('b', 0, type=int)
return jsonify(result=a + b)
@main.route('/flam3')
def flam3_html():
return render_template("flam3.html")
def hex_to_rgb(hexstr):
return struct.unpack('BBB', b''.fromhex(hexstr[1:]))
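# For reference, hex_to_rgb('#42426f') unpacks to (66, 66, 111) -- the default
# back_color used by gen_flam3 below.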
@main.route('/api/gen_flam3')
def gen_flam3():
point_count = request.args.get('point_count', 0, type=int)
back_color = request.args.get('back_color', "#42426f", type=hex_to_rgb)
front_color = request.args.get('front_color', "#f4a460", type=hex_to_rgb)
selection_limiter = request.args.get('selection_limiter', None, type=str)
colors = (back_color, front_color)
print('selection is', selection_limiter)
# Make sure selection limiter is sane
if selection_limiter is None:
selection_limiter = [False]*point_count
else:
selection_limiter = [bool(int(i)) for i in selection_limiter.split(',')]
# Generate the fractal
print(selection_limiter)
mat_points = flam3.Fractal(point_count=point_count, selection_limiter=selection_limiter).execute()
# Convert fractal data to a matrix of color
img_mat = flam3.point_to_image_mat(mat_points)
img = flam3.mat_to_color(img_mat, colors=colors)
# Save data to BytesIO file object
im = Image.fromarray(img)
f = io.BytesIO()
im.save(f, format='png')
f.seek(0)
return jsonify(result="data:image/png;base64,"+base64.b64encode(f.read()).decode())
@main.route('/status/<task_id>')
def taskstatus(task_id):
task = long_task.AsyncResult(task_id)
if task.state == 'PENDING':
# job did not start yet
response = {
'state': task.state,
'current': 0,
'total': 1,
'status': 'Pending...'
}
elif task.state != 'FAILURE':
response = {
'state': task.state,
'current': task.info.get('current', 0),
'total': task.info.get('total', 1),
'status': task.info.get('status', '')
}
if 'result' in task.info:
response['result'] = task.info['result']
else:
# something went wrong in the background jobself.get
response = {
'state': task.state,
'current': 1,
'total': 1,
'status': str(task.info), # this is the exception raised
}
return jsonify(response)
@main.route('/<path:folder>/<path:path>/')
def page(folder, path):
return render_template('page.html', folder=folder, page=pages.get_or_404(folder, path), page_title=path)
@main.route('/<path:folder>/')
def folder(folder):
folder_dict = sorted(pages.get_or_404(folder=folder))
page_title = folder.replace('_', ' ').title()
return render_template('folder.html', folder=folder, pages=folder_dict, page_title=page_title)
@main.route('/topics/')
def folders():
return render_template('folders.html', folders=pages._pages)
|
akotlerman/flask-website
|
app/controllers/main.py
|
main.py
|
py
| 3,537 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2727040132
|
import pathlib
data_folder = pathlib.Path('data')
# print(data_folder.exists(), data_folder.is_dir())
def make_text(i):
text = ""
text += str(i) + "\n"
text += str(i * 24) + "\n"
text += (i * 12) * "#"
return text
for i in range(20):
label = str(i).zfill(4) + "." + ("ihatezoom" * i)
f = pathlib.Path(label)
out = data_folder / f
out.write_text(make_text(i))
|
elliewix/IS305-2022-Fall
|
week 5/monday.py
|
monday.py
|
py
| 399 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72536213308
|
# Scrape item listings from the Buff marketplace
import asyncio
import aiohttp
from lxml.html import etree
import re
import json
import traceback
import os
from util import fetch_url, get_current_time_str
from models import PriceInfo
import urllib
async def get_goods_info(url, session) -> PriceInfo:
# Fetch the item's information
print(url)
# Retry up to 3 times
for i in range(3):
try:
html_content = await fetch_url(url, session)
result = await parse_html(html_content, session)
result.url = url
print(vars(result))
return result
except:
traceback.print_exc()
continue
# All retries failed; raise to the caller
raise RuntimeError("商品信息获取失败")
def read_headers():
# Read headers.txt and return its contents as a dict of key-value pairs
filePath = os.path.join(os.path.dirname(__file__), 'headers.txt')
with open(filePath, 'r', encoding='utf-8') as f:
text = f.read()
# Key-value pairs
headers = {}
for line in text.split('\n'):
if line:
key, value = line.split(': ')
headers[key] = value
return headers
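# Illustration (hypothetical headers.txt content): a line such as
# "User-Agent: Mozilla/5.0" is split on ': ' and stored as
# {'User-Agent': 'Mozilla/5.0'} in the returned dict.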
async def get_sell_info(goods_id, session):
# Fetch the current sell listings
sell_info_url = f"https://buff.163.com/api/market/goods/sell_order?game=dota2&goods_id={goods_id}&page_num=1&sort_by=default&mode=&allow_tradable_cooldown=1&_=1693538617921"
sell_info = json.loads(await fetch_url(sell_info_url, session))
return sell_info
async def get_buy_info(goods_id, session):
# Fetch the current buy orders
buy_info_url = f"https://buff.163.com/api/market/goods/buy_order?game=dota2&goods_id={goods_id}&page_num=1&_=1693540558052"
buy_info = json.loads(await fetch_url(buy_info_url, session))
return buy_info
async def get_deal_info(goods_id, session):
# Fetch the recent transaction records
deal_info_url = f"https://buff.163.com/api/market/goods/bill_order?game=dota2&goods_id={goods_id}&_=1693543131027"
deal_info = json.loads(await fetch_url(deal_info_url, session))
return deal_info
async def parse_html(htmlContent, session) -> PriceInfo:
# Parse the HTML text and return the item's price information
root = etree.HTML(htmlContent)
# Item name
try:
goods_name = root.xpath('//div[@class="detail-cont"]/div[1]/h1/text()')[0]
except:
print(htmlContent)
raise RuntimeError("商品名称获取失败")
# Number of items currently on sale
goods_num = root.xpath('//ul[@class="new-tab"]/li[1]/a/text()')[0]
goods_num = re.findall("当前在售\((\d+)\)", goods_num)[0]
goods_num = int(goods_num)
# Steam market link
steam_url = root.xpath('//div[@class="detail-summ"]/a/@href')[0]
goods_id = root.xpath('//a[@class="i_Btn i_Btn_mid i_Btn_D_red btn-supply-buy"]/@data-goodsid')[0]
# Fetch sell, buy and transaction info concurrently
sell_info_task = get_sell_info(goods_id, session)
buy_info_task = get_buy_info(goods_id, session)
deal_info_task = get_deal_info(goods_id, session)
sell_info, buy_info, deal_info = await asyncio.gather(sell_info_task, buy_info_task, deal_info_task)
# Lowest listed sale price
lowest_price = sell_info['data']['items'][0]['price'] if sell_info['data']['items'] else "0"
# Highest buy-order price
highest_price = buy_info['data']['items'][0]['price'] if buy_info['data']['items'] else "0"
# Latest transaction price
try:
latest_price = deal_info['data']['items'][0]['price'] if deal_info['data']['items'] else "0"
except:
print("未登录无法获取buff最新成交价")
latest_price = None
result = PriceInfo()
result.min_price = lowest_price
result.highest_buy_price = highest_price
result.name_cn = goods_name.strip()
result.steamUrl = steam_url
result.update_time = get_current_time_str()
result.latest_sale_price = latest_price
result.name_en = steam_url.split('/')[-1].split('?')[0]
# URL-decode the name
result.name_en = urllib.parse.unquote(result.name_en).strip()
result.goods_id = goods_id
return result
async def getGoodsUrls(session):
# Collect item page URLs
url = "https://buff.163.com/api/market/goods?game=dota2&page_num={}&_=1693544159600"
urls = []
for pageNum in range(1, 6):
goods_info = json.loads(await fetch_url(url.format(pageNum), session))
goods_base_url = "https://buff.163.com/goods/{}?from=market#tab=selling"
urls += [goods_base_url.format(i["id"]) for i in goods_info['data']['items']]
return urls
def update_price_info(priceInfo: PriceInfo) -> PriceInfo:
url = priceInfo.url
if not url:
# TODO: derive the url from hash_name
raise RuntimeError("url为空")
async def task():
async with aiohttp.ClientSession() as session:
new_price_info = await get_goods_info(url, session)
return new_price_info
return asyncio.get_event_loop().run_until_complete(task())
|
ZangYUzhang/aeyl-steam
|
buff_spider/__init__.py
|
__init__.py
|
py
| 4,898 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7265936310
|
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from pylab import *
mpl.rcParams['font.sans-serif'] = ['SimHei']
res = {}
for i in range(1, 16): # tally the new cases over 15 days
fileNameStr = './202012' + str(i).zfill(2) + '.csv' # build the file name to read
df = pd.read_csv(fileNameStr, encoding='utf-8')
df['increase'] = df['increase'].astype(int) # np.int was removed from recent NumPy
for idx in range(len(df)): # iterate over every country
if df['country'][idx] in res.keys():
res[df['country'][idx]] = res[df['country'][idx]] + df['increase'][idx]
else:
res[df['country'][idx]] = df['increase'][idx]
lst = sorted(res.items(), key=lambda x:x[1], reverse=True) # sort by number of new cases
country = []
increase = []
for i in range(10): # take the top 10 countries
country.append(lst[i][0])
increase.append(lst[i][1])
plt.title("20201201~20201215 新冠病毒新增人数国家TOP10")
plt.bar(country, increase, label='increase')
plt.legend()
plt.show()
|
Seizzzz/DailyCodes
|
Course 202009/Python/final/c.py
|
c.py
|
py
| 1,009 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74286948987
|
class Solution:
def countStudents(self, students: List[int], sandwiches: List[int]) -> int:
# students=collections.Counter(students)
# for sand in sandwiches:
# if not students[sand]:
# break
# students[sand]-=1
# return sum(students.values())
# or
# for i,sand in enumerate(sandwiches):
# if sand in students:
# students.remove(sand)
# else:
# return len(sandwiches)-i
# return 0
# or
while students:
if sandwiches[0] in students:
students.remove(sandwiches[0])
sandwiches.pop(0)
else:
break
return len(sandwiches)
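# Example: students = [1, 1, 0, 0] with sandwiches = [0, 1, 0, 1] leaves every
# student fed, so 0 is returned; the rotation of the queue never matters, only
# whether someone still wants the sandwich on top, which is why the commented
# counting variants above give the same answer as this simulation.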
|
aameen07/Leetcode_Solutions
|
1700-number-of-students-unable-to-eat-lunch/1700-number-of-students-unable-to-eat-lunch.py
|
1700-number-of-students-unable-to-eat-lunch.py
|
py
| 868 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31963127591
|
import sys
case = int(input())
cnt = 0
for c in range(case):
word = sys.stdin.readline().strip()
letter = []
for w in word:
if w not in letter:
letter.append(w)
elif w in letter:
if letter[-1] == w:
letter.append(w)
else:
break
if len(letter) == len(word):
cnt = cnt + 1
print(cnt)
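# Worked example: for the three words "happy", "new" and "year" every letter
# appears in a single consecutive run, so all of them count and 3 is printed;
# a word like "aba" is rejected because 'a' reappears after 'b'.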
|
yongwoo-jeong/Algorithm
|
백준/Silver/1316.그룹 단어 체커/그룹 단어 체커.py
|
그룹 단어 체커.py
|
py
| 407 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31291343127
|
"""
This module customizes the MayaVi2 UI and adds callbacks to the CitcomS
visualization plugins.
"""
# Enthought library imports.
from enthought.envisage.workbench.action.action_plugin_definition import \
Action, Group, Location, Menu, WorkbenchActionSet
###############################################################################
citcoms_group = Group(id="CitcomsMenuGroup",
location=Location(path="MenuBar",
after="FileMenuGroup"))
citcoms_menu = Menu(
id = "CitcomsMenu",
name = "&CitcomS",
location = Location(path="MenuBar/CitcomsMenuGroup"),
)
citcoms_open_menu = Menu(
id = "CitcomsOpenMenu",
name = "&Open",
location = Location(path="MenuBar/CitcomsMenu/additions"),
)
citcoms_modules_menu = Menu(
id = "CitcomsModulesMenu",
name = "&Modules",
location = Location(path="MenuBar/CitcomsMenu/additions",
after="CitcomsOpenMenu"),
)
citcoms_filters_menu = Menu(
id = "CitcomsFiltersMenu",
name = "&Filters",
location = Location(path="MenuBar/CitcomsMenu/additions",
after="CitcomsModulesMenu"),
)
###############################################################################
# old name: enthought.mayavi.plugins.OpenCitcomSFILES.OpenCitcomSVTKFILE
citcoms_open_vtk = Action(
id = "OpenCitcomsVTKFile",
class_name = "citcoms_display.actions.OpenVTKAction",
name = "CitcomS &VTK file",
#image = "images/new_scene.png",
tooltip = "Open a CitcomS VTK data file",
description = "Open a CitcomS VTK data file",
locations = [Location(path="MenuBar/CitcomsMenu/CitcomsOpenMenu/additions")]
)
# old name: enthought.mayavi.plugins.OpenCitcomSFILES.OpenCitcomSHDFFILE
citcoms_open_hdf = Action(
id = "OpenCitcomsHDF5File",
class_name = "citcoms_display.actions.OpenHDF5Action",
name = "CitcomS &HDF5 file",
#image = "images/new_scene.png",
tooltip = "Open a CitcomS HDF5 data file",
description = "Open a CitcomS HDF5 data file",
locations = [Location(path="MenuBar/CitcomsMenu/CitcomsOpenMenu/additions",
after="OpenCitcomsVTKFile"),]
)
# old name: enthought.mayavi.plugins.CitcomSFilterActions.CitcomSreduce
citcoms_reduce_filter = Action(
id = "CitcomsReduceFilter",
class_name = "citcoms_display.actions.ReduceFilterAction",
name = "&Reduce Grid",
#image = "images/new_scene.png",
tooltip = "Display a ReduceGrid for interpolation",
description = "Display a ReduceGrid for interpolation",
locations = [Location(path="MenuBar/CitcomsMenu/CitcomsFiltersMenu/additions"),]
)
# old name: enthought.mayavi.plugins.CitcomSFilterActions.CitcomSshowCaps
citcoms_cap_filter = Action(
id = "CitcomsShowCapsFilter",
class_name = "citcoms_display.actions.ShowCapsFilterAction",
name = "&Show Caps",
#image = "images/new_scene.png",
tooltip = "Display a specified range of caps",
description = "Display a specified range of caps",
locations = [Location(path="MenuBar/CitcomsMenu/CitcomsFiltersMenu/additions"),]
)
###############################################################################
action_set = WorkbenchActionSet(
id = 'citcoms_display.action_set',
name = 'CitcomsActionSet',
groups = [citcoms_group],
menus = [citcoms_menu,
citcoms_open_menu,
citcoms_modules_menu,
citcoms_filters_menu,],
actions = [citcoms_open_vtk,
citcoms_open_hdf,
citcoms_reduce_filter,
citcoms_cap_filter,]
)
###############################################################################
requires = []
extensions = [action_set]
|
geodynamics/citcoms
|
visual/Mayavi2/citcoms_display/custom_ui.py
|
custom_ui.py
|
py
| 3,862 |
python
|
en
|
code
| 39 |
github-code
|
6
|
73016401788
|
import os
import subprocess
def check_suffix(filepath):
suffix = [".h", ".i", ".c", ".cc", "cpp"]
# .i used by tensorflow for helper macros and typemaps
for s in suffix:
if filepath.endswith(s):
return 1
return 0
def get_file_loc(filepath):
cmd = "cloc " + filepath
cmd_result = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
cloc_info = cmd_result.stdout.readlines()
text = bytes.decode(cloc_info[-2]).split(" ")[-1].strip()
return int(text) if text != "" else 0
class Project:
def __init__(self, paths):
self.statistic = {}
self.loc = 0
for path in paths:
self.search_file(path)
def search_file(self, path):
for i in os.listdir(path):
child = path + "/" + i
if os.path.isfile(child):
if check_suffix(child):
print(child)
self.loc += get_file_loc(child)
self.get_statistic(child)
if os.path.isdir(child):
self.search_file(child)
def get_statistic(self, filename):
cmd = "./token_processor/build/token-processor " + filename
cmd_result = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
for l in cmd_result.stdout.readlines():
line = bytes.decode(l)[:-1]
if line.startswith("Py") or line.startswith("PY"):
if line in self.statistic:
self.statistic[line] += 1
else:
self.statistic[line] = 1
if __name__=="__main__":
tensorflow_path = [
"../corpus/tensorflow/tensorflow/tensorflow/python",
"../corpus/tensorflow/tensorflow/tensorflow/lite",
]
pytorch_path = [
"../corpus/pytorch/pytorch/caffe2",
"../corpus/pytorch/pytorch/torch/csrc",
"../corpus/pytorch/pytorch/tools/autograd/templates",
]
projs = {"tensorflow": tensorflow_path, "pytorch": pytorch_path}
for (name, path) in projs.items():
print("===== {} =====".format(name))
proj = Project(path)
print("interface loc : {}".format(proj.loc))
apis = proj.statistic
apis_sorted = sorted(apis.items(), key = lambda d : d[1], reverse = True)
path_prefix = "../data/"
suffix = ".capi.dat"
with open(path_prefix + name + suffix, 'w') as f:
for (k, v) in apis_sorted:
f.write("{}:{}".format(k, v) + '\n')
|
S4Plus/pyceac
|
code/base_statistic_ex.py
|
base_statistic_ex.py
|
py
| 2,501 |
python
|
en
|
code
| 3 |
github-code
|
6
|
17246495292
|
#!/usr/bin/env python2
import argparse
import ast
import json
import logging
import os
from collections import namedtuple
import tqdm
import sys
sys.path.append('.')
print(sys.path)
from srcseq.astunparser import Unparser, WriterBase
def get_number_of_lines(fobj):
    # Helper assumed by file_tqdm; it is not imported anywhere in this file,
    # so a minimal version is defined here: count the lines, then rewind.
    count = sum(1 for _ in fobj)
    fobj.seek(0)
    return count
def file_tqdm(fobj):
    return tqdm.tqdm(fobj, total=get_number_of_lines(fobj))
SrcASTToken = namedtuple("SrcASTToken", "text type lineno col_offset")
logging.basicConfig(level=logging.INFO)
class MyListFile(list, WriterBase):
def write(self, text, type=None, node=None):
text = text.strip()
lineno = node and node.lineno
col_offset = node and node.col_offset
if len(text) > 0:
# write `Str` as it is. `Num` will be kept as a string.
text = eval(text) if type == "Str" else text
self.append(SrcASTToken(text, type, lineno, col_offset))
def flush(self):
pass
def my_tokenize(code_str):
t = ast.parse(code_str)
lst = MyListFile()
Unparser(t, lst)
return lst
def main():
parser = argparse.ArgumentParser(
description="Generate datapoints from source code",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--files_path", "-f", required=True,
help="Filepath with the filenames to be parsed")
parser.add_argument("--save", "-o", default="/tmp/dps.jsonl",
help="Filepath with the output dps")
parser.add_argument("--base_dir", "-b",
help="Base dir to append for the fps."
" If not given, use the dir of `--files_path`.")
args = parser.parse_args()
args.base_dir = args.base_dir or os.path.dirname(args.files_path)
if os.path.exists(args.save):
os.remove(args.save)
num_dps = 0
logging.info("Loading files from: {}".format(args.base_dir))
with open(args.files_path, "r") as fin, open(args.save, "w") as fout:
for i_line, line in enumerate(file_tqdm(fin)):
rel_src_fp = line.strip()
abs_src_fp = os.path.join(args.base_dir, rel_src_fp)
try:
values, types_, linenos, col_offsets = zip(*my_tokenize(open(abs_src_fp).read()))
if len(values) > 1:
json.dump({
'rel_src_fp': rel_src_fp,
'values': values,
'types': types_,
'linenos': linenos,
'col_offsets': col_offsets,
}, fp=fout)
fout.write("\n")
num_dps += 1
else:
# logging.info("In processing {}-th file `{}`: empty token list.".format(i_line, rel_src_fp))
pass
except Exception as e:
logging.warning("In processing {}-th file `{}`:\n\t{}".format(i_line, rel_src_fp, e))
continue
logging.info("Wrote {} datapoints to {}".format(num_dps, args.save))
if __name__ == "__main__":
main()
|
ReversalS/coop-code-learning
|
views/PythonExtractor/source/srcseq/generate_data.py
|
generate_data.py
|
py
| 2,978 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22293771882
|
#!/usr/bin/python3
"""This module contains decorator functions for the views. These includes:
- token_required
"""
import jwt
from functools import wraps
from flask import request, make_response
from os import environ
from flask import jsonify
SECRET_KEY = environ.get('SECRET_KEY')
def token_required(f):
"""Checks if a token is passed by the front-end to the endpoint"""
@wraps(f)
def decorator(*args, **kwargs):
token = request.headers.get('x-token') or request.args.get('x-token')
try:
data = jwt.decode(token, SECRET_KEY, algorithms=['HS256'])
user_email = data['email']
return f(user_email, *args, **kwargs)
except AttributeError:
response = make_response(jsonify({'error': 'token is missing'}), 403)
response.headers['location'] = 'http://0.0.0.0:5000/login'
return response
except Exception as e:
print(e)
response = make_response(jsonify({'error': 'invalid token'}), 403)
response.headers['location'] = 'http://0.0.0.0:5000/login'
return response
return decorator
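# Usage sketch (hypothetical view, not part of this module): the wrapped view
# receives the decoded email as its first positional argument.
#
#   @app_views.route('/me')
#   @token_required
#   def get_me(user_email):
#       return jsonify({'email': user_email})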
|
Sonlowami/CaseShare
|
src/api/v1/views/decorators.py
|
decorators.py
|
py
| 1,141 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4971500738
|
import socket
import threading
import datetime
def acao_cliente(client_socket, client_address):
current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(f"Conexão recebida de {client_address[0]}:{client_address[1]} em {current_time}")
with open("honeypot_log.txt", "a") as log_file:
log_file.write(f"Conexão recebida de {client_address[0]}:{client_address[1]} em {current_time}\n")
response = "Bem-vindo ao honeypot!\n"
client_socket.send(response.encode())
while True:
data = client_socket.recv(1024)
if not data:
break
with open("honeypot_log.txt", "a") as log_file:
log_file.write(f"Dados recebidos de {client_address[0]}:{client_address[1]} em {current_time}:\n")
log_file.write(data.decode())
log_file.write("\n")
analise_trafego(data)
response = "Obrigado por sua solicitação.\n"
client_socket.send(response.encode())
client_socket.close()
def analise_trafego(data):
pass
def honeypot(port):
#Socket TCP
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(('localhost', port))
server_socket.listen(5)
print(f"Aguardando conexões na porta {port}...")
while True:
client_socket, client_address = server_socket.accept()
client_thread = threading.Thread(target=acao_cliente, args=(client_socket, client_address))
client_thread.start()
honeypot(8080)
|
T0tsuK4/honeypot
|
honeypot.py
|
honeypot.py
|
py
| 1,673 |
python
|
en
|
code
| 2 |
github-code
|
6
|
74766917948
|
#-------------------------------------------------------------------------------
# Recipes tests
#-------------------------------------------------------------------------------
import io
import os
import pytest
from pathlib import Path
from cookbook.db import get_db
# Data generators for testing.
#-------------------------------------------------------------------------------
def image_data(
image_bytes = b'hopefully this is a cat image',
image_file_name = 'image.jpg'):
return (io.BytesIO(image_bytes), image_file_name)
def recipe_data(
title = 'different recipe',
author = 'oliver jameson',
description = 'dot dot dot',
source_url = 'http://google.com',
image = 'default',
servings = 1,
prep_time = 4,
cook_time = 8,
ingredients = 'six\nfive\nfour',
instructions = 'new instructions\ngo here'
):
# NOTE: Hack because we can't use this function as a default value.
if image == 'default':
image = image_data()
return {
'title': title,
'author': author,
'description': description,
'source_url': source_url,
'image': image,
'servings': servings,
'prep_time': prep_time,
'cook_time': cook_time,
'ingredients': ingredients,
'instructions': instructions,
}
def yaml_data(
title = 'test recipe',
author = 'chef ramsay',
description = 'yummy',
source_url = 'http://example.com',
servings = 2,
prep_time = 5,
cook_time = 10,
ingredients = '1tbsp nonsense',
instructions = 'put the bla in the bla\nthen do the thing',
yaml_file_name = 'test-recipe.yaml'
):
ingredients_list = '\n'.join([f'- {i.strip()}'
for i in ingredients.split('\n')
if len(i.strip()) > 0])
instructions_list = '\n'.join([f'- {i.strip()}'
for i in instructions.split('\n')
if len(i.strip()) > 0])
yaml_bytes = f'''title: {title}
author: {author}
description: {description}
source_url: {source_url}
servings: {servings}
prep_time: {prep_time}
cook_time: {cook_time}
ingredients:
{ingredients_list}
instructions:
{instructions_list}
'''.encode()
return (io.BytesIO(yaml_bytes), yaml_file_name)
# Test index route.
#-------------------------------------------------------------------------------
def test_index(client, auth):
response = client.get('/recipes', follow_redirects=True)
assert b'Log In' in response.data
assert b'Register' in response.data
auth.login()
response = client.get('/recipes')
assert b'Log Out' in response.data
assert b'test recipe' in response.data
assert b'user_images/whatever.jpg' in response.data
assert b'href=\'/recipes/add\'' in response.data
assert b'href=\'/recipes/view/1\'' in response.data
# Authentication is required.
#-------------------------------------------------------------------------------
@pytest.mark.parametrize('path', (
'/recipes/add',
'/recipes/edit/1',
'/recipes/delete/1',
))
def test_login_required(client, path):
response = client.post(path)
assert response.headers['Location'] == '/auth/login'
# Unauthenticated access is prevented.
#-------------------------------------------------------------------------------
def test_data_privacy(app, client, auth):
with app.app_context():
db = get_db()
db.execute('UPDATE recipe SET user_id = 2 WHERE id = 1')
db.commit()
auth.login()
# Current user can't access other user's recipe.
assert client.post('/recipes/edit/1', data=recipe_data()).status_code == 404
assert client.post('/recipes/delete/1').status_code == 404
assert client.get('/recipes/view/1').status_code == 404
# Current user doesn't see other user's view link.
assert b'href=\'/recipes/view/1\'' not in client.get('/').data
# Recipes must exist to be operated on.
#-------------------------------------------------------------------------------
def test_exists_required(client, auth):
auth.login()
response = client.post('/recipes/delete/2')
assert response.status_code == 404
assert b'Recipe id 2 not found' in response.data
response = client.post('/recipes/edit/2', data=recipe_data())
assert response.status_code == 404
assert b'Recipe id 2 not found' in response.data
# Recipes must be added to the database.
#-------------------------------------------------------------------------------
def test_add(client, auth, app):
auth.login()
assert client.get('/recipes/add').status_code == 200
response = client.post('/recipes/add', data=recipe_data())
assert response.headers['Location'] == '/recipes/view/2'
with app.app_context():
db = get_db()
count = db.execute('SELECT COUNT(id) FROM recipe').fetchone()[0]
assert count == 2
# Recipes must be viewable.
#-------------------------------------------------------------------------------
def test_view(client, auth, app):
auth.login()
response = client.get('/recipes/view/1')
assert response.status_code == 200
assert b'1tbsp nonsense' in response.data
# Recipes must be edited in the database.
#-------------------------------------------------------------------------------
def test_edit(client, auth, app):
auth.login()
assert client.get('/recipes/edit/1').status_code == 200
client.post('/recipes/edit/1', data=recipe_data())
with app.app_context():
db = get_db()
post = db.execute('SELECT * FROM recipe WHERE id = 1').fetchone()
assert post['title'] == 'different recipe'
# Recipes must be validated when added or edited.
#-------------------------------------------------------------------------------
@pytest.mark.parametrize('path', (
'/recipes/add',
'/recipes/edit/1',
))
def test_add_edit_validate(client, auth, path):
auth.login()
recipe = recipe_data(title='')
response = client.post(path, data=recipe)
assert b'Title is required.' in response.data
recipe = recipe_data(author='')
response = client.post(path, data=recipe)
assert b'Author is required.' in response.data
recipe = recipe_data(description='')
response = client.post(path, data=recipe)
assert b'Description is required.' in response.data
recipe = recipe_data(source_url='')
response = client.post(path, data=recipe)
assert b'Source URL is required.' in response.data
recipe = recipe_data(image=image_data(image_file_name=''))
response = client.post(path, data=recipe)
assert b'Image is required.' in response.data
recipe = recipe_data(image=image_data(image_file_name='uhoh.exe'))
response = client.post(path, data=recipe)
assert b'Image not allowed.' in response.data
recipe = recipe_data(servings='')
response = client.post(path, data=recipe)
assert b'Servings is required.' in response.data
recipe = recipe_data(prep_time='')
response = client.post(path, data=recipe)
assert b'Prep Time is required.' in response.data
recipe = recipe_data(cook_time='')
response = client.post(path, data=recipe)
assert b'Cook Time is required.' in response.data
recipe = recipe_data(ingredients='')
response = client.post(path, data=recipe)
assert b'Ingredients is required.' in response.data
recipe = recipe_data(instructions='')
response = client.post(path, data=recipe)
assert b'Instructions is required.' in response.data
# Recipes must be deletable.
#-------------------------------------------------------------------------------
# NOTE: Do we need this?
user_images = Path(__file__).parent / 'user_images'
def test_delete(client, auth, app):
# assert os.path.exists(os.path.join(user_images, 'whatever.jpg'))
auth.login()
response = client.post('/recipes/delete/1')
assert response.headers['Location'] == '/recipes'
with app.app_context():
db = get_db()
recipe = db.execute('SELECT * FROM recipe WHERE id = 1').fetchone()
assert recipe is None
# TODO: Test whether associated image is deleted.
# assert not os.path.exists(os.path.join(user_images, 'whatever.jpg'))
# Recipes must be exportable.
#-------------------------------------------------------------------------------
def test_export(client, auth, app):
auth.login()
response = client.get('/recipes/export/1')
expected = yaml_data()
assert response.get_data() == expected[0].getvalue()
|
cmvanb/cookbook
|
tests/test_recipes.py
|
test_recipes.py
|
py
| 8,458 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33207147676
|
from fastapi import HTTPException, status
from db.models import DbLeague
from routers.schemas import LeagueBase
from routers.slug import name_to_slug
from sqlalchemy.orm import Session
def add_team(db: Session, request: LeagueBase):
league = DbLeague(
name=request.name,
country=request.country,
img=f"images/leagues/{request.img}",
slug=name_to_slug(request.name)
)
db.add(league)
db.commit()
db.refresh(league)
return league
def get_all_teams(db: Session):
return db.query(DbLeague).all()
def get_team_id(db: Session, league_id: int):
league = db.query(DbLeague).filter(DbLeague.id == league_id).first()
if not league:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="League has not been found!")
return league
|
rbujny/League-Team-Players
|
db/db_league.py
|
db_league.py
|
py
| 816 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27388540421
|
from discord.ext import commands
import biscuitfunctions as bf
async def fixprivs(context):
return bf.getprivs(context) in ['quaid', 'quaidling', 'tesseract']
class admin(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(
name='getid',
pass_context = True)
async def getid(self, context):
authid = context.author.id
await context.author.send(f"Your id is {authid}")
await context.message.delete()
@commands.command(
name='fix',
pass_context = True,
help="Takes no arguments.\nShould fix most issues with the bot.\nRun once and check problem, if it persists run it again.\nRunning more than twice does not help.")
@commands.check(fixprivs)
async def fix(self, context):
await context.send("I'm trying to fix myself!", delete_after=60)
connections = ""
print(self.bot.voice_clients)
if self.bot.voice_clients:
for x in self.bot.voice_clients:
await x.disconnect(force=True)
connections = connections + f"{x.channel}, "
await context.send(f"I disconnected from the following channels: {connections[:-2]}", delete_after=60)
await context.send("If that doesn't work, try running !fix again")
return
else:
await context.send("I am not connected to any voice channels, reloading all extensions", delete_after=60)
extensions = list(self.bot.extensions.keys())
print(extensions)
for ext in extensions:
try:
self.bot.reload_extension(ext)
await context.message.channel.send("```{} reloaded```".format(ext), delete_after=60)
print(f"----------------- \nReloaded {ext}\n ----------------- ")
except Exception as e:
await context.message.channel.send("```py\n{}: {}\n```".format(type(e).__name__, str(e)), delete_after=60)
print("```py\n{}: {}\n```".format(type(e).__name__, str(e)))
await context.send("I have tried all my troubleshooting, if I'm still not working talk to my dad.", delete_after=60)
def setup(bot):
bot.add_cog(admin(bot))
|
delta1713/ButteryBiscuitBot
|
admin.py
|
admin.py
|
py
| 2,262 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73348441787
|
from datetime import datetime
import math
from abc import abstractmethod
from typing import List, Tuple
from anteater.core.anomaly import Anomaly, RootCause
from anteater.core.kpi import KPI, Feature, JobConfig
from anteater.core.ts import TimeSeries
from anteater.model.algorithms.spectral_residual import SpectralResidual
from anteater.model.algorithms.slope import check_trend
from anteater.source.metric_loader import MetricLoader
from anteater.utils.common import same_intersection_pairs
from anteater.utils.datetime import DateTimeManager as dt
from anteater.utils.log import logger
from anteater.utils.timer import timer
class Detector:
"""The kpi anomaly detector base class"""
def __init__(self, data_loader: MetricLoader, **kwargs) -> None:
"""The detector base class initializer"""
self.data_loader = data_loader
@abstractmethod
def detect_kpis(self, kpis: List[KPI]) -> List[Anomaly]:
"""Executes anomaly detection on kpis"""
def execute(self, job_config: JobConfig) -> List[Anomaly]:
"""The main function of the detector"""
kpis = job_config.kpis
features = job_config.features
n = job_config.root_cause_num
if not kpis:
logger.info('Empty kpi in detector: %s.',
self.__class__.__name__)
return []
return self._execute(kpis, features, top_n=n)
def get_unique_machine_id(self, start: datetime, end: datetime,
kpis: List[KPI]) -> List[str]:
"""Gets unique machine ids during past minutes"""
metrics = [_kpi.metric for _kpi in kpis]
machine_ids = self.data_loader.get_unique_machines(start, end, metrics)
return machine_ids
def find_root_causes(self, anomalies: List[Anomaly],
features: List[Feature], top_n=3) -> List[Anomaly]:
"""Finds root causes for each anomaly events"""
result = []
for anomaly in anomalies:
root_causes = self.cal_top_rac(anomaly, features, top_n=top_n)
anomaly.root_causes = root_causes
result.append(anomaly)
return result
def cal_top_rac(self, anomaly: Anomaly,
features: List[Feature], top_n=3) -> List[RootCause]:
"""calculates the top n root causes for the anomaly events"""
root_causes = []
for f in features:
ts_scores = self.cal_metric_ab_score(f.metric, anomaly.machine_id)
for _ts, _score in ts_scores:
if not check_trend(_ts.values, f.atrend):
logger.info('Trends Filtered: %s', f.metric)
break
if same_intersection_pairs(_ts.labels, anomaly.labels):
root_causes.append(RootCause(
metric=_ts.metric,
labels=_ts.labels,
score=_score))
priorities = {f.metric: f.priority for f in features}
root_causes.sort(key=lambda x: x.score, reverse=True)
root_causes = root_causes[: top_n]
root_causes.sort(key=lambda x: priorities[x.metric])
return root_causes
def cal_kpi_anomaly_score(self, anomalies: List[Anomaly],
kpis: List[KPI]) -> List[Anomaly]:
"""Calculates anomaly scores for the anomaly kpis"""
atrends = {k.metric: k.atrend for k in kpis}
for _anomaly in anomalies:
metric = _anomaly.metric
machine_id = _anomaly.machine_id
labels = _anomaly.labels
ts_scores = self.cal_metric_ab_score(metric, machine_id)
for _ts, _score in ts_scores:
if not same_intersection_pairs(_ts.labels, labels):
continue
if not check_trend(_ts.values, atrends[metric]):
logger.info('Trends Filtered: %s', metric)
_anomaly.score = 0
else:
_anomaly.score = _score
break
return anomalies
def cal_metric_ab_score(self, metric: str, machine_id: str) \
-> List[Tuple[TimeSeries, int]]:
"""Calculates metric abnormal scores based on sr model"""
start, end = dt.last(minutes=10)
ts_list = self.data_loader.get_metric(
start, end, metric, machine_id=machine_id)
point_count = self.data_loader.expected_point_length(start, end)
model = SpectralResidual(12, 24, 50)
ts_scores = []
for _ts in ts_list:
if sum(_ts.values) == 0 or \
len(_ts.values) < point_count * 0.9 or\
len(_ts.values) > point_count * 1.5 or \
all(x == _ts.values[0] for x in _ts.values):
score = 0
else:
score = model.compute_score(_ts.values)
score = max(score[-25:])
if math.isnan(score) or math.isinf(score):
score = 0
ts_scores.append((_ts, score))
return ts_scores
@timer
def _execute(self, kpis: List[KPI], features: List[Feature], **kwargs) \
-> List[Anomaly]:
logger.info('Execute model: %s.', self.__class__.__name__)
anomalies = self.detect_kpis(kpis)
if anomalies:
logger.info('%d anomalies was detected on %s.',
len(anomalies), self.__class__.__name__)
anomalies = self.find_root_causes(anomalies, features, **kwargs)
anomalies = self.cal_kpi_anomaly_score(anomalies, kpis)
return anomalies
|
openeuler-mirror/gala-anteater
|
anteater/model/detector/base.py
|
base.py
|
py
| 5,619 |
python
|
en
|
code
| 1 |
github-code
|
6
|
13284456276
|
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.views.generic import TemplateView
from frontend import views
from frontend import facebook
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^settings/', views.settings, name='settings'),
url(r'', include('social_auth.urls')),
url(r'^add/', views.add, name='add'),
url(r'^addgroup/', views.addgroup, name='addgroup'),
url(r'^inviteall/(?P<event_id>\w+)', views.inviteall, name='inviteall'),
url(r'^addfriend/', views.addfriend, name='addfriend'),
url(r'^personal/$', views.personal, name='personal'),
url(r'^logout/$', views.logout, name='logout'),
url(r'^search/$', views.search, name='search'),
url(r'^success/$', TemplateView.as_view(template_name="frontend/success.html"), name="event_success"),
url(r'^tutorial/$', TemplateView.as_view(template_name="frontend/tutorial.html"), name="tutorial"),
url(r'^features/$', TemplateView.as_view(template_name="frontend/features.html"), name="features"),
url(r'^cal/$', views.calendar, name="calendar"),
url(r'^eventsXML$', views.eventsXML),
url(r'^dataprocessor$', views.dataprocessor),
url(r'^refresh/', views.refresh, name='refresh'),
url(r'^rsvp/', views.addrsvp, name='addrsvp'),
url(r'^rmrsvp/(?P<id>\w+)/', views.rmrsvp, name='rmrsvp'),
url(r'^rmrsvp/', views.rmrsvp, name='rmrsvp'),
url(r'^removenew/', views.removenew, name='removenew'),
url(r'^invite/', views.invite, name='invite'),
url(r'^rmgroup/(?P<group>\w+)/$', views.rmgroup, name='rmgroup'),
url(r'^importgroup/(?P<group>\w+)/$', facebook.importgroup, name='importgroup'),
url(r'^rmfriend/(?P<user>\w+)/$', views.rmfriend, name='rmfriend'),
url(r'^rmevent/(?P<event>\w+)/$', views.rmevent, name='rmevent'),
url(r'^edit/(?P<event>\w+)/$', views.edit, name='edit'),
url(r'^import_events/$', facebook.import_events, name='import_events'),
url(r'^export_event/(?P<event>\w+)/$', facebook.export_event, name='export_event'),
url(r'^personal_ajax/(?P<event>\w+)/$', views.personal_ajax, name='personal_ajax'),
url(r'^editevent/(?P<event>\w+)/$', views.editevent, name='editevent'),
url(r'^filter/(?P<tag>\w+)/$', views.filter, name='filter'),
url(r'^filter/$', views.filter, name='filter_init'),
url(r'^api/get_names/', views.get_names, name='get_names'),
url(r'^api/get_tags/', views.get_tags, name='get_tags'),
url(r'^api/get_memnames/', views.get_memnames, name='get_memnames'),
)
if not settings.DEBUG:
urlpatterns += patterns('',
(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
)
|
jjchen/cos333
|
frontend/urls.py
|
urls.py
|
py
| 2,723 |
python
|
en
|
code
| 1 |
github-code
|
6
|
40732481853
|
import sys
num = int(input())
dic = {}
for i in range(num):
dic[i+1] = set()
m = int(input())
for i in range(m):
a,b = map(int, sys.stdin.readline().split())
dic[a].add(b)
dic[b].add(a)
visited = list()
def dfs(i,dic):
for j in dic[i]:
if j not in visited:
visited.append(j)
dfs(j,dic)
dfs(1,dic)
print(len(visited)-1)
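# Worked example: with 7 computers and the pairs (1,2) (2,3) (1,5) (5,2) (5,6)
# (4,7), the DFS from computer 1 visits {1, 2, 3, 5, 6}, so 4 is printed
# (everything reachable from computer 1, excluding computer 1 itself).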
|
seriokim/Coding-Study
|
백준 단계별로 풀어보기/silver3/2606.py
|
2606.py
|
py
| 378 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23182012572
|
riddles = {"What language do we learn?": "python",
"Which version of python we learn?": "3.6",
"An element, feature, "
" or factor that is liable to vary or change.": "variable",
"Which loop should we use"
" with evaluation after iteration?": "do-while",
"In python everything is ...": "object"
}
rightAnswers = 0
for key in riddles:
print(key)
answer = input("Enter your answer: ")
if answer.lower() == riddles.get(key):
print("Your answer is right")
rightAnswers += 1
else:
print("Answer is wrong :(")
print("You have {} right answers!".format(rightAnswers))
|
vkhalaim/pythonLearning
|
tceh/lection1/riddles.py
|
riddles.py
|
py
| 682 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31316548360
|
from pycif.utils.path import init_dir
import os
from shutil import copytree, ignore_patterns, rmtree, copy
def ini_mapper(model, transform_type, inputs={}, outputs={}, backup_comps={}):
default_dict = {'input_dates': model.input_dates, 'force_read': True,
'force_dump': True}
dict_surface = dict(default_dict, **{'domain': model.domain})
# Executable
mapper = {'inputs':
{('fluxes', s): dict_surface
for s in ['CH4']},
'outputs': {('concs', s): {}
for s in ['CH4']}
}
return mapper
|
san57/python
|
CIF/build/lib/pycif/plugins/models/flexpart/ini_mapper.py
|
ini_mapper.py
|
py
| 619 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3504439372
|
#!/usr/bin/python3
""" This is the module for a function that divides every
element of matrix by div
"""
def matrix_divided(matrix, div):
"""this function divides every element in a matrix by nubmer div
Args:
matrix (list): list of list of int/float
div (int): nubmer to use as divisor
"""
if not isinstance(matrix, list):
    raise TypeError("matrix must be a matrix (list of lists)"
                    " of integers/floats")
if not isinstance(div, int) and not isinstance(div, float):
    raise TypeError("div must be a number")
if div == 0:
    raise ZeroDivisionError("division by zero")
size_of_item = len(matrix[0])
res = []
for i in matrix:
tmp = []
if len(i) != size_of_item:
raise TypeError("Each row of the matrix must"
" have the same size")
for j in i:
if not isinstance(j, int) and not isinstance(j, float):
raise TypeError(
"matrix must be a matrix (list of lists)"
" of integers/floats"
)
tmp.append(round(j / div, 2))
res.append(tmp)
return res
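# Example: matrix_divided([[1, 2, 3], [4, 5, 6]], 3) returns
# [[0.33, 0.67, 1.0], [1.33, 1.67, 2.0]], each element rounded to 2 decimals.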
|
MATRIX30/alx-higher_level_programming
|
0x07-python-test_driven_development/2-matrix_divided.py
|
2-matrix_divided.py
|
py
| 1,212 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41474623270
|
from __future__ import division # Why is this not standard.
import datetime
import re
class Tribunal(object):
"""System for keeping players in check"""
def __init__(self, config, callback_message_func):
super(Tribunal, self).__init__()
# We need some way of keeping track of whether someone is being bad.
self._user_points = dict() # single values ['psykzz'] = 0
self._user_spam = dict() # of tuples ['psykzz'] = (10,timestamp)
self._common_urls = dict() # single values ['google.com'] = 5
self._blocked_urls = set() # single values ('google.com',)
# Spam config: the default here is to alert on more than 5 messages in a 10 second burst, gaining 5 points for each infraction
self._spam_message_rate = config.get('spam_message_rate', 5)
self._spam_message_per_sec = config.get('spam_message_per_sec', 10)
self._points_per_infraction = config.get('points_per_infraction', 5)
self._point_deduction_rate = config.get('point_deduction_rate', 5)
self._allcap_percent_threshold = float(config.get('allcap_percent_threshold', 1))
self._allcap_min_length = config.get('allcap_min_length', 3)
# regex for finding urls
self.__url_regex_pattern = r'http[s]?://[^\s<>"]+|www\.[^\s<>"]+'
self._url_regex_pattern = re.compile(self.__url_regex_pattern)
# callback messaging function to message through IRC
self._callback_message_func = callback_message_func
def _send(self, target, message):
return self._callback_message_func(target, message)
def requires_action(self, name, limit=50):
if self._get_points(name) > limit:
return True
return False
''' URL System '''
def add_url(self, url):
self._blocked_urls.add(url)
def remove_url(self, url):
self._blocked_urls.discard(url) # only need to remove once, as it's only added once.
def check_url(self, url):
if url in self._blocked_urls:
return True
return False
''' Point System '''
def _get_points(self, name):
if name is None:
return
if name not in self._user_points:
return 0
return self._user_points[name]
def _set_points(self, name, points):
if name is None:
return
if points is None:
return
self._user_points[name] = points
def _add_points(self, name, points=1):
if name not in self._user_points:
self._user_points[name] = points
else:
self._user_points[name] += points
def _remove_points(self, name, points=1):
if name not in self._user_points:
self._user_points[name] = 0
else:
self._user_points[name] -= points
def check_messages(self, client, event):
local_score = 0
error_log = []
# check was there all caps
if self._check_for_allcaps(event):
local_score += self._points_per_infraction # 5 points for all caps
error_log.append('Using AllCaps')
# check for spam :(
spam = self._check_for_individual_spam(event)
self._send(event.target, str(spam))
if spam is False: # Stupid that its false but i want to try and be clever...
local_score += self._points_per_infraction # 5 points for all the things!
error_log.append('Spamming in chat')
# Just do the URL check...
self._capture_urls(event)
# check for spamming urls 5 maybe too many?
'''
if self._capture_urls(event) > 5:
local_score += 1
error_log.append('Spamming URLS')
'''
if local_score > 0:
self._add_points(event.source, local_score)
self._send(event.source, 'OMFG N00B u dun goofed, if you dont stop this shit! Points : {}, errors : {}'.format(self._get_points(event.source), error_log))
else:
self._remove_points(event.source, self._point_deduction_rate)
def _check_for_allcaps(self, event):
if len(event.message) <= self._allcap_min_length:
return False
_len = sum(1 for ch in event.message if ch.isalpha()) # Count only the alphabetic characters
_caps = sum(1 for ch in event.message if ch.isupper()) # Count the number of upper case characters
if _len == 0:
    return False # nothing alphabetic to judge, so it cannot be all-caps
return ((_caps / _len) >= self._allcap_percent_threshold)
def _check_for_individual_spam(self, event):
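# Rate-limiting sketch: each sender effectively holds a small token bucket.
# The allowance starts at _spam_message_rate, refills at
# _spam_message_rate / _spam_message_per_sec tokens per second (capped at the
# starting value), and one token is spent per message; dropping below one
# token flags the sender as spamming (the False return below).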
now = datetime.datetime.now()
allowance = self._spam_message_rate
if event.source in self._user_spam:
time_passed = now - self._user_spam[event.source][1]
allowance = self._user_spam[event.source][0]
allowance += time_passed.seconds * (self._spam_message_rate / self._spam_message_per_sec)
if allowance > self._spam_message_rate:
allowance = self._spam_message_rate
allowance -= 1
self._user_spam[event.source] = (allowance, now)
else:
self._user_spam[event.source] = (allowance, now)
if (allowance < 1):
return False
else:
return allowance
''' I think this whole system needs to be reworked '''
def _capture_urls(self, event, return_urls=False):
# not sure if convert to string is needed.
urls = self._url_regex_pattern.findall( str(event.message) )
for url in urls:
    # Tally sightings in the dict set up in __init__; self._capture_urls is
    # this method, not a dict, so the counts belong in self._common_urls.
    if url in self._common_urls:
        self._common_urls[url] += 1
    else:
        self._common_urls[url] = 1
# Maybe helpful later
if return_urls:
return urls
else:
return len(urls)
def _save_urls(self):
pass
|
psykzz/ircmod_gradiusbot
|
mod_tribunal.py
|
mod_tribunal.py
|
py
| 5,908 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4956366915
|
from django.db import models
from django.contrib.auth.models import User
from django.core.validators import MaxValueValidator, MinValueValidator
# Create your models here.
class Pricebaba(models.Model):
first_name = models.CharField(max_length=100, null=False)
last_name = models.CharField(max_length=100, null=False)
email = models.EmailField(max_length=254)
mobile = models.IntegerField(validators=[MinValueValidator(7000000000), MaxValueValidator(9999999999)], null=False)
age = models.IntegerField(null=False)  # max_length is not valid for IntegerField, so it is dropped
dob = models.DateField()
location = models.CharField(max_length=100, null=False)
created_by = models.ForeignKey(User, on_delete=models.CASCADE, default='1')
def details_edit(self):
return f"/user_edit/{self.id}/"
|
nidhisha-shetty/Human-Resource-CRM-System
|
pricebabaapp/models.py
|
models.py
|
py
| 768 |
python
|
en
|
code
| 1 |
github-code
|
6
|
3035674585
|
# merge two sorted linked lists by splicing them together into
# a linked list that is itself sorted
import sys
#example input:
#List 1: 1 -> 2 -> 4
#List 2: 1 -> 3 -> 4
#output: 1 -> 1 -> 2 -> 3 -> 4 -> 4
from linked_list import list_head,node
list_1 = list_head()
list_2 = list_head()
list_1.append_head(4)
list_1.append_head(2)
list_1.append_head(1)
list_2.append_head(5)
list_2.append_head(4)
list_2.append_head(3)
list_2.append_head(1)
def merge_lists(list_1,list_2):
merged = list_head()
dummy = node('null')
merged.head = dummy
current1 = list_1.head
current2 = list_2.head
while (current1 != None) and (current2 != None):
val1 = current1.data_val
val2 = current2.data_val
if (val1<=val2):
dummy.next = current1
current1 = current1.next
else:
dummy.next = current2
current2 = current2.next
dummy = dummy.next
if current1 != None:
dummy.next = current1
elif current2 != None:
dummy.next = current2
return merged.head.next
merged = merge_lists(list_1,list_2)
current_node = merged
while current_node != None:
print(current_node.data_val)
current_node = current_node.next
|
estimatrixPipiatrix/decision-scientist
|
key_algos/merge_sorted.py
|
merge_sorted.py
|
py
| 1,231 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30301215925
|
########## Use:
########## Last Modified:
########## Author: Yamaga
##### dependencies
from __future__ import print_function, division
import os, sys
from astropy.io import fits
import numpy as np
import astropy.io.fits
from astropy.nddata import Cutout2D
from astropy import units as u
import shutil
import optparse
import astropy
print("input")
#### input
obj = raw_input("object_name (ex. NGC7538) : ")
regrid = raw_input('IR fitsfiles (XXX.fits,YYY.fits...) : ').split(',') # XXX.fits,YYY.fits
template2 = raw_input('regrid_template (ZZZ.fits) : ') # ZZZ.fits
print('===================================================')
waveli = []
# wavelenge search
for k in range(0,len(regrid)):
print("search wavelen"+str(k+1)+" th start.")
print("")
li = []
hdulist = astropy.io.fits.open(regrid[k])
hdu = hdulist[0]
data = hdu.data
header1 = hdu.header
try:
a = hdu.header["WAVELEN"]
except:
try:
a = hdulist[0].header["WAVELNTH"]
except:
print('===================================================')
print(regrid[k])  # infile is not defined yet in this first pass
a = input("WAVELEN = ")
print('===================================================')
waveli.append(a)
print('===================================================')
print("1st regrid phase")
print("")
### regrid1
fitsnames = []
template1 = regrid
for k in range(len(regrid)):
image = '.image'
pre = 'regrid_'
### CASAtasks
importfits(fitsimage=regrid[k], imagename=regrid[k] + image)
importfits(fitsimage=template1[k], imagename=template1[k] + image)
imregrid(imagename=regrid[k] + image, output= pre+regrid[k]+image,template=template1[k] + image)
print(pre+regrid[k]+image)
exportfits(imagename=pre+regrid[k]+image, fitsimage= pre+regrid[k], overwrite=True)
fitsnames.append(pre+regrid[k])
print("1st regrid has finished.")
print('===================================================')
print('===================================================')
print("saturate_delete phase")
print("")
### satu_delete
infile = fitsnames
fitsnames = []
wavelen = []
# wavelenge search
for k in range(0,len(infile)):
li = []
hdulist = astropy.io.fits.open(infile[k])
hdu = hdulist[0]
data = hdu.data
header1 = hdu.header
x = hdu.header['NAXIS1']
y = hdu.header['NAXIS2']
hdu.header['OBJECT'] = obj
try:
waveli[k] = hdu.header["WAVELEN"]
except:
hdu.header['WAVELEN'] = waveli[k]
### saturate delete
for i in range(0,y):
for j in range(0,x):
v = data[i][j]
if np.isnan(v):  # note: "v == np.nan" is always False, so use np.isnan
v = np.nan
elif v <= 0:
v = np.nan
li.append(v)
data = np.reshape(li,[y,x]) # reshpe(x*y)
head = astropy.io.fits.PrimaryHDU(data = data)
head.header = header1
filename = obj+"_"+str(waveli[k])+".fits"
fitsnames.append(filename)
wavelen.append(waveli[k])
head.writeto(filename, overwrite=True)
print("satu_delete "+str(k+1)+" th has finished.")
print(" ")
print("wavelen : "+str(wavelen))
print("waveli : "+str(waveli))
print(fitsnames)
print("saturate_delete has finished.")
print('===================================================')
print('===================================================')
print("2nd regrid phase")
print("")
### regrid2
regrid = fitsnames
fitsnames = []
for k in range(len(regrid)):
image = '.image'
pre = 'regrid_'
### CASAtasks
importfits(fitsimage=regrid[k], imagename=regrid[k] + image)
importfits(fitsimage=template2, imagename=template2 + image)
imregrid(imagename=regrid[k] + image, output= pre+regrid[k]+image,template=template2 + image)
print(pre+regrid[k]+image)
exportfits(imagename=pre+regrid[k]+image, fitsimage= pre+regrid[k], overwrite=True)
fitsnames.append(pre+regrid[k])
print(fitsnames)
print("2nd regrid has finished.")
print('===================================================')
print("FINISHED!")
### create new folder
os.mkdir(obj+"_match")
for name in fitsnames:
shutil.move(name,obj+"_match")
|
Sound-110316/Personal_repository
|
pix_awase.py
|
pix_awase.py
|
py
| 4,121 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27741430831
|
import os
import fileinput
import logging
import argparse
import shutil
import re
from sys import platform
import socket
# import reggie source code
# use reggie2.0 functions by adding the path
import settings
settings.init() # Call only once
import sys
sys.path.append(settings.absolute_reggie_path)
reggie_exe_path = os.path.join(settings.absolute_reggie_path,'reggie.py')
if not os.path.exists(reggie_exe_path) :
print("Reggie main file not found in reggie repository under: '%s'" % reggie_exe_path)
exit(1)
from repas_tools import finalize
import repas_tools
from combinations import getCombinations
from combinations import isKeyOf
from combinations import readKeyValueFile
from tools import red
from tools import yellow
from timeit import default_timer as timer
import tools
import args_parser
"""
General workflow:
1. FIX THIS: ------------------ get the command line arguments 'args' with path to ".gitlab-ci.yml" file
2. FIX THIS: ------------------ set the logger 'log' with the debug level from 'args' to determine the level of logging which displays output to the user
3. FIX THIS: ------------------ perform the regression check by a) building executables
------------------ b) running the code
------------------ c) performing the defined analyzes
4. FIX THIS: ------------------ display the summary table with information for each build, run and analysis step
5. FIX THIS: ------------------ display if regression check was successful or not and return the corresponding error code
"""
print('')
print(tools.red('=============================================================================================================================='))
print(tools.red(' _____ _____ _____ _____ _____ '))
print(tools.red(' /\ \ /\ \ /\ \ /\ \ /\ \ '))
print(tools.red(' /::\ \ /::\ \ /::\ \ /::\ \ /::\ \ '))
print(tools.red(' /::::\ \ /::::\ \ /::::\ \ /::::\ \ /::::\ \ '))
print(tools.red(' /::::::\ \ /::::::\ \ /::::::\ \ /::::::\ \ /::::::\ \ '))
print(tools.red(' /:::/\:::\ \ /:::/\:::\ \ /:::/\:::\ \ /:::/\:::\ \ /:::/\:::\ \ '))
print(tools.red(' /:::/__\:::\ \ /:::/__\:::\ \ /:::/__\:::\ \ /:::/__\:::\ \ /:::/__\:::\ \ '))
print(tools.red(' /::::\ \:::\ \ /::::\ \:::\ \ /::::\ \:::\ \ /::::\ \:::\ \ \:::\ \:::\ \ '))
print(tools.red(' /::::::\ \:::\ \ /::::::\ \:::\ \ /::::::\ \:::\ \ /::::::\ \:::\ \ ___\:::\ \:::\ \ '))
print(tools.red(' /:::/\:::\ \:::\____\ /:::/\:::\ \:::\ \ /:::/\:::\ \:::\____\ /:::/\:::\ \:::\ \ /\ \:::\ \:::\ \ '))
print(tools.red('/:::/ \:::\ \:::| |/:::/__\:::\ \:::\____\/:::/ \:::\ \:::| |/:::/ \:::\ \:::\____\/::\ \:::\ \:::\____\ '))
print(tools.red('\::/ |::::\ /:::|____|\:::\ \:::\ \::/ /\::/ \:::\ /:::|____|\::/ \:::\ /:::/ /\:::\ \:::\ \::/ / '))
print(tools.red(' \/____|:::::\/:::/ / \:::\ \:::\ \/____/ \/_____/\:::\/:::/ / \/____/ \:::\/:::/ / \:::\ \:::\ \/____/ '))
print(tools.red(' |:::::::::/ / \:::\ \:::\ \ \::::::/ / \::::::/ / \:::\ \:::\ \ '))
print(tools.red(' |::|\::::/ / \:::\ \:::\____\ \::::/ / \::::/ / \:::\ \:::\____\ '))
print(tools.red(' |::| \::/____/ \:::\ \::/ / \::/____/ /:::/ / \:::\ /:::/ / '))
print(tools.red(' |::| ~| \:::\ \/____/ ~~ /:::/ / \:::\/:::/ / '))
print(tools.red(' |::| | \:::\ \ /:::/ / \::::::/ / '))
print(tools.red(' \::| | \:::\____\ /:::/ / \::::/ / '))
print(tools.red(' \:| | \::/ / \::/ / \::/ / '))
print(tools.red(' \|___| \/____/ \/____/ \/____/ '))
print(tools.red('=============================================================================================================================='))
print('')
start = timer()
# argument parser
parser = argparse.ArgumentParser(description='DESCRIPTION:\nScript for executing the regression checker for NRG codes multiple times for parameter studies.', formatter_class=argparse.RawTextHelpFormatter)
#parser.add_argument('gitlab_ci', help='Path to gitlab-ci.yml which also contains a /regressioncheck/checks/... structure')
parser.add_argument('-c', '--case', default='.', help='Path to casedir, where repas should be executed.')
#parser.add_argument('-b', '--begin', type=int, default=1, help='Number of the case: where to start with the run (from the list that this tools creates)')
parser.add_argument('-d', '--debug', type=int, default=0, help='Debug level for this program. Dumps all info to the screen.')
#parser.add_argument('-i', '--info', type=int, default=1, help='Debug level for the subsequent program execution (e.g. flexi).')
#parser.add_argument('-o', '--only', action='store_true',help='Only run one case and exit afterwards (from the list that this tools creates).')
parser.add_argument('-x', '--dummy', action='store_true',help='Run repas without supplying parameter_rename.ini and parameter_change.ini files.')
parser.add_argument('-n', '--dryrun', action='store_true',help='Simply list all possible cases without performing any run.')
parser.add_argument('-a', '--hlrs', action='store_true', help='Run on with aprun (hlrs system).')
parser.add_argument('exe', help='Path to executable of code that should be tested.')
# get reggie command line arguments
args = parser.parse_args()
if re.search('^linux',platform) :
hostname=socket.gethostname()
print("platform: %s, hostname: %s" % (platform,hostname))
if re.search('^mom[0-9]+$',hostname) :
print(tools.yellow('Automatic detection of hlrs system: Assuming aprun is used and setting args.hlrs = True'))
args.hlrs = True
elif re.search('^eslogin[0-9]+$',hostname) :
if args.hlrs :
raise Exception('Running with -a or --hlrs. Cannot run this program on a login node. Get interactive job and run on mom node!')
# set the logger 'log' with the debug level from 'args' to determine the level of logging which displays output to the user
tools.setup_logger(args.debug)
log = logging.getLogger('logger')
# display all command line arguments
print("Running with the following command line options")
for arg in args.__dict__ :
print(arg.ljust(15)," = [",getattr(args,arg),"]")
print('='*132)
# define command that is usually run in a shell
# -s for save
# -a for hlrs
# -d1 for debug mode 1
if args.hlrs :
cmd = ['python',reggie_exe_path,'-e',str(args.exe),'.','-s','-a','-d1']
else :
cmd = ['python',reggie_exe_path,'-e',str(args.exe),'.','-s','-d1']
#cmd = ["ls","-l"] # for testing some other commands
if args.case :
if os.path.isdir(args.case) :
os.chdir(args.case)
else :
raise Exception('Supplied case directory is not correctly defined! -c [%s]' %args.case)
if args.dummy :
open('parameter_rename.ini', 'a').close()
open('parameter_change.ini', 'a').close()
# initialize central object and run in current working dir
cwd = os.getcwd()
repas = repas_tools.Case(cwd,cmd,'parameter_rename.ini','parameter_change.ini','parameter.ini') # and the case to the list of cases
# read the combinations for running the setups from parameter_change.ini
combis, digits = getCombinations(os.path.join(cwd,repas.names2_file))
# Edit parameter.ini for multiple parameters, subsequently, the reggie will change a set of variables
# and produce output which must be collected
# loop all runs
i=0
for combi in combis :
# print setup info
print(132*'-')
for key, value in combi.items() :
print("[%25s=%25s] digit=%3s" % (key, value, digits[key]))
# create parameter file for current combi
repas.create(combi,digits)
# read 'parameter_rename.ini' for renaming the results file
repas.names()
# run the code and repas output
repas.run(i)
i += 1
# save data: check output directory for .pdf and .csv files and rename according to info in 'parameter_rename.ini'
repas.save_data()
print(132*'-')
print(" ")
finalize(start, repas.nErrors)
|
piclas-framework/reggie2.0
|
repas/repas.py
|
repas.py
|
py
| 9,185 |
python
|
en
|
code
| 2 |
github-code
|
6
|
8353691653
|
# flake8: noqa
from __future__ import absolute_import, unicode_literals
import json
import os
import pytest
from c8.collection import StandardCollection
from c8.exceptions import (
CollectionCreateError,
CollectionDeleteError,
CollectionFindError,
CollectionImportFromFileError,
CollectionListError,
CollectionPropertiesError,
)
from tests.helpers import assert_raises, extract, generate_random_collection_name
@pytest.mark.vcr
def test_get_collection_information(client, col, tst_fabric_name):
tst_fabric = client._tenant.useFabric(tst_fabric_name)
collection = tst_fabric.collection(col.name)
# Test get information about collection
get_col_info = collection.get_collection_information()
assert get_col_info["error"] is False
assert get_col_info["name"] == collection.name
with assert_raises(CollectionFindError):
tst_fabric.collection(
"test_collection_collection_1"
).get_collection_information()
@pytest.mark.vcr
def test_collection_figures(client, col, tst_fabric_name):
# Test get properties
tst_fabric = client._tenant.useFabric(tst_fabric_name)
collection = tst_fabric.collection(col.name)
get_col_properties = collection.collection_figures()
assert get_col_properties["name"] == collection.name
assert get_col_properties["isSystem"] is False
with assert_raises(CollectionFindError):
tst_fabric.collection("test_collection_collection_2").collection_figures()
@pytest.mark.vcr
def test_collection_attributes(client, col, tst_fabric):
assert col.context in ["default", "async", "batch", "transaction"]
assert col.tenant_name == client._tenant.name
assert col.fabric_name == tst_fabric.name
assert col.name.startswith("test_collection") is True
assert repr(col) == "<StandardCollection {}>".format(col.name)
# def test_collection_misc_methods(col, tst_fabric):
# # Test get properties
# get_col_properties = tst_fabric.collection(col.name).collection_figures()
# assert get_col_properties["name"] == col.name
# assert get_col_properties["isSystem"] is False
# # Test get properties with bad collection
# with assert_raises(CollectionFindError):
# tst_fabric.collection(generate_col_name()).collection_figures()
#
# # Test configure properties
# prev_sync = get_col_properties["waitForSync"]
# prev_has_stream = get_col_properties["hasStream"]
#
# properties = tst_fabric.update_collection_properties(
# collection_name=col.name, has_stream=True, wait_for_sync=True
# )
# assert properties["name"] == col.name
# assert properties["isSystem"] is False
# assert properties["waitForSync"] is not prev_sync
# assert properties["hasStream"] is not prev_has_stream
#
# properties = tst_fabric.update_collection_properties(
# collection_name=col.name, wait_for_sync=False
# )
# assert properties["name"] == col.name
# assert properties["isSystem"] is False
# assert properties["waitForSync"] is False
# assert properties["hasStream"] is True
#
# # Test configure properties with bad collection
# with assert_raises(CollectionPropertiesError) as err:
# tst_fabric.update_collection_properties(
# collection_name=generate_col_name(), wait_for_sync=True
# )
# assert err.value.error_code == 1203
#
# # Test preconditions
# doc_id = col.name + "/" + "foo"
# tst_fabric.collection(col.name).insert({"_id": doc_id})
# assert len(col) == 1
#
# # Test truncate collection
# assert col.truncate() is True
# assert len(col) == 0
# def test_collection_management(tst_fabric, client, bad_fabric):
# # Test create collection
# col_name = generate_col_name()
# assert tst_fabric.has_collection(col_name) is False
#
# col = tst_fabric.create_collection(
# name=col_name,
# sync=False,
# edge=False,
# user_keys=True,
# key_increment=None,
# key_offset=None,
# key_generator="autoincrement",
# shard_fields=None,
# index_bucket_count=None,
# sync_replication=None,
# enforce_replication_factor=None,
# spot_collection=False,
# local_collection=False,
# is_system=False,
# stream=False,
# )
# assert tst_fabric.has_collection(col_name) is True
#
# get_col_properties = tst_fabric.collection(col.name).collection_figures()
# if col.context != "transaction":
# assert "id" in get_col_properties
# assert get_col_properties["name"] == col_name
# assert get_col_properties["waitForSync"] is False
# assert get_col_properties["isSystem"] is False
# assert get_col_properties["keyOptions"]["type"] == "autoincrement"
# assert get_col_properties["keyOptions"]["allowUserKeys"] is True
# assert get_col_properties["keyOptions"]["increment"] == 1
# assert get_col_properties["keyOptions"]["offset"] == 0
#
# # Test create duplicate collection
# with assert_raises(CollectionCreateError) as err:
# tst_fabric.create_collection(col_name)
# assert err.value.error_code == 1207
#
# # Test list collections
# assert col_name in extract("name", tst_fabric.collections())
# bad = client._tenant.useFabric(bad_fabric)
# # Test list collections with bad fabric
# with assert_raises(CollectionListError):
# bad.collections()
#
# # Test get collection object
# test_col = tst_fabric.collection(col.name)
# assert isinstance(test_col, StandardCollection)
# assert test_col.name == col.name
#
# test_col = tst_fabric[col.name]
# assert isinstance(test_col, StandardCollection)
# assert test_col.name == col.name
#
# # Test delete collection
# assert tst_fabric.delete_collection(col_name, system=False) is True
# assert col_name not in extract("name", tst_fabric.collections())
#
# # Test drop missing collection
# with assert_raises(CollectionDeleteError) as err:
# tst_fabric.delete_collection(col_name)
# assert err.value.error_code == 1203
# assert tst_fabric.delete_collection(col_name, ignore_missing=True) is False
@pytest.mark.vcr
def test_insert_from_file(client, col, tst_fabric_name):
absolute_path = os.path.dirname(__file__)
json_path = os.path.join(absolute_path, "files/data.json")
csv_path = os.path.join(absolute_path, "files/data.csv")
invalid_file_path = os.path.join(absolute_path, "files/data")
file = open(json_path)
documents = json.load(file)
client._tenant.useFabric(tst_fabric_name)
client.insert_document_from_file(collection_name=col.name, filepath=json_path)
data = client.collection(collection_name=col.name).export(limit=len(documents))
entries = ("_id", "_key", "_rev")
for doc in data:
for key in entries:
if key in doc:
del doc[key]
assert documents == data
col.truncate()
client.insert_document_from_file(collection_name=col.name, filepath=csv_path)
data = client.collection(collection_name=col.name).export(limit=len(documents))
assert len(data) == len(documents)
col.truncate()
with assert_raises(CollectionImportFromFileError) as err:
client.insert_document_from_file(
collection_name=col.name, filepath=invalid_file_path
)
assert (
str(err)
== "<ExceptionInfo CollectionImportFromFileError('Invalid file') tblen=3>"
)
file.close()
@pytest.mark.vcr
def test_all_documents(client, col, tst_fabric_name):
document_count = 2003
client._tenant.useFabric(tst_fabric_name)
client.execute_query(
query="FOR doc IN 1..{} INSERT {{value:doc}} INTO {}".format(
document_count, col.name
)
)
resp = client.get_all_documents(collection_name=col.name)
assert document_count == len(resp)
for i in range(len(resp)):
assert resp[i]["value"] == i + 1
col.truncate()
document_count = 11
client.execute_query(
query="FOR doc IN 1..{} INSERT {{value:doc}} INTO {}".format(
document_count, col.name
)
)
resp = client.get_all_documents(collection_name=col.name)
assert document_count == len(resp)
for i in range(len(resp)):
assert resp[i]["value"] == i + 1
|
Macrometacorp/pyC8
|
tests/test_collection.py
|
test_collection.py
|
py
| 8,364 |
python
|
en
|
code
| 6 |
github-code
|
6
|
75204307388
|
import mysql.connector
#to check whether its connected
mydb=mysql.connector.connect(host='localhost',user='root',password='isgsql')
if mydb.is_connected()==False:
print('not connected')
raise SystemExit
#creating a cursor object
mycursor=mydb.cursor()
#using/creating database
try:
mycursor.execute('create database Employee')
mycursor.execute('use Employee')
except:
mycursor.execute('use Employee')
def Insert_Initial():
try:
mycursor.execute('''create table Emp(Empno int primary key,
Empname varchar(30),
Salary int,
Department varchar(30),
Designation varchar(30))''')
except:
return #as table already exists
rec_list = []
for I in range(6):
no=int(input('Enter employee no : '))
name=input('Enter employee name : ')
salary=float(input('Enter employee salary : '))
department=input('Enter name of department : ')
designation=input("Enter designation : ")
rec_tuple=(no,name,salary,department,designation)
rec_list.append(rec_tuple)
command='insert into emp(empno,empname,salary,department,designation) values(%s,%s,%s,%s,%s)'
mycursor.executemany(command,rec_list)
print(mycursor.rowcount,'rows affected')
mydb.commit()
Insert_Initial()
#menu-driven functions
def addrecord():
record = (
int(input('Enter employee no : ')),
input('Enter employee name : '),
float(input('Enter employee salary : ')),
input('Enter name of department : '),
input("Enter designation : ")
)
command='insert into emp values(%s,%s,%s,%s,%s)'
mycursor.execute(command,record)
mydb.commit()
    print('Operation successful : record added')
def searchrecord():
try:
query = (
int(input('Enter Employee no : ')),
input('Enter dept : ')
)
command='select * from emp where Empno=%s and department=%s'
mycursor.execute(command,query)
records=mycursor.fetchall()
for I in records:
print(I)
except:
print('Record not found')
def updaterecord():
query = (input('Enter Department to be updated : '),input('Enter Designation to be updated : '))
command='update emp set salary=salary+0.35*salary where Department=%s and Designation=%s'
try:
mycursor.execute(command,query)
mydb.commit()
print('Record updated')
except:
print('Record not found')
def deleterecord():
query = (input('Enter Department'),)
command='delete from emp where Department=%s and Salary<15000'
try:
mycursor.execute(command,query)
mydb.commit()
print('Record deleted')
except:
print('Record not found')
def display():
mycursor.execute('select * from emp')
for I in mycursor.fetchall():
print(I)
#menu-driven
print('''MENU
1. Add record
2. Search record
3. Update record
4. Delete record
5. Display Records
Press any other key to exit\n''')
while True:
ch=input('Enter your choice :')
if ch=='1':
addrecord()
elif ch=='2':
searchrecord()
elif ch=='3':
updaterecord()
elif ch=='4':
deleterecord()
elif ch=='5':
display()
else:
raise SystemExit
|
CS-ION/Class-12-Practicals
|
Practicals/16.py
|
16.py
|
py
| 3,460 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7970861568
|
import os
from charms.reactive import is_state, when_all, when, when_not, set_flag, when_none, when_any, hook, clear_flag
from charmhelpers.core import templating, host, unitdata
from charmhelpers.core.hookenv import ( open_port,
status_set,
config,
unit_public_ip,
log,
application_version_set )
from charmhelpers.core.host import chdir, service_restart
from charms.reactive.relations import endpoint_from_flag
from pathlib import Path
import subprocess
NEXTCLOUD_CONFIG_PHP = '/var/www/nextcloud/config/config.php'
@when('apache.available')
@when_any('mysql.available', 'postgres.master.available')
@when_not('nextcloud.initdone')
def init_nextcloud():
log("Installation and initialization of nextcloud begins.")
mysql = endpoint_from_flag('mysql.available')
postgres = endpoint_from_flag('postgres.master.available')
# Set to 'location' in metadata.yaml IF provided on deploy.
# We cant use the default, since layer:apache-php will not deploy
# the nextcloud site properly if we pre-build the directory structure
# under /var/www/nextcloud
# Hence, we need to use a directory outside of the /var/www/nextcloud structure
# when we use juju storage here (since we are to use the layer:apache-php).
data_dir = unitdata.kv().get("nextcloud.storage.data.mount")
if os.path.exists(str(data_dir)):
# Use non default for nextcloud
log("nextcloud storage location for data set as: {}".format(data_dir))
host.chownr(data_dir, "www-data", "www-data", follow_links=False, chowntopdir=True)
os.chmod(data_dir, 0o700)
else:
# If no custom data_dir get to us via storage, we use the default
data_dir = '/var/www/nextcloud/data'
ctxt = {'dbname': None,
'dbuser': None,
'dbpass': None,
'dbhost': None,
'dbport': None,
'dbtype': None,
'admin_username': config().get('admin-username'),
'admin_password': config().get('admin-password'),
'data_dir': Path(data_dir),
}
if mysql:
ctxt['dbname'] = mysql.database()
ctxt['dbuser'] = mysql.user()
ctxt['dbpass'] = mysql.password()
ctxt['dbhost'] = mysql.host()
ctxt['dbport'] = mysql.port()
ctxt['dbtype'] = 'mysql'
elif postgres:
ctxt['dbname'] = postgres.master.dbname
ctxt['dbuser'] = postgres.master.user
ctxt['dbpass'] = postgres.master.password
ctxt['dbhost'] = postgres.master.host
ctxt['dbport'] = postgres.master.port
ctxt['dbtype'] = 'pgsql'
else:
log("Failed to determine supported database.")
status_set('maintenance', "Initializing Nextcloud")
# Comment below init to test installation manually
log("Running nexcloud occ installation...")
nextcloud_init = ("sudo -u www-data /usr/bin/php occ maintenance:install "
"--database {dbtype} --database-name {dbname} "
"--database-host {dbhost} --database-pass {dbpass} "
"--database-user {dbuser} --admin-user {admin_username} "
"--admin-pass {admin_password} "
"--data-dir {data_dir} ").format(**ctxt)
with chdir('/var/www/nextcloud'):
subprocess.call(("sudo chown -R www-data:www-data .").split())
subprocess.call(nextcloud_init.split())
#TODO: This is wrong and will also replace other values in config.php
#BUG - perhaps add a config here with trusted_domains.
Path('/var/www/nextcloud/config/config.php').write_text(
Path('/var/www/nextcloud/config/config.php').open().read().replace(
"localhost", config().get('fqdn') or unit_public_ip()))
# Enable required modules.
for module in ['rewrite', 'headers', 'env', 'dir', 'mime']:
subprocess.call(['a2enmod', module])
set_flag('apache_reload_needed')
set_flag('nextcloud.initdone')
set_flag('apache.start')
log("Installation and initialization of nextcloud completed.")
open_port(port='80')
status_set('active', "Nextcloud init complete.")
@when_all('apache.started', 'apache_reload_needed')
def reload_apache2():
host.service_reload('apache2')
clear_flag('apache_reload_needed')
@when_none('mysql.available', 'postgres.master.available')
def blocked_on_database():
''' Due for block when no database is available'''
status_set('blocked', "Need Mysql or Postgres relation to continue")
return
@hook('update-status')
def update_status():
'''
Calls occ status and sets version every now and then (update-status).
:return:
'''
nextcloud_status = "sudo -u www-data /usr/bin/php occ status"
with chdir('/var/www/nextcloud'):
try:
output = subprocess.run( nextcloud_status.split(), stdout=subprocess.PIPE ).stdout.split()
version = output[5].decode('UTF-8')
install_status = output[2].decode('UTF-8')
if install_status == 'true':
application_version_set(version)
status_set('active', "Nextcloud is OK.")
else:
status_set('waiting', "Nextcloud install state not OK.")
except:
status_set('waiting', "Nextcloud install state not OK.")
@when('apache.available')
@when_any('config.changed.php_max_file_uploads',
'config.changed.php_upload_max_filesize',
'config.changed.php_post_max_size',
'config.changed.php_memory_limit')
def config_php_settings():
'''
Detects changes in configuration and renders the phpmodule for
nextcloud (nextcloud.ini)
This is instead of manipulating the system wide php.ini
    which might be overwritten or changed from elsewhere.
'''
phpmod_context = {
'max_file_uploads': config('php_max_file_uploads'),
'upload_max_filesize': config('php_upload_max_filesize'),
'post_max_size': config('php_post_max_size'),
'memory_limit': config('php_memory_limit')
}
templating.render(source="nextcloud.ini",
target='/etc/php/7.2/mods-available/nextcloud.ini',
context=phpmod_context)
subprocess.check_call(['phpenmod', 'nextcloud'])
if is_state("apache.started"):
log("reloading apache2 after reconfiguration")
host.service_reload('apache2')
flags=['config.changed.php_max_file_uploads',
'config.changed.php_upload_max_filesize',
'config.changed.php_memory_limit',
'config.changed.php_post_max_size']
for f in flags:
clear_flag(f)
|
erik78se/layer-nextcloud
|
src/reactive/nextcloud.py
|
nextcloud.py
|
py
| 6,879 |
python
|
en
|
code
| 2 |
github-code
|
6
|
44083675715
|
from typing import Iterable
from scapy.all import *
from scapy.layers.inet import IP
def ip_from_packets(packets: Iterable) -> str:
"""
Get the IP of the machine where the packets are recorded
It is the IP which is present in all packets
:param packets:list of packets
:return: ip address
"""
IPs = {}
for packet in packets:
if IP in packet:
ip_from_packet = [packet[IP].src, packet[IP].dst]
for ip_address in ip_from_packet:
if ip_address in IPs:
IPs[ip_address] += 1
else:
IPs[ip_address] = 1
return max(IPs, key=IPs.get)
def ip_from_pcap(file: str) -> str:
"""
Wrap the above function (ip_from_packets) to read from pcap
:param file: file name/path
:return: ip address
"""
packets = rdpcap(file)
return ip_from_packets(packets)
if __name__ == "__main__":
print(ip_from_pcap("capture.pcap"))
print(ip_from_pcap("trickbot.pcapng"))
print(ip_from_pcap("trickbot2.pcapng"))
|
llmhyy/malware-traffic
|
Experiments/exp16_visualisation/ip_from_pcap.py
|
ip_from_pcap.py
|
py
| 925 |
python
|
en
|
code
| 7 |
github-code
|
6
|
70746450108
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 16 14:19:49 2016
@author: hossam
"""
import random
import numpy
import math
from solution import solution
import time
def WOA(objf, lb, ub, dim, SearchAgents_no, Max_iter):
# dim=30
# SearchAgents_no=50
# lb=-100
# ub=100
# Max_iter=500
if not isinstance(lb, list):
lb = [lb] * dim
if not isinstance(ub, list):
ub = [ub] * dim
# initialize position vector and score for the leader
Leader_pos = numpy.zeros(dim)
Leader_score = float("inf") # change this to -inf for maximization problems
# Initialize the positions of search agents
Positions = numpy.zeros((SearchAgents_no, dim))
for i in range(dim):
Positions[:, i] = (
numpy.random.uniform(0, 1, SearchAgents_no) * (ub[i] - lb[i]) + lb[i]
)
# Initialize convergence
convergence_curve = numpy.zeros(Max_iter)
############################
s = solution()
print('WOA is optimizing "' + objf.__name__ + '"')
timerStart = time.time()
s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
############################
t = 0 # Loop counter
# Main loop
while t < Max_iter:
for i in range(0, SearchAgents_no):
# Return back the search agents that go beyond the boundaries of the search space
# Positions[i,:]=checkBounds(Positions[i,:],lb,ub)
for j in range(dim):
Positions[i, j] = numpy.clip(Positions[i, j], lb[j], ub[j])
# Calculate objective function for each search agent
fitness = objf(Positions[i, :])
# Update the leader
if fitness < Leader_score: # Change this to > for maximization problem
Leader_score = fitness
# Update alpha
Leader_pos = Positions[
i, :
].copy() # copy current whale position into the leader position
a = 2 - t * ((2) / Max_iter)
    # a decreases linearly from 2 to 0 in Eq. (2.3)
# a2 linearly decreases from -1 to -2 to calculate t in Eq. (3.12)
a2 = -1 + t * ((-1) / Max_iter)
# Update the Position of search agents
for i in range(0, SearchAgents_no):
r1 = random.random() # r1 is a random number in [0,1]
r2 = random.random() # r2 is a random number in [0,1]
A = 2 * a * r1 - a # Eq. (2.3) in the paper
C = 2 * r2 # Eq. (2.4) in the paper
b = 1
# parameters in Eq. (2.5)
l = (a2 - 1) * random.random() + 1 # parameters in Eq. (2.5)
p = random.random() # p in Eq. (2.6)
for j in range(0, dim):
if p < 0.5:
if abs(A) >= 1:
rand_leader_index = math.floor(
SearchAgents_no * random.random()
)
X_rand = Positions[rand_leader_index, :]
D_X_rand = abs(C * X_rand[j] - Positions[i, j])
Positions[i, j] = X_rand[j] - A * D_X_rand
elif abs(A) < 1:
D_Leader = abs(C * Leader_pos[j] - Positions[i, j])
Positions[i, j] = Leader_pos[j] - A * D_Leader
elif p >= 0.5:
distance2Leader = abs(Leader_pos[j] - Positions[i, j])
# Eq. (2.5)
Positions[i, j] = (
distance2Leader * math.exp(b * l) * math.cos(l * 2 * math.pi)
+ Leader_pos[j]
)
convergence_curve[t] = Leader_score
if t % 1 == 0:
print(
["At iteration " + str(t) + " the best fitness is " + str(Leader_score)]
)
t = t + 1
timerEnd = time.time()
s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
s.executionTime = timerEnd - timerStart
s.convergence = convergence_curve
s.optimizer = "WOA"
s.objfname = objf.__name__
s.best = Leader_score
s.bestIndividual = Leader_pos
return s
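# Usage sketch (added; not part of the original file). Assumes the surrounding
# EvoloPy modules (e.g. solution.py) are importable; a simple sphere objective
# illustrates the call:
#
#   def sphere(x):
#       return numpy.sum(x ** 2)
#
#   result = WOA(sphere, lb=-100, ub=100, dim=30, SearchAgents_no=30, Max_iter=100)
#   print(result.best, result.bestIndividual)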
|
7ossam81/EvoloPy
|
optimizers/WOA.py
|
WOA.py
|
py
| 4,155 |
python
|
en
|
code
| 393 |
github-code
|
6
|
11260306476
|
def compute_grade(score):
if score > 1 or score < 0:
print("Input out of range.")
quit()
elif score >= 0.9:
grade = 'A'
elif score >= 0.8:
grade = 'B'
elif score >= 0.7:
grade = 'C'
elif score >= 0.6:
grade = 'D'
else:
grade = 'F'
return grade
try:
score = float(input("Enter score between 0.0 and 1.0:\n"))
grade = compute_grade(score)
print(f"Grade: {grade}")
except:
print("Invalid input.")
|
authura/python_practice
|
score_to_grade.py
|
score_to_grade.py
|
py
| 501 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18722283162
|
import numpy as np
import matplotlib.pyplot as plt
X = np.array([[2.5, 3.0, 3.0, 3.5, 5.5, 6.0, 6.0, 6.5],
[3.5, 3.0, 4.0, 3.5, 5.5, 6.0, 5.0, 5.5]])
num_rows, N = X.shape
c = 2
# c = 3
# c = 4
V = np.zeros((num_rows, c))
U = np.zeros((c, N))
row_iteration = 0
for i in range(N):
U[row_iteration, i] = 1
row_iteration = (row_iteration + 1) % c
print(U)
U = U[:, np.random.permutation(N)]
is_stop_criterion = 10000
epsilon = 0.00001
t = 0
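# Hard c-means iteration (descriptive comments added): each pass
#   1) recomputes the cluster centers V as the mean of the points assigned to each cluster,
#   2) recomputes the squared Euclidean distances d between every point and every center,
#   3) evaluates the objective J = sum(U * d),
#   4) reassigns each point to its nearest center (new crisp partition U),
# and stops when U no longer changes (norm of the change falls below epsilon).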
while is_stop_criterion > epsilon:
t += 1
for i in range(c):
for j in range(num_rows):
V[j, i] = np.sum(X[j, :] * U[i, :]) / np.sum(U[i, :])
V[np.isnan(V)] = 0
d = np.zeros((c, N))
for i in range(c):
for j in range(N):
d[i, j] = np.sum((X[:, j] - V[:, i]) ** 2)
J = np.sum(U * d)
U_save = U.copy()
U = np.zeros((c, N))
for j in range(N):
min_cluster = np.argmin(d[:, j])
U[min_cluster, j] = 1
is_stop_criterion = np.linalg.norm(U - U_save)
print("Partition matrix:")
print(U)
print("Cluster centers:")
print(V)
print("Minimum:")
print(J)
print("Number of iterations:")
print(t)
plt.scatter(X[0, :], X[1, :])
plt.scatter(V[0, :], V[1, :])
plt.show()
|
vvsct/c-means
|
hcm.py
|
hcm.py
|
py
| 1,215 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13526459322
|
# YOUR NAME:
# YOUR PSU EMAIL ADDRESS:
# END OF COMMENTS
# ------------------------------------------------------
# PLACE ANY NEEDED IMPORT STATEMENTS HERE:
# END OF IMPORT STATEMENTS
# =====================================================
# DEFINE YOUR FUNCTIONS IN THIS SECTION
# -----------------------------------------------------
# FUNCTION NAME: displayBoard
# INPUT: the board
# PROCESS: Put each value on the screen in "tic tac toe" board format
# OUTPUT: The board goes to the output, there is no return value
def filled(x):
filscore = 0
for i in range(9):
if x[i] == 'X':
filscore += 1
if filscore == 9:
return True
else:
return False
def displayBoard(x):
for i in range(0, 9, 3):
for j in range(3):
print(x[i + j], end=' | ')
if i != 8:
print('')
# -----------------------------------------------------
# FUNCTION NAME: filled
# INPUT: the board
# PROCESS: Looks at each spot in the board
# OUTPUT: Return "True" if the board is full, "False" otherwise
# -----------------------------------------------------
# FUNCTION NAME: makeMove
# INPUT: the board, which position to place an "X"
# PROCESS: Checks that the board position is empty; if not, display a message, otherwise update the board
# OUTPUT: No output except perhaps the error message; no return value
# END OF FUNCTION DEFINITIONS
def makeMove(b, x):
    if (x < 1) or (x > 9):
print('Out of range!')
elif b[x-1] == 'X':
print('That position is filled! Try again!')
else:
b[x-1] = 'X'
'''
if 9 >= x >= 1:
if b[x-1] == x:
b[x - 1] = 'X'
else:
#print('')
print('That position is filled! Try again!')
# print('')
# displayBoard(b)
else:
print('Out of range!')
'''
# =====================================================
# MAIN PART OF THE PROGRAM
def main():
board = ['_', '_', '_', '_', '_', '_', '_', '_', '_']
for i in range(9):
board[i]=i+1
# print(board)
# PROGRAM BEGINS HERE
while not (filled(board)):
displayBoard(board)
# print(board)
x = int(input("Enter move for x (1-9): "))
print(x)
if x < 1 or x > 9:
print("Please enter a valid position number 1 through 9")
else:
makeMove(board, x)
displayBoard(board)
print("End of game!")
# INCLUDE THE FOLLOWING 2 LINES, BUT NOTHING BETWEEN HERE
if __name__ == "__main__":
main()
# AND HERE
|
SidPatra/ProgrammingPractice
|
Practicing-Coding/shaffertictactoe.py
|
shaffertictactoe.py
|
py
| 2,546 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30195630744
|
import unittest
from ops.testing import Harness
from charm import CandidCharm
class TestCharm(unittest.TestCase):
def setUp(self):
self.harness = Harness(CandidCharm)
self.addCleanup(self.harness.cleanup)
self.harness.begin()
def test_website_relation_joined(self):
id = self.harness.add_relation("website", "apache2")
self.harness.add_relation_unit(id, "apache2/0")
data = self.harness.get_relation_data(id, self.harness.charm.unit.name)
self.assertTrue(data)
self.assertEqual(data["port"], "8081")
|
canonical/candid
|
charms/candid/tests/unit/test_charm.py
|
test_charm.py
|
py
| 577 |
python
|
en
|
code
| 41 |
github-code
|
6
|
2734112142
|
import re
# content = "as busy as a bee"
# r = re.compile(r'as')
# starts from the beginning of the content
# print(r.match(content))
# search anywhere in the content, find the first one
# print(r.search(content))
# returns all of the string content matches without span data
# print(r.findall(content))
# returns match objects for all matches in the content
# print(list(r.finditer(content)))
# content = "red|green;blue:yellow"
# # print(content.split("|").split(";").split(":"))
# # r = re.compile(r"\||:|;")
# r = re.compile(r"[|:;]")
# print(r.split(content))
# print(r.sub(",", content))
# content = """apple
# banana
# apple
# banana
# Banana
# apple
# avocado
# """
# # r = re.compile(r"^a[a-z]*", re.MULTILINE)
# r = re.compile(r"[a-z]*a$", re.MULTILINE | re.IGNORECASE)
# print(list(r.finditer(content)))
# content = "<b>content 1</b><span>test</span><b>content 2</b><div>fun</div>"
# # r = re.compile(r"<span>(.*)</span>")
# # r = re.compile(r"<b>(.*?)</b>")
# r = re.compile(r"<.*?>(.*?)</.*?>")
# # m = r.search(content)
# # print(m.groups())
# for m in r.finditer(content):
# print(m.groups()[0])
# print(list(r.finditer(content)))
r = re.compile(r"^Add: ([0-9]*)", re.MULTILINE)
with open("./report.txt", "r") as report_file:
report_content = report_file.read()
add_count_match = r.search(report_content)
print(add_count_match.groups()[0])
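# Note (added): this assumes ./report.txt contains a line starting with "Add: "
# followed by a number, e.g. "Add: 42"; the MULTILINE pattern captures those digits.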
|
t4d-classes/python_10042021
|
python_demos/src/language_demos/reg_exp_demo.py
|
reg_exp_demo.py
|
py
| 1,390 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36156660043
|
import numpy as np
N=9
Adjacence = np.zeros((N, N))
label = [0]*N # flag marking whether each vertex has already been visited
chemin = [[i] for i in range(N)] # stores the next vertex of each vertex
chemin_hamiltonien = [0]*N # stores the result: a Hamiltonian path
def init_chemin(): # initialize the path
for i in range(N):
chemin[i][0] = i
def label_test(): # check whether every vertex has been visited
for i in range(N):
if label[i] == 0:
return 0
return 1
def chemin_construire(origine): # turn the stored successor list into a Hamiltonian cycle
for i in range(N):
chemin_hamiltonien[i] = origine
origine = chemin[origine][0]
for i in range(N-1):
print("%d -> " % chemin_hamiltonien[i], end='')
print("%d" % chemin_hamiltonien[N-1])
def cycleHamilton(depart, origine): # DFS with backtracking to find the Hamiltonian cycle
    global chemin_hamiltonien
    arrive = -1 # whether the current vertex can reach another vertex
    for i in range(N):
        if Adjacence[depart][i] != 0 and label[i] == 0: # if this vertex is reachable
            arrive = i
            label[arrive] = 1 # mark as visited
            chemin[depart][0] = arrive # record the successor
            if cycleHamilton(arrive, origine) == 1: # if a Hamiltonian cycle is found, we are done!
                return 1
    if arrive == -1 and not label_test(): # if we have a cycle but not a Hamiltonian cycle
        label[depart] = 0 # remove the mark
        return 0
    if label_test() == 1 and Adjacence[depart][origine] != 0: # condition for a Hamiltonian cycle
        chemin[depart][0] = origine # connect the end back to the origin
return 1
else:
label[depart] = 0
label[arrive] = 0
return 0
def hamiltonien(origine):
global chemin_hamiltonien
label[origine] = 1
cycleHamilton(origine, origine)
chemin_construire(origine)
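# Illustrative usage (added; not part of the original file): a directed ring on the
# N vertices guarantees a Hamiltonian cycle, so the search from vertex 0 succeeds
# and prints 0 -> 1 -> ... -> 8.
if __name__ == "__main__":
    for i in range(N):
        Adjacence[i][(i + 1) % N] = 1
    hamiltonien(0)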
|
CSolatges/La-tournee-du-facteur
|
Python/HamiltonienC.py
|
HamiltonienC.py
|
py
| 1,963 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
23361556734
|
import datetime
from polls.models import LogModel
class LogMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
if request.path.find('admin') != -1:
return response
path = request.path
method = request.method
timestamps = datetime.datetime.now()
LogModel.objects.create(path=path, method=method,
timestamps=timestamps)
return response
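# How to enable this middleware (added note; the dotted path below is inferred from
# the app layout and may need adjusting): add it to MIDDLEWARE in settings.py, e.g.
#
#   MIDDLEWARE = [
#       ...,
#       "polls.middleware.LogMiddleware",
#   ]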
|
konstantinkonstantinovich/home_task_6
|
polls/middleware.py
|
middleware.py
|
py
| 549 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23158641917
|
import requests
import json
def get_weather(api_key, city):
url = f"http://api.weatherapi.com/v1/current.json?key={api_key}&q={city}"
response = requests.get(url)
data = json.loads(response.text)
if "error" in data:
print("Failed to fetch weather data.")
else:
temperature = data["current"]["temp_c"]
description = data["current"]["condition"]["text"]
print(f"Temperature: {temperature}°C")
print(f"Description: {description}")
def main():
api_key = "ae2fa0e696154eb699092948232106" # Replace with your WeatherAPI.com API key
city = input("Enter city name: ")
get_weather(api_key, city)
if __name__ == "__main__":
main()
|
Mutukukioko/WeatherApp
|
main.py
|
main.py
|
py
| 703 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12095699545
|
from argparse import ArgumentParser
import json
from tqdm import tqdm
import os, sys
import logging
import re
import gc
import torch
from torch.utils.data import DataLoader
from torch.optim import Adam
from bert_diora.models import BertDiora
from bert_diora.utils import TokenizedLengthSampler
def main(args):
# Set torch
torch.manual_seed(args.torch_seed)
# Set device
device = torch.device(f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")
# Make checkpoint/log directory
model_store_path = os.path.join(args.model_store_path, args.model_postfix)
try:
os.mkdir(model_store_path)
except FileExistsError:
if args.secure:
prompt = input("WARNING: overwriting directory " + model_store_path + ". Continue? (y/n)")
if prompt != "y":
exit()
# Init logger
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(formatter)
if not args.secure:
# Remove original log file
if os.path.exists(os.path.join(model_store_path, "train.log")):
os.remove(os.path.join(model_store_path, "train.log"))
file_handler = logging.FileHandler(os.path.join(model_store_path, "train.log"))
file_handler.setFormatter(formatter)
logger = logging.getLogger('')
logger.handlers.clear()
logger.addHandler(stdout_handler)
logger.addHandler(file_handler)
logger.setLevel(logging.INFO)
# Log basic info
logger.info("Training arguments:")
for arg, value in sorted(vars(args).items()):
logger.info("- %s: %r", arg, value)
logger.info("")
Arch = {
"diora": BertDiora,
}[args.arch]
model = Arch(
args.model_id,
freeze=not args.unfreeze,
device=device,
loss=args.loss,
loss_margin_k=args.loss_margin_k,
loss_margin_lambda=args.loss_margin_lambda
).to(device)
logger.info(model)
resume_training = False
if args.from_checkpoint is not None:
# Fine-tune from a local checkpoint
assert os.path.isdir(args.model_store_path)
model_load_path = os.path.join(args.model_store_path, args.from_checkpoint)
assert os.path.isdir(model_load_path)
last_checkpoint = sorted([
(int(re.search("epoch_([0-9]*)", f).group(1)), int(re.search("step_([0-9]*)", f).group(1)), f) for f in os.listdir(model_load_path) if f.endswith(".pt")], reverse=True
)[0][2]
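        # (added note) the sort above picks the checkpoint with the highest (epoch, step)
        # pair, parsed from filenames like "Model_<postfix>_epoch_3_step_20000.pt"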
model_load_path = os.path.join(model_load_path, last_checkpoint)
model.load_state_dict(torch.load(model_load_path, map_location=device))
model.device = device
model = model.to(device)
if args.from_checkpoint == args.model_postfix:
# If resume training from an error,
resume_training=True
resume_epoch = int(re.search("epoch_([0-9]*)", last_checkpoint).group(1))
resume_step = int(re.search("step_([0-9]*)", last_checkpoint).group(1))
resume_epoch_step = (resume_epoch, resume_step)
logger.info(f"Resume training from checkpoint: epoch {resume_epoch}, step {resume_step}")
# Load data
with open(args.train_data, "r", encoding='UTF-8') as file:
train_data = file.read().splitlines()
with open(args.dev_data, "r", encoding='UTF-8') as file:
dev_data = file.read().splitlines()
train_loader = DataLoader(train_data, batch_sampler=TokenizedLengthSampler(train_data, args.batch_size, seed=args.torch_seed))
dev_loader = DataLoader(dev_data, batch_sampler=TokenizedLengthSampler(dev_data, args.batch_size, seed=args.torch_seed))
# Define optimizer
optimizer = Adam(model.parameters(), lr=args.lr)
optimizer.zero_grad()
min_loss = 1e+10
early_stop_count = 0
loss = 0
for epoch in range(args.epoch): # loop over the dataset multiple times
if resume_training:
# If resume training from an error, skip to the halted epoch/step
if (epoch, len(train_loader) * 100) <= resume_epoch_step:
continue
logger.info(f"< epoch {epoch} >")
# Train phase
model.train()
epoch_size = len(train_loader)
for i, batch in enumerate(tqdm(train_loader, total=epoch_size)):
if resume_training:
# If resume training from an error, skip to the halted epoch/step
if (epoch, i) <= resume_epoch_step:
continue
sent = batch
# try:
if True:
# forward + backward + optimize
loss = model(sent)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
if i % args.update_freq == args.update_freq - 1 or i == epoch_size-1:
optimizer.step()
# zero the parameter gradients
optimizer.zero_grad()
loss = 0
# except Exception as e:
# logger.warning(str(e))
# logger.info("Exception occured; returning to training")
# gc.collect()
# torch.cuda.empty_cache()
# gc.collect()
# torch.cuda.empty_cache()
# finally:
# if i % args.update_freq == args.update_freq - 1 or i == epoch_size-1:
# loss = 0
if i % args.log_interval == args.log_interval-1 or i == epoch_size-1:
# Eval phase (on dev set)
model.eval()
with torch.no_grad():
total = len(dev_data)
dev_loss = 0
first_batch=True
for dev_batch in dev_loader:
dev_sents = dev_batch
if first_batch:
# test_input = gen_inputs[0]
# test_outputs = model.generate([test_input])[0]
dev_loss += (model(dev_sents)).item() * len(dev_sents)
first_batch=False
else:
dev_loss += (model(dev_sents)).item() * len(dev_sents)
logger.info("=================================================")
logger.info(f"epoch {epoch}, step {i}")
logger.info(f"dev loss = {dev_loss/total}")
logger.info("")
# logger.info("Test generation result")
# logger.info(f"input: {test_input}")
# logger.info(f"output:")
# for test_output in test_outputs:
# logger.info(f" {test_output}")
# logger.info("")
if dev_loss/total < min_loss:
logger.info(f"Updating min_loss = {min_loss} -> {dev_loss/total}")
min_loss = dev_loss / total
logger.info("Save model checkpoint because reduced loss...")
name = f"Model_{args.model_postfix}_epoch_{epoch}_step_{i+1}.pt"
torch.save(model.state_dict(), os.path.join(model_store_path, name))
early_stop_count = 0
else:
early_stop_count += 1
logger.info(f"Min loss not updated for {early_stop_count} validation routines...")
if early_stop_count >= args.early_stop:
logger.info("Early stopping....")
return
logger.info("=================================================")
if __name__ == "__main__":
parser = ArgumentParser()
# Dataset
parser.add_argument("--train_data", required=True, help="Training set(raw text, linebreaked)")
parser.add_argument("--dev_data", required=True, help="Validation set(raw text, linebreaked)")
# Base model/checkpoint configuration
parser.add_argument("--from_checkpoint", required=False, default=None, help="Pretrained checkpoint to load and resume training.")
parser.add_argument("--model_id", required=False, default="bert-base-uncased", help="Base model for DIORA architecture.")
parser.add_argument("--arch", required=False, default="diora", choices=["diora", "dora"], help="Recursive autoencoder architecture")
parser.add_argument("--loss", required=False, default="cossim", choices=["cossim", "token_ce", "token_margin"], help="Loss function to apply to DIORA")
parser.add_argument("--loss_margin_k", type=int, required=False, default=50, help="(loss=token_margin) How many negative tokens to compare")
parser.add_argument("--loss_margin_lambda", type=float, required=False, default=1.0, help="(loss=token_margin) max-margin value")
parser.add_argument("--max_grad_norm", type=float, required=False, default=5, help="Max L2 norm for radient cipping")
# Hyperparameters
parser.add_argument("--batch_size", type=int, default=8, help="training batch size")
parser.add_argument("--update_freq", type=int, default=1, help="gradient accumulation for virtually larger batches")
parser.add_argument("--lr", type=float, default=2e-3, help="Learning rate (default: Adam optimizer)")
parser.add_argument("--epoch", type=int, default=5, help="epoch count")
parser.add_argument("--unfreeze", action='store_true', help="If set, we also train the underlying parameter too.")
parser.add_argument("--log_interval", type=int, default=20000, help="validating / checkpoint saving interval. Validates at the end of each epoch for default.")
parser.add_argument("--early_stop", type=int, default=4, help="if valid loss does not decrease for `early_stop` validations, stop training.")
# PyTorch/CUDA configuration
parser.add_argument("--gpu", type=int, default=0, help="CUDA index for training")
parser.add_argument("--torch_seed", type=int, default=0, help="torch_seed() value")
# Checkpoint configs
parser.add_argument("--model_store_path", required=False, default='checkpoints', help="Directory to store model checkpoints.")
parser.add_argument("--model_postfix", required=False, help="Name for the model. defaulted to {model_id}-arch")
parser.add_argument("--secure", required=False, action="store_true", help="")
args = parser.parse_args()
# Post-modification of args
if args.model_postfix is None:
short_model_name = args.model_id.split("-")[0].split("_")[0]
args.model_postfix = short_model_name + '-' + args.arch + "-" + args.loss
main(args)
|
jinulee-v/bert_diora
|
train.py
|
train.py
|
py
| 10,660 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70725047228
|
# Databricks notebook source
# MAGIC %md
# MAGIC ### Working on qualifying json files
# COMMAND ----------
from delta.tables import *
# COMMAND ----------
# DBTITLE 1,Run the configuration notebook
# MAGIC %run "../0 - includes/configuration"
# COMMAND ----------
# DBTITLE 1,Run the functions notebook
# MAGIC %run "../0 - includes/functions"
# COMMAND ----------
# DBTITLE 1,Importing libraries and functions
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
from pyspark.sql.functions import lit
# COMMAND ----------
# DBTITLE 1,Reading folder
# df_qualifying = spark.read\
# .schema(qualifying_schema)\
# .option("multiLine", True)\
# .json(f"{landing_folder_path}/qualifying")
df_qualifying = spark.read.parquet(f"{bronze_folder_path}/qualifying")
# COMMAND ----------
# DBTITLE 1,Renaming column and creating new column
df_qualifying = df_qualifying.withColumnRenamed("qualifyId", "qualify_id") \
.withColumnRenamed("driverId", "driver_id") \
.withColumnRenamed("raceId", "race_id") \
.withColumnRenamed("constructorId", "constructor_id")
# COMMAND ----------
# DBTITLE 1,Creating column
df_qualifying = add_date_load_silver(df_qualifying)
# COMMAND ----------
# DBTITLE 1,write output parquet file
#df_qualifying.write.mode("overwrite").parquet(f"{silver_folder_path}/qualifying")
# COMMAND ----------
# df_qualifying.write.mode("overwrite").format("parquet").saveAsTable("f1_silver.qualifying")
# COMMAND ----------
if spark.catalog.tableExists("f1_silver.qualifying"):
df_target = DeltaTable.forPath(spark, '/mnt/adlsformula1/silver/qualifying')
print("upsert")
upsert(df_target,"qualify_id",df_qualifying,"qualify_id")
else:
print("New")
df_qualifying.write.mode("overwrite").format("delta").saveAsTable("f1_silver.qualifying")
# COMMAND ----------
dbutils.notebook.exit("Success")
|
diassmith/formula1-project
|
03 - bronze - to - silver/qualifying.py
|
qualifying.py
|
py
| 1,865 |
python
|
en
|
code
| 1 |
github-code
|
6
|
1584228601
|
from django.conf import settings
from cms.models import Title
from minitrue.base import replacer
from minitrue.contrib.django_cms.utils import plugin_get_url
def title_get_url(obj):
return obj.page.get_absolute_url()
replacer.register(Title, fields=['title', 'page_title', 'menu_title', 'redirect', 'meta_description', 'meta_keywords'],
urlgetter=title_get_url, select_related=['page'])
if 'cms.plugins.text' in settings.INSTALLED_APPS:
from cms.plugins.text.models import Text
replacer.register(Text, fields=['body'], urlgetter=plugin_get_url,
select_related=['placeholder__page'])
if 'cms.plugins.snippet' in settings.INSTALLED_APPS:
from cms.plugins.snippet.models import Snippet
replacer.register(Snippet, fields=['html'], select_related=['placeholder__page'])
if 'cms.plugins.file' in settings.INSTALLED_APPS:
from cms.plugins.file.models import File
replacer.register(File, fields=['title'],
urlgetter=plugin_get_url,
select_related=['placeholder__page'],
)
if 'cms.plugins.link' in settings.INSTALLED_APPS:
from cms.plugins.link.models import Link
replacer.register(Link, fields=['name'], urlgetter=plugin_get_url,
select_related=['placeholder__page']
)
if 'cms.plugins.picture' in settings.INSTALLED_APPS:
from cms.plugins.picture.models import Picture
replacer.register(Picture, fields=['alt', 'longdesc'],
urlgetter=plugin_get_url,
select_related=['placeholder__page']
)
if 'cms.plugins.teaser' in settings.INSTALLED_APPS:
from cms.plugins.teaser.models import Teaser
replacer.register(Teaser, fields=['title', 'description'],
urlgetter=plugin_get_url,
select_related=['placeholder__page']
)
if 'cms.plugins.twitter' in settings.INSTALLED_APPS:
from cms.plugins.twitter.models import TwitterRecentEntries, TwitterSearch
replacer.register(TwitterRecentEntries, fields=['title',],
urlgetter=plugin_get_url,
select_related=['placeholder__page']
)
replacer.register(TwitterSearch, fields=['title',],
urlgetter=plugin_get_url,
select_related=['placeholder__page']
)
|
beniwohli/django-minitrue
|
minitrue/contrib/django_cms/searchreplace.py
|
searchreplace.py
|
py
| 2,169 |
python
|
en
|
code
| 4 |
github-code
|
6
|
21393856702
|
import unittest
import sys
# Import the functions to be tested
from floyd_rec import floyd_recursive
from floyd import floyd
class TestFloydAlgorithm(unittest.TestCase):
def setUp(self):
# Initialize test data
self.NO_PATH = sys.maxsize
self.graph = [
[0, 7, self.NO_PATH, 8],
[self.NO_PATH, 0, 5, self.NO_PATH],
[self.NO_PATH, self.NO_PATH, 0, 2],
[self.NO_PATH, self.NO_PATH, self.NO_PATH, 0]
]
self.MAX_LENGTH = len(self.graph[0])
def test_floyd_rec(self):
# Test case 1: Start and end nodes are the same
distance = [[self.NO_PATH] * self.MAX_LENGTH for _ in range(self.MAX_LENGTH)]
intermediate = 0
start_node = 0
end_node = 0
floyd_recursive(distance, intermediate, start_node, end_node)
self.assertEqual(distance[start_node][end_node], 0)
# Test case 2: Start node is different from end node
distance = [[self.NO_PATH] * self.MAX_LENGTH for _ in range(self.MAX_LENGTH)]
intermediate = 1
start_node = 0
end_node = 1
floyd_recursive(distance, intermediate, start_node, end_node)
self.assertEqual(distance[start_node][end_node], 7)
def test_floyd(self):
# Test case 1: Check if floyd function updates distance matrix correctly
distance = [row[:] for row in self.graph]
floyd(distance)
expected_distance = [
[0, 7, 5, 8],
[self.NO_PATH, 0, 5, 7],
[self.NO_PATH, self.NO_PATH, 0, 2],
[self.NO_PATH, self.NO_PATH, self.NO_PATH, 0]
]
self.assertEqual(distance, expected_distance)
if __name__ == '__main__':
unittest.main()
|
ckcelliot/Floyd-Warshall-Algorithm-Task
|
testing.py
|
testing.py
|
py
| 1,792 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33040214351
|
n,m = map(int, input().split())
arr = list (map(int, input().split()))
bound_M = max(arr)
bound_m = min(arr)
flag = result = middle = 0
while 1:
if flag and middle == result: break
sum = 0
for a in arr:
sum += a - middle if a - middle > 0 else 0
bound_m = middle
if sum >= m:
flag = 1
result = middle
elif flag:
bound_M = middle
bound_m = result
middle = int((bound_M + bound_m)/2)
print(middle)
|
ParanMoA/SelfSoftware
|
JeongTIL/2023-01-19/boj/boj_2805.py
|
boj_2805.py
|
py
| 440 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36257621125
|
import sys
sys.setrecursionlimit(10**6)
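# Approach (added note): for every water level k, flood all cells with height <= k
# and count the connected safe regions via DFS; the answer is the maximum region
# count over all levels (k = 0 corresponds to no flooding).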
def dfs(x, y, k, graph_copy):
if x < 0 or x >= n or y < 0 or y >= n:
return False
if graph_copy[x][y] <= k:
return False
graph_copy[x][y] = 0
dfs(x-1, y, k, graph_copy)
dfs(x+1, y, k, graph_copy)
dfs(x, y-1, k, graph_copy)
dfs(x, y+1, k, graph_copy)
return True
if __name__ == "__main__":
n = int(input())
graph = []
for _ in range(n):
graph.append(list(map(int, input().split())))
max_v = max(graph[0])
for g in graph:
if max_v < max(g):
max_v = max(g)
max_g = 0
for k in range(max_v):
graph_copy = [g.copy() for g in graph]
count = 0
for i in range(n):
for j in range(n):
if dfs(i, j, k, graph_copy):
count += 1
if max_g < count:
max_g = count
print(max_g)
|
hon99oo/PythonAlgorithmStudy
|
BOJ/DFS_BFS/2468_안전 영역/solution.py
|
solution.py
|
py
| 905 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28048440530
|
class Cliente:
def __init__(self, nome, senha):
self.nome = nome
self.senha = senha
self.bloqueado = False
self.tentativas = 0
keys = dict()
clientes = dict()
for i in range(12):
numero, *letras = input().split(";")
for letra in letras:
keys[letra] = numero
def converte(str):
return [keys[c] for c in str]
while True:
nome, senha = input().split(";")
if nome == 'fim' and senha == 'fim':
break
clientes[nome] = Cliente(nome, converte(senha))
try:
while True:
nome, *senha = input().split(";")
if not clientes.get(nome):
print("%s: usuario inexistente" % nome)
else:
c = clientes[nome]
if c.bloqueado:
print("%s: usuario bloqueado" % nome)
elif c.senha == senha:
c.tentativas = 0
print("%s: acesso concedido" % nome)
else:
c.tentativas += 1
if c.tentativas >= 3:
c.bloqueado = True
print("%s: usuario bloqueado" % nome)
else:
print("%s: acesso negado" % nome)
except EOFError:
pass
|
pufe/programa
|
2020-11-09/banco.py
|
banco.py
|
py
| 1,264 |
python
|
pt
|
code
| 2 |
github-code
|
6
|
37441140473
|
from beat_tracker import *
file_list="./BallroomData/allBallroomFiles"
def go():
f = open(file_list, 'r')
lines = f.readlines()
for line in lines:
fline=line.strip("./").strip("\n")
beats = beatTracker("./BallroomData/"+fline)
outf=fline.replace(".wav", ".estimate")
f = open("./output/"+outf,"w+")
for beat in beats:
f.write(str(beat)+"\n")
f.close()
go()
|
bineferg/MIR-BeatTracker-DP
|
run-all.py
|
run-all.py
|
py
| 433 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10812133722
|
import sys, time
indent = 1
indentationRise = True
while(True):
try:
if(indentationRise):
time.sleep(0.01)
print(' '*indent + "********")
indent += 1
if(indent>=60):
indentationRise=False
elif(indentationRise==False):
time.sleep(0.01)
print(' ' * indent + "********")
indent -=1
if(indent<=0):
indentationRise = True
except(KeyboardInterrupt):
sys.exit()
|
trytek235/Python_programs
|
makeMy.py
|
makeMy.py
|
py
| 517 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43391129954
|
# Search NetEase Cloud Music for songs whose comment count exceeds tens of thousands
from selenium import webdriver
class Spider:
page = webdriver.Chrome()
list_ge = []
count = 0
list_url = []
# first_url = "https://music.163.com/#/song?id=31654747"
# list_url.append(first_url)
# print(list_url)
    # Get the song's URL
def get_url(self, url= "https://music.163.com/#/song?id=31654747"):
try:
self.list_url.append(url)
self.page.get(url)
self.page.implicitly_wait(10)
self.page.switch_to_frame("contentFrame")
            # Check the comment count and get the song title
pinglun = self.page.find_element_by_id("cnt_comment_count")
if int(pinglun.text) > 50000:
list_ge = []
ge = self.page.find_element_by_class_name("f-ff2").text
list_ge.append(ge)
                # Get the link to the next song
next_url = self.page.find_elements_by_class_name("s-fc1")[0].get_attribute("href")
# print("next"+next_url)
# print("now"+url)
                # If the next link was visited before, switch to another link
for u in self.list_url:
if u == next_url:
next_url = self.page.find_elements_by_class_name("s-fc1")[1].get_attribute("href")
                # Recurse and keep collecting songs
if self.count == 10:
return 1
else:
self.count = self.count+1
# print(self.count)
print(url, ge)
self.get_url(next_url)
except Exception as e:
print(e)
# print(list_url)
spider = Spider()
spider.get_url()
|
frebudd/python
|
wangyiyu_pinglun.py
|
wangyiyu_pinglun.py
|
py
| 1,676 |
python
|
en
|
code
| 2 |
github-code
|
6
|
24981348258
|
"""
Overall configuration file, used by detector_launcher.py and zmqproxy.py
"""
options = dict()
# data configuration: data_save_dir is the directory where the logs will be stored if io mode is True
options["data_save_dir"] = "/home/ubuntu/aminer-deep/data/"
# the file that will be used for training
options['data_file_name'] = "Ex03_dnsmask/125009"
options["device"] = "cpu"
# currently supports one feature, sequentials
options['sequentials'] = True
# Model
options["input_size"] = 1
options["hidden_size"] = 64
options["num_layers"] = 2
options["num_classes"] = 10
# Train
options["batch_size"] = 2048
options["accumulation_step"] = 1
options["optimizer"] = "adam"
options["lr"] = 0.001
options["max_epoch"] = 100
options["lr_step"] = (300, 350)
options["lr_decay_ratio"] = 0.1
options["resume_path"] = None
options["model_name"] = "dnsmask"
options["save_dir"] = "/home/ubuntu/aminer-deep/result/aminer-deep/ex03-dns/"
# Detector
options[
"model_path"
] = "/home/ubuntu/aminer-deep/result/aminer-deep/ex03-dns/dnsmask_last.pth"
options["num_candidates"] = 1
# ZMQ configuration; the endpoint is presented from the proxy's point of view
options["zmq_pub_endpoint"] = "tcp://127.0.0.1:5559"
options["zmq_sub_endpoint"] = "tcp://127.0.0.1:5560"
options["zmq_aminer_top"] = "aminer"
options["zmq_detector_top"] = "deep-aminer"
options["learn_mode"] = True
|
ait-aecid/aminer-deep
|
config.py
|
config.py
|
py
| 1,347 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10442912320
|
import requests
from bs4 import BeautifulSoup
import html5lib
"""THE BELOW REQUEST CAN BE MODIFIED TO GET MORE DATA BY CHANGING THE /page/1 to any page no"""
r=requests.get('https://cutoffs.aglasem.com/page/1')
s=BeautifulSoup(r.content,'html5lib')
jc=s.find(class_="jeg_posts jeg_load_more_flag")
for i in range(0,len(jc)-2):
v=jc.find_all('article')[i]
t=v.find('div',class_="jeg_postblock_content")
title=t.find('h3').find('a').getText()
link=t.find('h3').find('a')['href']
print(title,link)
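# Following the docstring's note about changing /page/1, a page-number parameter makes the same
# request reusable for other pages. This is an illustrative sketch; the helper name is an
# assumption and not part of the original script:
def fetch_page(page_no):
    resp = requests.get('https://cutoffs.aglasem.com/page/{}'.format(page_no))
    return BeautifulSoup(resp.content, 'html5lib')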
|
fredysomy/web-scrape-data
|
college-cuttofs-updates.py
|
college-cuttofs-updates.py
|
py
| 522 |
python
|
en
|
code
| 2 |
github-code
|
6
|
39961449850
|
#!/usr/bin/env python
# -- coding: utf-8 --
import numpy
from tf import transformations, TransformListener
import rospy
import geometry_msgs
import math
class TransformerTool:
def __init__(self, target_frame=None, source_frame=None):
self.target_frame = target_frame
self.source_frame = source_frame
if target_frame is not None and source_frame is not None:
self.mat44 = self.asMatrix(
target_frame=target_frame, source_frame=source_frame)
self.mat44Reserver = self.asMatrix(
target_frame=source_frame, source_frame=target_frame)
def quat2rvec(self, quat):
        'quaternion => rotation vector'
theta = math.acos(quat[3]) * 2
if theta < 0.001:
return [0, 0, 0]
else:
axis = [x / math.sin(theta) for x in quat[0:3]]
norm = math.sqrt(axis[0] * axis[0] + axis[1]
* axis[1] + axis[2] * axis[2])
rvec = [x * theta / norm for x in axis]
return rvec
def rvec2quat(self, rvec):
        'rotation vector => quaternion'
theta = math.sqrt(rvec[0] * rvec[0] + rvec[1]
* rvec[1] + rvec[2] * rvec[2])
if theta < 0.001:
return [0, 0, 0, 1]
else:
axis = [x / theta for x in rvec]
sht = math.sin(theta * 0.5)
quat = [x * sht for x in axis]
quat.append(math.cos(theta * 0.5))
return quat
def transformPoseWithFrame(self, target_frame, source_frame, pose):
        'Transform a pose between different coordinate frames'
mat44 = self.asMatrix(target_frame=target_frame,
source_frame=source_frame)
return self._transformPose(mat44=mat44, pose=pose)
def transformPose(self, pose):
return self._transformPose(mat44=self.mat44, pose=pose)
def _transformPose(self, mat44, pose):
pose44 = numpy.dot(self.xyz_to_mat44(pose.position),
self.xyzw_to_mat44(pose.orientation))
txpose = numpy.dot(mat44, pose44)
# print(txpose)
xyz = tuple(transformations.translation_from_matrix(txpose))[:3]
quat = tuple(self.quaternion_from_matrix(txpose))
# print(quat)
return geometry_msgs.msg.Pose(geometry_msgs.msg.Point(*xyz), geometry_msgs.msg.Quaternion(*quat))
def asMatrix(self, target_frame, source_frame):
tran = TransformListener()
tran.waitForTransform(
target_frame=target_frame, source_frame=source_frame, time=rospy.Time(0), timeout=rospy.Duration(4.0))
translation, rotation = tran.lookupTransform(target_frame=target_frame,
source_frame=source_frame, time=rospy.Time(0))
return self.fromTranslationRotation(translation, rotation)
def fromTranslationRotation(self, translation, rotation):
return numpy.dot(transformations.translation_matrix(translation), transformations.quaternion_matrix(rotation))
def xyz_to_mat44(self, pos):
return transformations.translation_matrix((pos.x, pos.y, pos.z))
def xyzw_to_mat44(self, ori):
return transformations.quaternion_matrix((ori.x, ori.y, ori.z, ori.w))
def transformQuaternion(self, quaternion):
return self._transformQuaternion(self.mat44, quaternion)
def transformQuaternionWithFrame(self, target_frame, source_frame, quaternion):
mat44 = self.asMatrix(target_frame=target_frame,
source_frame=source_frame)
return self._transformQuaternion(mat44, quaternion)
def _transformQuaternion(self, mat44, quaternion):
pose44 = self.xyzw_to_mat44(quaternion)
txpose = numpy.dot(mat44, pose44)
        # TODO: revise the conversion matrix
# quat = tuple(transformations.quaternion_from_matrix(txpose))
quat = tuple(self.quaternion_from_matrix(txpose))
return geometry_msgs.msg.Quaternion(*quat)
def quaternion_from_matrix(self,matrix):
"""
        Custom matrix-to-quaternion conversion, used instead of the related tf function to avoid sudden jumps.
        With the tf conversion function, large swings appeared once the right arm rotated past a certain angle.
        It is not yet certain whether other problems may arise.
"""
q = numpy.empty((4, ), dtype=numpy.float64)
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
t = numpy.trace(M)
# if t > M[3, 3]:
q[3] = t
q[2] = M[1, 0] - M[0, 1]
q[1] = M[0, 2] - M[2, 0]
q[0] = M[2, 1] - M[1, 2]
# else:
# i, j, k = 0, 1, 2
# if M[1, 1] > M[0, 0]:
# i, j, k = 1, 2, 0
# if M[2, 2] > M[i, i]:
# i, j, k = 2, 0, 1
# t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
# q[i] = -t
# q[j] = -(M[i, j] + M[j, i])
# q[k] = -(M[k, i] + M[i, k])
# q[3] = -(M[k, j] - M[j, k])
q *= 0.5 / math.sqrt(t * M[3, 3])
return q
|
6VV/vr-robot-back
|
robot/robot_control/TransformerTool.py
|
TransformerTool.py
|
py
| 5,042 |
python
|
en
|
code
| 1 |
github-code
|
6
|
42510539573
|
import os
from cffi import FFI
from OpenSSL.SSL import Context as SSLContext, _ffi, _lib as lib
from utils import OutputGrabber
ffi = FFI()
NULL = ffi.NULL
ffi.cdef(
"int SSL_CTX_set_client_cert_engine(void *ctx, void *e);"
"int ENGINE_set_default(void *e, unsigned int flags);"
)
libcrypto = ffi.dlopen("libcrypto-1_1.dll")
libssl = ffi.dlopen("libssl-1_1.dll")
class ENGINE_DEFAULT:
ALL = 0xFFFF
class CAPI_LIST_DISP_FMT:
SUMMARY = 1
FRIENDLY_NAME = 2
FULL = 4
PEM = 8
XXX = 16
PRIV_KEY_INFO = 32
class SSLEngine(object):
def __init__(self, id: str | FFI.CData) -> None:
if isinstance(id, str):
try:
eng = SSLEngine.load_by_id(id)
except Exception:
eng = SSLEngine.load_dynamic(id)
ptr = eng.ptr
elif isinstance(id, SSLEngine):
ptr = id.ptr
else:
ptr = id
self.ptr = ptr
def init(self):
if not lib.ENGINE_init(self.ptr):
self.__exit__()
raise Exception("Could not initialize engine")
def free(self):
lib.ENGINE_free(self.ptr)
def __enter__(self):
self.init()
return self
def __exit__(self, type, value, traceback):
self.free()
def set_default(self, flags: int = ENGINE_DEFAULT.ALL):
if not libcrypto.ENGINE_set_default(self.ptr, flags):
self.free()
raise Exception(
"Not able to set engine as default for all flags:%s" % flags
)
def ctrl_cmd_string(
self,
cmd: str,
value: str | None = None,
optional: bool = False,
capture: bool = False,
) -> None | bytes:
io: None | OutputGrabber = None
if capture:
io = OutputGrabber(threaded=True)
io.start()
if not lib.ENGINE_ctrl_cmd_string(
self.ptr,
cmd.encode("utf-8"),
NULL if value == None else value.encode("utf-8"),
1 if optional else 0,
):
if capture:
io.stop()
raise Exception(
"Error with engine string control command: %s%s"
% (cmd, "" if value == None else ":" + value)
)
if capture:
io.stop()
return io.captured
def load_by_id(id: str):
if not id:
raise ValueError("Id value must be provided")
lib.ENGINE_load_builtin_engines()
ptr = lib.ENGINE_by_id(id.encode())
if ptr == NULL:
raise ValueError("Could not load the {0} engine by id".format(id))
return SSLEngine(ptr)
def load_dynamic(
id: str,
path: str = None,
search_path: str = None,
check_version: bool = True,
):
if not id:
raise ValueError("Id value must be provided")
dyn = SSLEngine.load_by_id("dynamic")
dyn.ctrl_cmd_string("ID", id)
if path:
dyn.ctrl_cmd_string("SO_PATH", path)
dyn.ctrl_cmd_string("LIST_ADD", "1")
if not check_version:
dyn.ctrl_cmd_string("NO_VCHECK", "1")
if search_path == None and path == None and "OPENSSL_ENGINES" in os.environ:
search_path = os.environ ["OPENSSL_ENGINES"]
if search_path:
dyn.ctrl_cmd_string("DIR_LOAD", "2")
dyn.ctrl_cmd_string("DIR_ADD", search_path)
dyn.ctrl_cmd_string("LOAD")
return dyn
class CAPIEngine(SSLEngine):
def __init__(self, src: FFI.CData | str | SSLEngine | None = None) -> None:
super().__init__("capi" if src == None else src)
def set_store(self, name: str):
self.ctrl_cmd_string("store_name", name)
def list_certs(
self, store: str | None = None, format: int | None = None
) -> list[bytes]:
if format:
self.ctrl_cmd_string("list_options", str(format))
if store:
self.set_store(store)
return [
cert.split(sep=b"\n", maxsplit=1)[1]
for cert in self.ctrl_cmd_string("list_certs", capture=True)
.strip(b"\n")
.split(b"\nCertificate ")
]
def set_client_cert_engine(self: SSLContext, engine: FFI.CData | SSLEngine):
if not libssl.SSL_CTX_set_client_cert_engine(
self._context, engine.ptr if isinstance(engine, SSLEngine) else engine
):
raise Exception("Was not able to set client cert engine")
SSLContext.set_client_cert_engine = set_client_cert_engine
|
jose-pr/openssl-engines
|
src/openssl_engines.py
|
openssl_engines.py
|
py
| 4,561 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28806078956
|
from typing import Dict
from typing import Iterator
from typing import List
from jira.resources import Board
from ..exceptions import QueryError
from ..plugin import BaseSource
from ..types import SchemaRow
class Source(BaseSource):
SCHEMA: List[SchemaRow] = [
SchemaRow.parse_obj({"id": "id", "type": "int"}),
SchemaRow.parse_obj({"id": "name", "type": "str"}),
SchemaRow.parse_obj({"id": "type", "type": "str"}),
]
def __iter__(self) -> Iterator[Dict]:
start_at = 0
max_results = 2**32
result_limit = self.query.limit or 2**32
if self.query.order_by:
raise QueryError(
"Board query 'order_by' expressions are not supported. "
"Use 'sort_by' instead."
)
if self.query.expand:
raise QueryError("Board query 'expand' expressions are not supported.")
where = self.query.where or {}
if where and not isinstance(where, dict):
raise QueryError(
"Board query 'where' expressions should be a dictionary "
"having any of the following keys: 'type' or 'name'"
)
param_type = where.pop("type", None)
param_name = where.pop("name", None)
if where:
raise QueryError(f"Unexpected 'where' parameters: {where}.")
self.update_progress(completed=0, total=1, visible=True)
while start_at < min(max_results, result_limit):
results = self.jira.boards(
startAt=start_at,
maxResults=min(result_limit, 100),
type=param_type,
name=param_name,
)
max_results = results.total
count = min([results.total, result_limit])
self.update_count(count)
for result in results:
self.update_progress(advance=1, total=count, visible=True)
yield result.raw
start_at += 1
# Return early if our result limit has been reached
if start_at >= result_limit:
break
def rehydrate(self, value: Dict) -> Board:
return Board(
{"agile_rest_path": self.jira._options["agile_rest_path"]}, None, value
)
|
coddingtonbear/jira-select
|
jira_select/sources/boards.py
|
boards.py
|
py
| 2,300 |
python
|
en
|
code
| 22 |
github-code
|
6
|
39542654444
|
import requests
import json
import csv
headers = {
'Authorization': '',
'API-Key': '',
'Accept': 'application/json',
}
p = {
'severities': ''
}
response = requests.get('https://apptwo.contrastsecurity.com/Contrast/api/ng/ORGID/traces/APPID/filter', params=p,headers=headers)
app = requests.get('https://apptwo.contrastsecurity.com/Contrast/api/ng/ORGID/applications/APPID/', headers=headers)
result=json.loads(response.text)
appName=json.loads(app.text)
print(result)
"""
with open('contrast.csv', mode='w') as csv_file:
fieldnames=['AppName','VulnID', 'Title', 'Status', 'Severity']
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for i in range (0, len(result['traces'])):
writer.writerow({'AppName': appName['application']['name'],'VulnID': result['traces'][i]['uuid'], 'Title': result['traces'][i]['title'], 'Status': result['traces'][i]['status'], 'Severity': result['traces'][i]['severity']})
"""
|
abridgel-zz/scripts
|
lab3.py
|
lab3.py
|
py
| 976 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9379983030
|
# 2022.06.06
# Solving time: 21 min 32 s
# Judge result: Time Limit Exceeded -> Accepted
# Time complexity: O(N)
# Problem link: https://www.acmicpc.net/problem/4358
import sys
input = sys.stdin.readline
forest = {}
result = 0
while True:
tree = input().rstrip()
if not tree:
break
result += 1
if tree in forest.keys():
forest[tree] += 1
else:
forest[tree] = 1
tree_list = sorted(forest.keys())
for tree_name in tree_list:
print("%s %.4f" % (tree_name, forest[tree_name] * 100 / result))
|
Source-Machine-Ent/Algorithm-class
|
ningpop/4358.py
|
4358.py
|
py
| 535 |
python
|
en
|
code
| 2 |
github-code
|
6
|
79843457
|
import numpy as np
from scipy.linalg import lstsq
from optimal_control.basis import Basis
from optimal_control.examples.discrete import StoppingExample
from optimal_control.solvers.discrete import DiscreteValueFunction
class ModifiedForStopping(DiscreteValueFunction):
def __init__(self, example: StoppingExample, x_basis: Basis, I: int = 0, positive_continuation=True):
super().__init__(example)
self.positive_continuation = positive_continuation
J = self.n_time_steps - 2
self.x_basis = x_basis
self.y_max = 1
self.regression_coefficients = np.zeros((J + 1, x_basis.dimension + 1, I + 1))
self.I = I if (I <= J) else J
self.basis_normalization = np.ones((J + 1, x_basis.dimension))
self.reinforced_basis_normalization = np.ones((J + 1, I + 1))
def value_and_policy(self, j, Y_j, X_j, depth=0, **kwargs):
m, _ = X_j.shape
J = self.n_time_steps - 2
VH = np.zeros((m, 2))
mask = (Y_j[:, 0] == 0)
FX = self.x_basis.transform(X_j[mask])
m_, _ = FX.shape
I_ = min(self.I, J - j)
H = np.zeros((m_, I_ + 1))
for i in range(I_ + 1):
H[:, i] = self.example.g(j + i, X_j[mask])
VH[mask] = self.__vh__(j, FX, I_, H)
return VH
def fit(self, X):
if np.ndim(X) == 2:
m, n = X.shape
X = X.reshape(m, n, 1)
m, n, d = X.shape
J = self.n_time_steps - 2
I = self.I
x_basis_dimension = self.x_basis.dimension
H = np.zeros((m, 2, I + 1))
H[:, 0, 0] = self.example.g(J + 1, X[:, J + 1])
FX = np.zeros((m, 2, self.regression_coefficients.shape[1]))
FX[:, 0, :x_basis_dimension] = self.x_basis.transform(X[:, J + 1, :])
for j in range(J, -1, -1):
ModifiedForStopping.__print_progression__(j, J)
FX[:, 1, :x_basis_dimension] = FX[:, 0, :x_basis_dimension]
FX[:, 0, :x_basis_dimension] = self.x_basis.transform(X[:, j, :])
H[:, 1] = H[:, 0]
for i in range(min(I, J - j) + 1):
H[:, 0, i] = self.example.g(j + i, X[:, j])
z = self.__vh__(j + 1, FX[:, 1, :x_basis_dimension], min(I, J - (j + 1)), H[:, 1])[:, 0]
if (j == 0) and (FX[:, 0, 1].var() == 0): # Only if index 0 basis function is the constant function!
z_mean = z.mean()
self.regression_coefficients[0, 0, I] = z_mean
else:
for i in range(min(I, J - j) + 1):
if i < I - j:
continue
if i == 0:
res = lstsq(FX[:, 0, :x_basis_dimension], z)[0]
self.regression_coefficients[j, :x_basis_dimension, 0] = res
else:
f = self.__vh__(j + 1, FX[:, 0, :x_basis_dimension], i - 1, H[:, 0, 1:])[:, 0]
FX[:, 0, -1] = f
res = lstsq(FX[:, 0, :], z)[0]
self.regression_coefficients[j, :, i] = res
def __vh__(self, j: int, FX, i: int, H):
m, basis_dimension = FX.shape
J = self.n_time_steps - 2
VH = np.zeros((m, 2))
VI = np.zeros((m, 2))
V = np.zeros((m, i + 1))
C = np.zeros((m, i + 1))
if j == J + 1:
VH[:, 1] = 0
VH[:, 0] = 0
else:
assert J - j >= i, "Only {}-steps to go backwards, but depth is {}.".format(J - j, i)
for u in range(0, i + 1):
s = j + i - u
C[:, s - j] = np.dot(FX, self.regression_coefficients[s, :basis_dimension, u])
if u > 0:
C[:, s - j] += V[:, s - j + 1] * self.regression_coefficients[s, -1, u]
if self.positive_continuation:
C[:, s - j] = np.maximum(C[:, s - j], 0)
VI[:, 0] = C[:, s - j]
VI[:, 1] = H[:, s - j]
if s > j:
V[:, s - j] = np.max(VI, axis=1)
if s == j:
arg_max = np.expand_dims(np.argmax(VI, axis=1), axis=1)
VH[:, 0] = np.take_along_axis(VI, arg_max, axis=1)[:, 0]
VH[:, 1] = arg_max[:, 0]
return VH
def value_all_y(self, j, X_j):
m = X_j.shape[0]
V = np.zeros((m, 2))
V[:, 0] = self.evaluate(j, np.zeros((m, 1)), X_j)
return V
@staticmethod
def __print_progression__(i, n):
print("{}/{} <-".format(i, n), flush=True, end="")
print(end="\r", flush=True)
|
hagerpa/reinforced_optimal_control
|
optimal_control/solvers/discrete/value_function/modified_for_stopping.py
|
modified_for_stopping.py
|
py
| 4,639 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9379888880
|
# 2022.05.12
# Solving time: 98 min 47 s
# Judge result: Wrong Answer -> Time Limit Exceeded -> Runtime Error -> Accepted
# Time complexity: O(N*M)
# Problem link: https://www.acmicpc.net/problem/1103
import sys
sys.setrecursionlimit(100000)
input = sys.stdin.readline
def dfs(x: int, y: int, count: int) -> int:
global is_visited, max_count
max_count = max(max_count, count)
for i in range(4):
nx = x + (dx[i] * board[x][y])
ny = y + (dy[i] * board[x][y])
if nx < 0 or ny < 0 or nx >= n or ny >= m:
continue
if board[nx][ny] == 'H':
continue
if is_visited[nx][ny]:
print(-1)
exit()
if count + 1 <= dp[nx][ny]:
continue
dp[nx][ny] = count + 1
is_visited[nx][ny] = True
dfs(nx, ny, count + 1)
is_visited[nx][ny] = False
n, m = map(int, input().split())
board = []
for _ in range(n):
board.append([ i if i.isalpha() else int(i) for i in list(input().rstrip()) ])
dx = [0, 1, 0, -1]
dy = [1, 0, -1, 0]
max_count = 0
is_visited = [ [False] * m for _ in range(n) ]
dp = [ [0] * m for _ in range(n) ]
dfs(0, 0, 0)
print(max_count + 1)
|
Source-Machine-Ent/Algorithm-class
|
ningpop/1103.py
|
1103.py
|
py
| 1,190 |
python
|
en
|
code
| 2 |
github-code
|
6
|
39574197449
|
#coding:utf8
#Dictionaries
#Purpose: store multiple values as key-value pairs; lookup by key is fast
#Definition: keys must be of an immutable type, values can be of any type
#1 Given the collection of values [11,22,33,44,55,66,77,88,99,90...], store all values greater than 66 under the dictionary's first key and all values less than 66 under the second key
#i.e.: {'k1': all values greater than 66, 'k2': all values less than 66}
# a = {'k1':[],'k2':[]}
# c = [11,22,33,44,55,66,77,88,99]
#
# for i in c:
# if i >66:
# a['k1'].append(i)
# else:
# a['k2'].append(i)
#
# print(a)
#Count the number of occurrences of each word
#Expected result: {'hello': 2, 'alex': 2, 'say': 1, 'sb': 2}
s='hello alex alex say hello sb sb'
#Approach 1
# l = s.split()
# dic={}
# for item in l:
# if item in dic:
# dic[item]+=1
# else:
# dic[item]=1
# print(dic)
#Approach 2 (the one I find easiest to understand)
# dic={}
# words=s.split()
# print(words)
# for word in words:
# dic[word]=s.count(word)
# print(dic)
#Approach 3
#Use setdefault to avoid re-assigning keys that already exist
'''
What setdefault does:
1: if the key exists, it is not re-assigned; if the key does not exist, the default value is set
2: if the key exists, the value already stored for that key is returned; if the key does not exist, the default value being set is returned
d={}
print(d.setdefault('a',1)) #returns 1
d={'a':2222}
print(d.setdefault('a',1)) #returns 2222
'''
# dic={}
# words=s.split()
# for word in words:
# dic.setdefault(word,s.count(word))
# print(dic)
#Approach 4
#Use a set to remove duplicates and reduce the number of loop iterations
s='hello alex alex say hello sb sb'
dic={}
words=s.split()
words_set=set(words)
for word in words_set:
dic[word]=s.count(word)
print(dic)
|
xueyes/py3_study
|
zidian_key.py
|
zidian_key.py
|
py
| 1,629 |
python
|
zh
|
code
| 1 |
github-code
|
6
|
34197097202
|
import numpy as np
import threading
import time
from datetime import datetime
import jderobot
import math
import cv2
from math import pi as pi
time_cycle = 80
class MyAlgorithm(threading.Thread):
def __init__(self, pose3d, laser1, laser2, laser3, motors):
self.pose3d = pose3d
self.laser1 = laser1
self.laser2 = laser2
self.laser3 = laser3
self.motors = motors
self.StopTaxi = False
self.goForward = False
self.turn1 = False
self.startTime = 0
self.startTimePark = 2
self.DIST_REAR_SPOT = 6.3
self.DIST_REAR_CARY = 4.2
self.DIST_REAR_CARX = 2.2
self.DIST_RIGHT = 3.5
self.MARGIN1 = 0.2
self.MARGIN2 = 0.15
self.YAW_MAX = 1.05
self.YAW_MARGIN = 0.02
self.DIST_MAX = 20
self.stop_event = threading.Event()
self.kill_event = threading.Event()
self.lock = threading.Lock()
threading.Thread.__init__(self, args=self.stop_event)
def parse_laser_data(self,laser_data):
laser = []
for i in range(laser_data.numLaser):
dist = laser_data.distanceData[i]/1000.0
angle = math.radians(i)
laser += [(dist, angle)]
return laser
def get_laser_vector(self,laser_array):
laser_vectorized = []
for d,a in laser_array:
# (4.2.1) laser into GUI reference system
x = d * math.cos(a) * -1
y = d * math.sin(a) * -1
v = (x,y)
laser_vectorized += [v]
return laser_vectorized
def run (self):
while (not self.kill_event.is_set()):
start_time = datetime.now()
if not self.stop_event.is_set():
self.execute()
finish_Time = datetime.now()
dt = finish_Time - start_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
#print (ms)
if (ms < time_cycle):
time.sleep((time_cycle - ms) / 1000.0)
def stop (self):
self.stop_event.set()
def play (self):
if self.is_alive():
self.stop_event.clear()
else:
self.start()
def kill (self):
self.kill_event.set()
def absolutas2relativas(self, x, y, rx, ry, rt):
# Convert to relatives
dx = x - rx
dy = y - ry
# Rotate with current angle
x = dx*math.cos(-rt) - dy*math.sin(-rt)
y = dx*math.sin(-rt) + dy*math.cos(-rt)
return x,y
def driveArc(self, speed, angleTurn):
self.motors.sendV(speed)
self.motors.sendW(angleTurn)
def execute(self):
# TODO
# Get the position of the robot
xCar = self.pose3d.getX()
yCar = self.pose3d.getY()
# We get the orientation of the robot with respect to the map
yawCar = self.pose3d.getYaw()
# Get the data of the laser sensor, which consists of 180 pairs of values
laser_data_Front = self.laser1.getLaserData()
laserFront = self.parse_laser_data(laser_data_Front)
laser_data_Rear = self.laser2.getLaserData()
laserRear = self.parse_laser_data(laser_data_Rear)
laser_data_Right = self.laser3.getLaserData()
laserRight = self.parse_laser_data(laser_data_Right)
laserFront_vectorized = self.get_laser_vector(laserFront)
laserRear_vectorized = self.get_laser_vector(laserRear)
laserRight_vectorized = self.get_laser_vector(laserRight)
# Average of the 180 values of the laser
laserFront_mean = np.mean(laserFront_vectorized, axis=0)
laserRear_mean = np.mean(laserRear_vectorized, axis=0)
laserRight_mean = np.mean(laserRight_vectorized, axis=0)
if self.StopTaxi == False:
if(self.DIST_RIGHT-self.MARGIN1)<=abs(laserRight_mean[1])<=(self.DIST_RIGHT+self.MARGIN1) and (self.DIST_REAR_SPOT-self.MARGIN1)<=abs(laserRear_mean[1])<=(self.DIST_REAR_SPOT+self.MARGIN1):
                # If the taxi is aligned with the car in front of the parking spot, the taxi stops
self.motors.sendV(0)
self.StopTaxi = True
if self.startTime == 0:
self.startTime = time.time()
else:
# If the taxi did not get to the car ahead, the taxi drives forward
self.motors.sendV(20)
else:
if (time.time() - self.startTime) <= self.startTimePark:
# The taxi stopped for a while
self.motors.sendV(0)
else:
if self.goForward == False:
# The taxi goes backward
if yawCar <= self.YAW_MAX and self.turn1 == False:
# The car is getting into the parking space
self.driveArc(-3, pi/4)
else:
# The taxi straightens
self.turn1 = True
self.driveArc(-3, -pi/7)
if (self.DIST_REAR_CARY-self.MARGIN2) <= abs(laserRear_mean[1]) <= (self.DIST_REAR_CARY+self.MARGIN2):
                        # If the taxi is very close to the car behind, it stops
self.goForward = True
self.motors.sendV(0)
self.motors.sendW(0)
else:
if yawCar <= -self.YAW_MARGIN or yawCar >= self.YAW_MARGIN:
# The taxi rectifies
self.driveArc(1, -pi/2)
else:
# When the car is straight, it stops and rectifies until it is centered in the parking spot
self.motors.sendW(0)
if (laser_data_Front.distanceData[90]/10 - laser_data_Rear.distanceData[90]/10) > self.DIST_MAX:
self.motors.sendV(2)
elif (laser_data_Rear.distanceData[90]/10 - laser_data_Front.distanceData[90]/10) > self.DIST_MAX:
self.motors.sendV(-2)
else:
# The taxi is parked
print('CAR PARKED')
self.motors.sendV(0)
|
RoboticsLabURJC/2016-tfg-irene-lope
|
AutoPark_Practice/MyAlgorithm.py
|
MyAlgorithm.py
|
py
| 6,482 |
python
|
en
|
code
| 1 |
github-code
|
6
|
21986767676
|
"""
Fixer for bytes -> str.
"""
import re
from crosswind import fixer_base
from crosswind.fixer_util_3to2 import Call, Comma, Name, parse_args, syms, token
from crosswind.patcomp import compile_pattern
_literal_re = re.compile(r"[bB][rR]?[\'\"]")
class FixBytes(fixer_base.BaseFix):
order = "pre"
PATTERN = "STRING | power< 'bytes' [trailer< '(' (args=arglist | any*) ')' >] > | 'bytes'"
def transform(self, node, results):
name = results.get("name")
arglist = results.get("args")
if node.type == token.NAME:
return Name("str", prefix=node.prefix)
elif node.type == token.STRING:
if _literal_re.match(node.value):
new = node.clone()
new.value = new.value[1:]
return new
if arglist is not None:
args = arglist.children
parsed = parse_args(args, ("source", "encoding", "errors"))
source, encoding, errors = (parsed[v] for v in ("source", "encoding", "errors"))
encoding.prefix = ""
str_call = Call(Name("str"), ([source.clone()]))
if errors is None:
node.replace(Call(Name(str(str_call) + ".encode"), (encoding.clone(),)))
else:
errors.prefix = " "
node.replace(Call(Name(str(str_call) + ".encode"), (encoding.clone(), Comma(), errors.clone())))
|
ryanwersal/crosswind
|
fixer_suites/three_to_two/fixes/fix_bytes.py
|
fix_bytes.py
|
py
| 1,410 |
python
|
en
|
code
| 11 |
github-code
|
6
|
7194454936
|
# THINGS TO DO
# Isolates + Member + Star < Bridge < Organizer
import networkx as nx
from community import community_louvain
import pandas as pd
import operator
# ORGANIZER/LIAISON/BROKER
G = nx.read_weighted_edgelist('Only_50_Employees1.csv', delimiter=',', create_using = nx.DiGraph(), nodetype=str)
page_score = dict(nx.pagerank(G))
eigen_score = dict(nx.eigenvector_centrality(G))
betweenness_score = dict(nx.betweenness_centrality(G))
mydicts = [page_score, betweenness_score, eigen_score]
df = pd.concat([pd.Series(d) for d in mydicts], axis=1).fillna(0).T
df.index = ['page_score', 'betweenness_score', 'eigen_score']
df = df.transpose()
del page_score, eigen_score, betweenness_score, mydicts
df = (df - df.mean()) / (df.max() - df.min())
minus_columns = ['page_score', 'betweenness_score', 'eigen_score']
df = df[minus_columns] + 1
df['score'] = df['page_score'] + df['betweenness_score'] + df['eigen_score']
del df['page_score'], df['betweenness_score'], df['eigen_score']
score_dict = df['score'].to_dict()
n = int(len(score_dict) * 0.10)
organizer_dict = dict(sorted(score_dict.items(), key=operator.itemgetter(1), reverse=True)[:n])
organizer_dict = {x: 0 for x in organizer_dict}
del score_dict, df, n, minus_columns
# BRIDGE/GATEKEEPER
G = nx.read_weighted_edgelist('Only_50_Employees1.csv', delimiter=',', create_using = nx.Graph(), nodetype=str)
gatekeeper = dict(nx.bridges(G))
gatekeeper_dict = {k: v for k, v in gatekeeper.items() if k not in organizer_dict}
gatekeeper_dict = {x: 1 for x in gatekeeper_dict}
del gatekeeper
# STAR/TEAM-PLAYER
G = nx.read_weighted_edgelist('Only_50_Employees1.csv', delimiter=',', create_using = nx.Graph(), nodetype=str)
part = community_louvain.best_partition(G) # Finding Communities
invert_partition = {v: k for k, v in part.items()}
star_dict = {} # iterate over each community
for community_id in invert_partition.keys(): #Extract the sub graph containing the community nodes
temp_graph = G.subgraph(invert_partition[community_id])
temp_degree = dict(temp_graph.degree()) #Extract the degrees in the subgraph
star_dict[community_id] = max(temp_degree, key=lambda x: temp_degree[x]) #Store it in a dictionary, with key as community_id and value as the node with max degree
star_dict = dict((v,k) for k,v in sorted(star_dict.items(), key=operator.itemgetter(1)))
star_dict = {k: v for k, v in star_dict.items() if k not in organizer_dict}
star_dict = {k: v for k, v in star_dict.items() if k not in gatekeeper_dict}
star_dict = {x: 2 for x in star_dict}
del community_id, invert_partition, part, temp_degree
# ISOLATES
isolate_dict = dict(G.degree())
isolate_dict = {key:val for key, val in isolate_dict.items() if val == 1 or val == 0}
isolate_dict = {x: 3 for x in isolate_dict}
# Integration of Final Appointed Roles
final_roles = {**organizer_dict, **gatekeeper_dict, **star_dict, **isolate_dict}
del organizer_dict, gatekeeper_dict, star_dict, isolate_dict
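# Role codes in final_roles, following the precedence note at the top of the script:
# 0 = organizer/liaison/broker, 1 = bridge/gatekeeper, 2 = star/team-player, 3 = isolate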
|
AnnaMudano/Msc-Students
|
Unofficial_Roles_Script.py
|
Unofficial_Roles_Script.py
|
py
| 3,132 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6518783432
|
#!/usr/bin/env python
import datetime
from elasticsearch import Elasticsearch
from jobs.lib import Configuration
from jobs.lib import Send_Alert
local_config = {
"minutes": 5,
"index": "servers-*",
"max_results": 1000,
"severity": "low"
}
# Query goes here
search_query = {
"query": {
"bool": {
"must": [],
"filter": [
{
"range": {
"@timestamp": {
"format": "strict_date_optional_time",
"gte": datetime.datetime.utcnow() - datetime.timedelta(minutes=local_config["minutes"]),
"lte": datetime.datetime.utcnow()
}
}
},
{
"match_phrase": {
"winlog.channel": "Security"
}
},
{
"match_phrase": {
"winlog.event_id": "4740"
}
}
], }}, }
def init():
config = Configuration.readconfig()
connection = str(config["elasticsearch"]["connection"])
es = Elasticsearch([connection], verify_certs=False, ssl_show_warn=False)
res = es.search(index=local_config["index"], body=search_query, size=local_config["max_results"])
# Iterate through results
for doc in res.get('hits', {}).get('hits'):
username = doc.get('_source', {}).get('user', {}).get('target', {}).get('name')
Send_Alert.send(username + " account was locked in AD", local_config["severity"])
|
0xbcf/elasticsearch_siem
|
jobs/LockedADAccount.py
|
LockedADAccount.py
|
py
| 1,430 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70211332028
|
from valohai import Pipeline
def main(config) -> Pipeline:
    # Create a pipeline called "sharkpipe".
pipe = Pipeline(name="sharkpipe", config=config)
# Define the pipeline nodes.
fetch = pipe.execution("fetch_data")
process = pipe.execution("pre_process")
pepare_text = pipe.execution("pepare_text")
fine_tune = pipe.execution("experiment")
# Configure the pipeline, i.e. define the edges.
fetch.output("*").to(process.input("attacksmini"))
process.output("*").to(pepare_text.input("attacksminiprocessed"))
pepare_text.output("train.csv").to(fine_tune.input("train"))
pepare_text.output("val.csv").to(fine_tune.input("val"))
pepare_text.output("test.csv").to(fine_tune.input("test"))
pepare_text.output("my_dict.csv").to(fine_tune.input("my_dict"))
return pipe
|
eikku/shark-attacks
|
create_pipeline.py
|
create_pipeline.py
|
py
| 824 |
python
|
en
|
code
| 2 |
github-code
|
6
|
29771468848
|
print("Python Program to Find Numbers Divisible by Another Number")
try:
num=int(input("Enter the number :"))
div=[]
if num>0:
        for i in range(1,101): # check candidate divisors from 1 to 100
            if num % i==0 and i!=num:
                div.append(i)
        print(f"List of divisors of {num}: {div}")
except Exception as e:
print(e)
|
engineerscodes/PyVisionHUB
|
PyStuff/01.Basic/Lab/divnum.py
|
divnum.py
|
py
| 356 |
python
|
en
|
code
| 4 |
github-code
|
6
|
2245791102
|
while True:
multiply = 1
list1 = []
    number = int(input("Please enter a number for the factorial."))
while (number != 0):
list1.append(number)
multiply = multiply * number
number = number - 1
print(list1)
print(multiply)
|
alpayalyn/Factorial_Calculation
|
main.py
|
main.py
|
py
| 284 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43626835774
|
class Solution(object):
def threeEqualParts(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
IMP = [-1, -1]
s = sum(A)
if s%3: return IMP
t = s // 3
if t == 0:
return [0, len(A)-1]
breaks = []
su = 0
for i, val in enumerate(A):
if val:
su += val
if su in {1, t+1, 2*t+1}:
breaks.append(i)
if su in {t, 2*t, 3*t}:
breaks.append(i)
i1, j1, i2, j2, i3, j3 = breaks
if not(A[i1:j1+1] == A[i2:j2+1] == A[i3:j3+1]):
return [-1, -1]
x = i2-j1-1
y = i3-j2-1
z = len(A)-j3-1
if x < z or y < z: return IMP
j1 += z
j2 += z
return [j1, j2+1]
def test(self):
testCases = [
[1,0,1,1,0],
# [0,1,0,1,1],
# [1,0,1,0,1],
# [1,1,0,1,1],
]
for arr in testCases:
res = self.threeEqualParts(arr)
print('res: %s' % res)
print('-='*30+'-')
if __name__ == '__main__':
Solution().test()
|
MichaelTQ/LeetcodePythonProject
|
solutions/leetcode_0901_0950/LeetCode0927_ThreeEqualParts.py
|
LeetCode0927_ThreeEqualParts.py
|
py
| 1,187 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73928148349
|
import random
import string
import factory
from django.contrib.auth import get_user_model
from reviews.models import Doctor, Review, Specialty
User = get_user_model()
def random_string(length=10):
return u"".join(random.choice(string.ascii_letters) for x in range(length))
class DoctorFactory(factory.django.DjangoModelFactory):
class Meta:
model = "reviews.Doctor"
first_name = "Ай"
last_name = "Болит"
patronymic = "Вениаминович"
class SpecFactory(factory.django.DjangoModelFactory):
class Meta:
model = "reviews.Specialty"
title = factory.LazyAttribute(lambda t: random_string())
class UserFactory(factory.django.DjangoModelFactory):
class Meta:
model = User
username = factory.LazyAttribute(lambda t: random_string())
email = "[email protected]"
password = "superpassword"
class ReviewFactory(factory.DjangoModelFactory):
class Meta:
model = "reviews.Review"
author = factory.SubFactory(UserFactory)
doctor = factory.SubFactory(DoctorFactory)
ip_address = "127.0.0.1"
text = factory.LazyAttribute(lambda t: random_string())
|
idesu/review_moderation_lite
|
reviews/tests/factories.py
|
factories.py
|
py
| 1,158 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32731754668
|
from collections import deque
n, m, v = map(int, input().split())
lst = [[] for _ in range(n+1)]
visit_d = [0] * (n+1)
bfs_q = []
for i in range(m):
a, b = map(int, input().split())
lst[a].append(b)
lst[b].append(a)
# sort each node's adjacency list
for i in range(1, n+1):
lst[i].sort()
def dfs(start):
visit_d[start] = 1
print(start, end=' ')
for i in lst[start]:
if(visit_d[i] == 0):
dfs(i)
def bfs(start):
bfs_q = deque([start])
visit_b = [0] * (n+1)
visit_b[start] = 1
while(bfs_q):
find = bfs_q.popleft()
print(find, end=' ')
for i in lst[find]:
if(visit_b[i] == 0):
bfs_q.append(i)
visit_b[i] = 1
dfs(v)
print()
bfs(v)
|
woo222/baekjoon
|
python/그래프/s2_1260_DFS와 BFS.py
|
s2_1260_DFS와 BFS.py
|
py
| 773 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21951283838
|
# A cross-country skier began training by running 10 km on the first day. Each following day he increased the distance
# by P percent of the previous day's run (P is a real number, 0 < P < 50).
# Given P, determine after which day the skier's total distance over all days exceeds 200 km, and output the found
# number of days K (an integer) and the total distance S (a real number).
a = 10
try:
print("P — вещественное, 0< P <50")
p = float(input("Введите число р:"))
while type(p) != float:
try:
float(p)
except TypeError:
print("Неправильно ввели число p !")
p = float(input("Введите число р:"))
if not(0<p<50):
print("Ошибка!Введите число в правильном диапазоне")
else:
k_d =1
s= a
while s != 200:
a = a+((a/100)*p)
k_d+=1
s +=a
if s >=200:
print(f"Количество дней: {k_d} \nCуммарный пробег: {s} км")
break
except Exception:
print("Ошибка! Введите корректное значение")
|
DaNil4594/EremenkoPythonProject
|
PZ_4/PZ_4_2.py
|
PZ_4_2.py
|
py
| 1,502 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
27259248370
|
"""We are the captains of our ships, and we stay 'till the end. We see our stories through.
"""
"""70. Climbing Stairs [Constant Space]
"""
class Solution:
def climbStairs(self, n):
if n <= 2:
return n
first, second = 1, 2
num_ways = 0
for _ in range(3, n+1):
num_ways = first + second
first = second
second = num_ways
return num_ways
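# A quick sanity check of the recurrence (usage sketch, not part of the original solution file):
# the answer follows Fibonacci, ways(n) = ways(n-1) + ways(n-2), so n = 5 should give 8.
if __name__ == '__main__':
    assert Solution().climbStairs(2) == 2
    assert Solution().climbStairs(5) == 8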
|
asperaa/back_to_grind
|
DP/70. Climbing Stairs_constant_space.py
|
70. Climbing Stairs_constant_space.py
|
py
| 430 |
python
|
en
|
code
| 1 |
github-code
|
6
|
21764409471
|
from unittest import TestCase
def reverseInt(i: int) -> int:
result = 0
while i:
result = result * 10 + i % 10
i = int(i/10)
    return result
class Test(TestCase):
def test_reverse_int(self):
        answer = reverseInt(354)
        self.assertEqual(answer, 453)
|
debajyoti3061/crackingg_python
|
array/ReverseInteger.py
|
ReverseInteger.py
|
py
| 258 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17591799943
|
import requests
headers = {
'Host': 'bagel.htb:8000',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 Firefox/91.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Connection': 'close',
'Upgrade-Insecure-Requests': '1',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache',
}
# Open the log file for writing
with open("log", "w") as log_file:
# Loop through the range of process IDs
for proc_id in range(1, 1001):
# Construct the URL for the current process ID
page_url = f"http://bagel.htb:8000/?page=../../../../../../../../proc/{proc_id}/cmdline"
# Use requests to fetch the page contents
response = requests.get(page_url, headers=headers, verify=False)
# Write the response content to the log file
log_file.write(f"Contents of /proc/{proc_id}/cmdline:\n{response.content.decode()}\n\n")
|
0xRoqeeb/scripts
|
ProcScanner/proscanner.py
|
proscanner.py
|
py
| 984 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30935953705
|
import math
a= input("请输入:")
b=a.split(",")
s=0
list1=[]
for i in b:
if len(i)!=4:
break;
else:
h=list(i)
for c in h:
t=h.index(c)
s+=int(c)*math.pow(2,3-t)
print(s)
list1.append(s)
print(list1)
# for d in list1:
# if d%5==0:
# print(b[list1.index(d)])
# else:
# print(d)
|
wuyijian123456/test1
|
venv/case/demo10.py
|
demo10.py
|
py
| 384 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34318970452
|
import re
import hashlib
dd_file = 'Project2.dd'
with open(dd_file, "rb") as f:
content = f.read()
f.close()
#signatures
JPEG_SOF = b'\xFF\xD8\xFF\xE0' #or b'\xFF\xD8\xFF\xDB'
JPEG_SOF2 = b'\xFF\xD8\xFF\xDB'
JPEG_EOF = b'\xFF\xD9\x00\x00\x00'
#creating a list of matches for the start-of-file signature so further work can be done to deduce whether it's an actual file
SOF1_list = [match.start() for match in re.finditer(re.escape(JPEG_SOF), content)]
SOF2_list =[match.start() for match in re.finditer(re.escape(JPEG_SOF2), content)]
EOF_list = [match.start() for match in re.finditer(re.escape(JPEG_EOF), content)]
SOF_list = []
i=0
while(i<len(SOF1_list)):
SOF_list.append(SOF1_list[i])
i+=1
i=0
while(i<len(SOF2_list)):
    # only keep offsets that were not already matched by the first signature
    if SOF2_list[i] not in SOF1_list:
        SOF_list.append(SOF2_list[i])
    i+=1
#sort both offset lists so that start and end offsets can be paired up consistently
SOF_list.sort()
EOF_list.sort()
'''This code validates that the start of a file must come before the end of a file. E.g. a file can't start at byte 100
and end at byte 98; that would be impossible. When the SOF and EOF lists differ in length, end-of-file offsets that fall
before their corresponding start-of-file offset are deleted as false positives.'''
if len(SOF_list) != len(EOF_list):
i = 0
while i<len(EOF_list):
if SOF_list[i]>EOF_list[i]:
del EOF_list[i]
i = i + 1
#file carving
i = 0
for SOF in SOF_list:
subdata=content[SOF:EOF_list[i]+2]
carve_filename=str(SOF)+"_"+str(EOF_list[i])+".jpg"
print("Found JPG starting offset", str(SOF), "End offset", str(EOF_list[i]))
carve_obj = open(carve_filename, 'wb')
carve_obj.write(subdata)
carve_obj.close()
i = i + 1
print("carving it to " + carve_filename)
#sha256 sum
with open(carve_filename, "rb") as f:
bytes = f.read() # read entire file as bytes
readable_hash = hashlib.sha256(bytes).hexdigest()
print("SHA256:", readable_hash,"\n")
|
jasonralexander/Comp6970DFIR
|
JPG.py
|
JPG.py
|
py
| 2,069 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73415902268
|
"""
Implement an algorithm to determine if a string has all unique characters. What if you can not use additional data structures?
"""
def uniqueString(aStr):
""" an elegant pythonic solution"""
aStr = sorted(aStr)
for i in aStr:
if aStr.count(i) > 1:
return False
else:
continue
return True
aStr = "abcdefg"
print(uniqueString(aStr))
print(aStr)
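# The docstring also asks what to do without additional data structures. One option is pairwise
# comparison: O(n^2) time but O(1) extra space. A minimal sketch for illustration (the function
# name is an assumption, not part of the original exercise):
def uniqueStringNoExtraSpace(aStr):
    for i in range(len(aStr)):
        for j in range(i + 1, len(aStr)):
            if aStr[i] == aStr[j]:
                return False
    return True
print(uniqueStringNoExtraSpace(aStr))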
|
AndreiBratkovski/Training
|
CCC-school-work/Arrays and Strings/UniqueString.py
|
UniqueString.py
|
py
| 362 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34729450959
|
# -*- coding: utf-8 -*-
# © 2020 FreeDoo: Juan Ignacio Úbeda <[email protected]>
# © 2020 Avanzosc: Ana Juaristi <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import fields, models, api
import datetime
class ResCity(models.Model):
_inherit = 'res.city'
partner_zone_id = fields.Many2one(comodel_name='partner.delivery.zone', string='Zone')
class PartnerDeliveryZone(models.Model):
_inherit = 'partner.delivery.zone'
city_ids = fields.One2many(comodel_name="res.city",
inverse_name="partner_zone_id",
string="Cities")
class ResCityZip(models.Model):
_inherit = 'res.city.zip'
partner_zone_id = fields.Many2one(comodel_name='partner.delivery.zone',
string='Zone',
related='city_id.partner_zone_id')
|
JuaniFreedoo/BaserrikoPlaza
|
geonames_delivery_zone_link/models/delivery_carrier.py
|
delivery_carrier.py
|
py
| 810 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9054587294
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 28 16:52:49 2021
@author: shabalin
Utils to work with fable and hexrd functions.
"""
import sys, os
import numpy as np
import yaml, subprocess
#import cbftiffmxrdfix
def run_peaksearch(par_file=None):
""" Wrapper for the ImageD11 peaksearch.py script"""
with open(par_file) as f:
pars = yaml.safe_load(f)
if pars['stem_out'] == None:
pars['stem_out'] = ''
first_im = int(pars['first_image'])
last_im = int(pars['first_image']) + int(pars['nbr_images']) - 1
ndigits = pars['ndigits']
path_inp = os.path.join(pars['image_path'],pars['image_stem'])
path_out = os.path.join(pars['output_dir'], pars['stem_out']+pars['det_code']+'_peaks')
# construct the command for peaksearch.py
command = ('peaksearch.py -n {} -F {} -f {:d} -l {:d} -o {} -d {} -p Y --ndigits {:d} -S {:.3f} -T {:.3f} '.format(
path_inp,pars['filetype'],first_im,last_im,path_out,
pars['dark_image'],ndigits,pars['omegastep'], pars['startomega']
))
# Adds threshold values to command
for t in pars['thresholds']:
command += '-t {:d} '.format(t)
# Adds keyword args
if 'kwargs' in pars:
command += '{} '.format(pars['kwargs'])
# modify command for lunarc
if 'lunarc' in pars:
command = lunarc_path + command
print('Running peaksearch with the following command:')
print(command)
try:
subprocess.call(command, shell=True)
except AttributeError as a:
print('peaksearch.py ended with error. It seems to work nonetheless.', a)
del pars, first_im, last_im, ndigits, path_inp, path_out, command
return
def merge_peaks(par_file, config_file):
# Wrapper for ImageD11 merge_flt.py
if (par_file is None):
raise ValueError('Must supply par_file to run_peaksearcher')
with open(par_file) as f:
pars = yaml.safe_load(f)
if pars['stem_out'] == None:
pars['stem_out'] = ''
if 'merged_name' in pars:
file_out = os.path.join(pars['output_dir'],pars['stem_out']+pars['merged_name'])
else:
file_out = os.path.join(pars['output_dir'],pars['stem_out']+pars['det_code']+'_peaks_merged.flt')
inp = os.path.join(pars['output_dir'], pars['stem_out']+pars['det_code']+'_peaks')
print('Merging flt files matching {}'.format(inp))
if not config_file:
config_file = 'junk'
command = 'merge_flt.py {} {} {} {:d} '.format(config_file,inp,file_out,pars['pixel_tol']) + ('{:d} '*len(pars['thresholds'])).format(*pars['thresholds'])
# modify command for lunarc
if 'lunarc' in pars:
command = lunarc_path + command
print(command)
subprocess.call(command, shell=True)
del pars, file_out, inp, command
return
def hexrd_to_fable(path_to_hexrd_yml, path_to_fable_par, det=1, mat='Nb'):
detname = 'detector_{:d}'.format(det)
if mat=='ruby':
cell_params = { "a": 4.7608, "b": 4.7608, "c": 12.99568, "alpha": 90.0, "beta": 90.0, "gamma": 120.0, "lattice": 'R'}
elif mat=='Nb':
cell_params = { "a": 3.3042, "b": 3.3042, "c": 3.3042, "alpha": 90.0, "beta": 90.0, "gamma": 90.0, "lattice": 'I'}
elif mat=='CeO2':
cell_params = { "a": 5.41153, "b": 5.41153, "c": 5.41153, "alpha": 90.0, "beta": 90.0, "gamma": 90.0, "lattice": 'F'}
elif mat=='Ti':
cell_params = { "a": 2.9505, "b": 2.9505, "c": 4.6826, "alpha": 90.0, "beta": 90.0, "gamma": 120.0, "lattice": 'P'}
else:
print('ERROR! Incorrect material!')
with open(path_to_hexrd_yml) as f:
pars = yaml.safe_load(f)
wavelength = 12.39842/pars['beam']['energy']
translation = pars['detectors'][detname]['transform']['translation']
tilt = pars['detectors'][detname]['transform']['tilt']
frame_size = [pars['detectors'][detname]['pixels']['columns'], pars['detectors'][detname]['pixels']['rows']]
pix_size = pars['detectors'][detname]['pixels']['size']
if os.path.exists(path_to_fable_par):
if input('File %s already exist! Overwrite it? (y/n):' % path_to_fable_par) != 'y':
print('Aborted!')
return
else:
pass
else:
pass
f = open(path_to_fable_par,'w')
f.write( 'cell__a {}'.format(cell_params['a']) )
f.write( '\ncell__b {}'.format(cell_params['b']) )
f.write( '\ncell__c {}'.format(cell_params['c']) )
f.write( '\ncell_alpha {}'.format(cell_params['alpha']) )
f.write( '\ncell_beta {}'.format(cell_params['beta']) )
f.write( '\ncell_gamma {}'.format(cell_params['gamma']) )
f.write( '\ncell_lattice_[P,A,B,C,I,F,R] {}'.format(cell_params['lattice']) )
f.write( '\nchi {}'.format(0.0) )
f.write( '\ndistance {}'.format((-translation[2]*1000)) )
f.write( '\nfit_tolerance {}'.format(0.5) )
f.write( '\nmin_bin_prob {}'.format(1e-05) )
f.write( '\nno_bins {}'.format(10000) )
f.write( '\no11 {}'.format(0) )
f.write( '\no12 {}'.format(-1) )
f.write( '\no21 {}'.format(1) )
f.write( '\no22 {}'.format(0) )
f.write( '\nomegasign {}'.format(1.0) )
f.write( '\nt_x {}'.format(0) )
f.write( '\nt_y {}'.format(0) )
f.write( '\nt_z {}'.format(0) )
f.write( '\ntilt_x {}'.format(tilt[2]) )
f.write( '\ntilt_y {}'.format(tilt[1]) ) # -?
f.write( '\ntilt_z {}'.format(tilt[0]) )
f.write('\nwavelength {:0.6f}'.format(wavelength) )
f.write( '\nwedge {}'.format(0.0) )
f.write( '\nweight_hist_intensities {}'.format(0) )
f.write( '\ny_center {}'.format((translation[1]/pix_size[1] + frame_size[1]/2)) )
f.write( '\ny_size {}'.format((pix_size[1]*1000)) )
f.write( '\nz_center {}'.format((translation[0]/pix_size[0] + frame_size[0]/2)) )
f.write( '\nz_size {}'.format((pix_size[0]*1000)) )
f.close()
del detname, cell_params, pars, wavelength, translation, tilt, frame_size, pix_size
return
def fable_to_hexrd(path_to_fable_par, path_to_hexrd_yml):
y_frm_size = 2880
z_frm_size = 2880
with open(path_to_fable_par) as f:
for line in f:
if ('distance' in line):
dist = float(line.split()[1])/1000
elif ('tilt_x' in line):
tilt_1 = float(line.split()[1])
elif ('tilt_y' in line):
tilt_2 = float(line.split()[1])
elif ('tilt_z' in line):
tilt_3 = float(line.split()[1])
elif ('wavelength' in line):
wavelength = float(line.split()[1])
elif ('y_center' in line):
y_cen = float(line.split()[1])
elif ('y_size' in line):
y_pix_size = float(line.split()[1])/1000
elif ('z_center' in line):
z_cen = float(line.split()[1])
elif ('z_size' in line):
z_pix_size = float(line.split()[1])/1000
f.close()
pars = {'beam':
{'energy': 12.39842/wavelength, 'vector': {'azimuth': 90.0, 'polar_angle': 90.0}},
'detectors':
{'detector_1':
{'buffer': None,
'pixels': {'columns': y_frm_size, 'rows': z_frm_size, 'size': [z_pix_size, y_pix_size]},
'saturation_level': 14000.0,
'transform': {'tilt': [tilt_1, tilt_2, tilt_3], 'translation': [(z_cen-z_frm_size/2)*z_pix_size, (y_cen-y_frm_size/2)*y_pix_size, -dist]}}},
'id': 'instrument',
'oscillation_stage': {'chi': 0.0, 'translation': [0.0, 0.0, 0.0]}}
if os.path.exists(path_to_hexrd_yml):
if input('File %s already exist! Overwrite it? (y/n):' % path_to_hexrd_yml) != 'y':
print('Aborted!')
return
else:
pass
else:
pass
with open(path_to_hexrd_yml, 'w') as f:
yaml.dump(pars, f)
del y_frm_size, z_frm_size, pars, dist, tilt_1, tilt_2, tilt_3, wavelength, y_cen, y_pix_size, z_cen, z_pix_size
return
|
agshabalin/py3DXRD
|
.ipynb_checkpoints/fable_hexrd_utils-checkpoint.py
|
fable_hexrd_utils-checkpoint.py
|
py
| 7,948 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10282905855
|
import os
from flask import Flask
from flask_modals import Modal
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy, Pagination
from importlib import import_module
from apps.utils.stocks_properties import read_properties_file
db = SQLAlchemy()
login_manager = LoginManager()
print('El path de la aplicacion es : ',__path__)
props = read_properties_file('finanzas.properties')
sql_scripts = read_properties_file('sql_scripts.properties')
def register_extensions(app):
db.init_app(app)
print('1 Register extension')
login_manager.init_app(app)
def register_blueprints(app):
print('1 Register blueprints')
for module_name in ('authentication', 'home', 'masterplan', 'organizations', 'reports'):
module = import_module('apps.{}.routes'.format(module_name))
app.register_blueprint(module.blueprint)
def configure_database(app):
@app.before_first_request
def initialize_database():
print('3 configure database')
try:
print('#### Creando la base de datos ####')
db.create_all()
#from . import db
#db.init_app(app)
except Exception as e:
print('> Error: DBMS Exception: ' + str(e) )
# fallback to SQLite
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'db.sqlite3')
print('> Fallback to SQLite ')
db.create_all()
@app.teardown_request
def shutdown_session(exception=None):
db.session.remove()
def create_app(config):
print('4 Create app')
app = Flask(__name__)
modal = Modal(app)
app.config.from_object(config)
register_extensions(app)
register_blueprints(app)
configure_database(app)
return app
|
qa8990/reports
|
apps/__init__.py
|
__init__.py
|
py
| 1,875 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70452843067
|
# Programming 102 Lab 2
# * 2.1 Write sum() from scratch
def sum(numbers):
total = 0
for number in numbers:
        total += number
return total
# * 2.2 Use a REPL to build a list of numbers
def collector():
    print('Please enter the number to be added:')
    print('(enter \'done\' to see total or \'cancel\' to exit)')
    valid = ['done', 'cancel']
    response = input()
    if response in valid:
        pass
    elif response == '':
        response = 0
    elif response.replace('.', '', 1).isdigit():
        response = float(response)
    else:
        response = 'invalid'
return response
numbers = []
response = collector()
message = 'Invalid response.'
while response != 'invalid':
if response == 'cancel':
message = 'ok bye'
break
elif response == 'done':
total = sum(numbers)
message = f'Your total is: {total}'
break
elif response == 0:
print('Empty entry has been ignored.')
response = collector()
elif float(response):
numbers.append(response)
message = f'{response} has been added to list.'
print(f'Your current list: {numbers}')
print('')
response = collector()
print(message)
|
austenc-id/Guild
|
0 - Prep Course/week-2/lab_number_lists.py
|
lab_number_lists.py
|
py
| 1,263 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73554109308
|
# Divisor takes in a number and returns all the divisors of that number
# ie div(13) == [1, 13]
# div(4) == [1, 2, 4]
def div(num):
divList = []
for i in range(1, int(num / 2) + 1):
if num % i == 0:
divList.append(i)
divList.append(num)
return divList
num = int(input("Choose a number to get divisors of "))
print(div(num))
|
LeoTheMighty/beginner_python_exercises
|
Divisor.py
|
Divisor.py
|
py
| 369 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17609317181
|
# encoding: utf-8
import os
import binascii
from collections import OrderedDict
import cachemodel
from basic_models.models import CreatedUpdatedAt
from django.urls import reverse
from django.db import models, transaction
from django.db.models import Q
from entity.models import BaseVersionedEntity
from issuer.models import BaseAuditedModelDeletedWithUser, BadgeInstance
from backpack.sharing import SharingManager
from issuer.utils import CURRENT_OBI_VERSION, get_obi_context, add_obi_version_ifneeded
from mainsite.managers import SlugOrJsonIdCacheModelManager
from mainsite.models import BadgrApp
from mainsite.utils import OriginSetting
class BackpackCollection(BaseAuditedModelDeletedWithUser, BaseVersionedEntity):
entity_class_name = 'BackpackCollection'
name = models.CharField(max_length=128)
description = models.CharField(max_length=255, blank=True)
share_hash = models.CharField(max_length=255, null=False, blank=True)
# slug has been deprecated, but keep for legacy collections redirects
slug = models.CharField(max_length=254, blank=True, null=True, default=None)
assertions = models.ManyToManyField('issuer.BadgeInstance', blank=True, through='backpack.BackpackCollectionBadgeInstance')
cached = SlugOrJsonIdCacheModelManager(slug_kwarg_name='entity_id', slug_field_name='entity_id')
def publish(self):
super(BackpackCollection, self).publish()
self.publish_by('share_hash')
self.created_by.publish()
def delete(self, *args, **kwargs):
super(BackpackCollection, self).delete(*args, **kwargs)
self.publish_delete('share_hash')
self.created_by.publish()
def save(self, **kwargs):
if self.pk:
BackpackCollectionBadgeInstance.objects.filter(
Q(badgeinstance__acceptance=BadgeInstance.ACCEPTANCE_REJECTED) | Q(badgeinstance__revoked=True)
).delete()
super(BackpackCollection, self).save(**kwargs)
@cachemodel.cached_method(auto_publish=True)
def cached_badgeinstances(self):
return self.assertions.filter(
revoked=False,
acceptance__in=(BadgeInstance.ACCEPTANCE_ACCEPTED, BadgeInstance.ACCEPTANCE_UNACCEPTED)
)
@cachemodel.cached_method(auto_publish=True)
def cached_collects(self):
return self.backpackcollectionbadgeinstance_set.filter(
badgeinstance__revoked=False,
badgeinstance__acceptance__in=(BadgeInstance.ACCEPTANCE_ACCEPTED,BadgeInstance.ACCEPTANCE_UNACCEPTED)
)
@property
def owner(self):
from badgeuser.models import BadgeUser
return BadgeUser.cached.get(id=self.created_by_id)
# Convenience methods for toggling published state
@property
def published(self):
return bool(self.share_hash)
@published.setter
def published(self, value):
if value and not self.share_hash:
self.share_hash = str(binascii.hexlify(os.urandom(16)), 'utf-8')
elif not value and self.share_hash:
self.publish_delete('share_hash')
self.share_hash = ''
@property
def share_url(self):
if self.published:
return OriginSetting.HTTP+reverse('collection_json', kwargs={'entity_id': self.share_hash})
def get_share_url(self, **kwargs):
return self.share_url
@property
def badge_items(self):
return self.cached_badgeinstances()
@badge_items.setter
def badge_items(self, value):
"""
Update this collection's list of BackpackCollectionBadgeInstance from a list of BadgeInstance EntityRelatedFieldV2 serializer data
:param value: list of BadgeInstance instances or list of BadgeInstance entity_id strings.
"""
def _is_in_requested_badges(entity_id):
if entity_id in value:
return True
try:
if entity_id in [i.entity_id for i in value]:
return True
except AttributeError:
pass
return False
with transaction.atomic():
existing_badges = {b.entity_id: b for b in self.badge_items}
# add missing badges
for badge_reference in value:
try:
if isinstance(badge_reference, BadgeInstance):
badgeinstance = badge_reference
else:
badgeinstance = BadgeInstance.cached.get(entity_id=badge_reference)
except BadgeInstance.DoesNotExist:
pass
else:
if badgeinstance.entity_id not in list(existing_badges.keys()):
BackpackCollectionBadgeInstance.cached.get_or_create(
collection=self,
badgeinstance=badgeinstance
)
# remove badges no longer in collection
for badge_entity_id, badgeinstance in list(existing_badges.items()):
if not _is_in_requested_badges(badge_entity_id):
BackpackCollectionBadgeInstance.objects.filter(
collection=self,
badgeinstance=badgeinstance
).delete()
def get_json(self, obi_version=CURRENT_OBI_VERSION, expand_badgeclass=False, expand_issuer=False, include_extra=True):
obi_version, context_iri = get_obi_context(obi_version)
json = OrderedDict([
('@context', context_iri),
('type', 'Collection'),
('id', add_obi_version_ifneeded(self.share_url, obi_version)),
('name', self.name),
('description', self.description),
('entityId', self.entity_id),
('owner', OrderedDict([
('firstName', self.cached_creator.first_name),
('lastName', self.cached_creator.last_name),
]))
])
json['badges'] = [b.get_json(obi_version=obi_version,
expand_badgeclass=expand_badgeclass,
expand_issuer=expand_issuer,
include_extra=include_extra) for b in self.cached_badgeinstances()]
return json
@property
def cached_badgrapp(self):
creator = self.cached_creator
if creator and creator.badgrapp_id:
return BadgrApp.objects.get(pk=creator.badgrapp_id)
return BadgrApp.objects.get_current(None)
class BackpackCollectionBadgeInstance(cachemodel.CacheModel):
collection = models.ForeignKey('backpack.BackpackCollection',
on_delete=models.CASCADE)
badgeuser = models.ForeignKey('badgeuser.BadgeUser', null=True, default=None,
on_delete=models.CASCADE)
badgeinstance = models.ForeignKey('issuer.BadgeInstance',
on_delete=models.CASCADE)
def publish(self):
super(BackpackCollectionBadgeInstance, self).publish()
self.collection.publish()
def delete(self):
super(BackpackCollectionBadgeInstance, self).delete()
self.collection.publish()
@property
def cached_badgeinstance(self):
return BadgeInstance.cached.get(id=self.badgeinstance_id)
@property
def cached_collection(self):
return BackpackCollection.cached.get(id=self.collection_id)
class BaseSharedModel(cachemodel.CacheModel, CreatedUpdatedAt):
SHARE_PROVIDERS = [(p.provider_code, p.provider_name) for code,p in list(SharingManager.ManagerProviders.items())]
provider = models.CharField(max_length=254, choices=SHARE_PROVIDERS)
source = models.CharField(max_length=254, default="unknown")
class Meta:
abstract = True
def get_share_url(self, provider, **kwargs):
raise NotImplementedError()
class BackpackBadgeShare(BaseSharedModel):
badgeinstance = models.ForeignKey("issuer.BadgeInstance", null=True,
on_delete=models.CASCADE)
def get_share_url(self, provider, **kwargs):
return SharingManager.share_url(provider, self.badgeinstance, **kwargs)
class BackpackCollectionShare(BaseSharedModel):
collection = models.ForeignKey('backpack.BackpackCollection', null=False,
on_delete=models.CASCADE)
def get_share_url(self, provider, **kwargs):
return SharingManager.share_url(provider, self.collection, **kwargs)
| reedu-reengineering-education/badgr-server | apps/backpack/models.py | models.py | py | 8,542 | python | en | code | 2 | github-code | 6 |
6415578892 |
# Task 15
quantWatermelon = int(input("Enter the number of watermelons: "))
minWater = maxWater = int(input("Enter the weight of a watermelon: "))
for i in range(1, quantWatermelon):
    temp = int(input("Enter the weight of a watermelon: "))
    if temp > maxWater:
        maxWater = temp
    elif temp < minWater:
        minWater = temp
print(f"For the mother-in-law: {minWater}")
print(f"For yourself: {maxWater}")
| ApostaLOxsar/Pyton | Les2/Task15.py | Task15.py | py | 460 | python | ru | code | 0 | github-code | 6 |
70075741628 |
# -*- encoding:utf-8 -*-
'''
@time: 2019/12/21 8:28 下午
@author: huguimin
@email: [email protected]
One document (doc) corresponds to one sample
'''
import math
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from layers.dynamic_rnn import DynamicLSTM
from layers.attention import Attention
class GraphConvolution(nn.Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = nn.Parameter(
torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
def forward(self, text, adj):
hidden = torch.matmul(text, self.weight)
denom = torch.sum(adj, dim=2, keepdim=True) + 1
output = torch.matmul(adj, hidden) / denom
if self.bias is not None:
return output + self.bias
else:
return output
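# A quick sketch of what GraphConvolution.forward computes (comment only; assumes
# text has shape [batch, nodes, in_features] and adj has shape [batch, nodes, nodes]):
#   hidden = text @ W                               -> [batch, nodes, out_features]
#   output = (adj @ hidden) / (rowsum(adj) + 1)     # mean-style aggregation over neighbours
#   return output + bias (when bias is enabled)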
class ECGCN(nn.Module):
def __init__(self, word_embedding, pos_embedding, opt):
super(ECGCN, self).__init__()
self.opt = opt
self.embed = nn.Embedding.from_pretrained(torch.tensor(word_embedding, dtype=torch.float))
self.pos_embed = nn.Embedding.from_pretrained(torch.tensor(pos_embedding, dtype=torch.float))
self.word_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)#(32,75,45,200)
self.clause_encode = Attention(2*opt.hidden_dim, 1, opt.max_sen_len, opt)#(32,75,200)
# gcn
# self.gc1 = GraphConvolution(2*opt.hidden_dim, 2*opt.hidden_dim)
# self.gc2 = GraphConvolution(2*opt.hidden_dim, 2*opt.hidden_dim)
# self.gc3 = GraphConvolution(2*opt.hidden_dim, 2*opt.hidden_dim)
#gat
# self.ga1 = GAT(2*opt.hidden_dim, 2*opt.hidden_dim, self.opt.num_class, self.opt.keep_prob1, self.opt.alpha, self.opt.heads)
self.fc1 = nn.Linear(2*opt.hidden_dim + self.opt.embedding_dim_pos, 2*opt.hidden_dim)
self.fc2 = nn.Linear(2*opt.hidden_dim, opt.num_class)
self.text_embed_dropout = nn.Dropout(opt.keep_prob1)
self.gates = nn.ModuleList()
self.gcns = nn.ModuleList()
for i in range(3):
self.gcns.append(GraphConvolution(2*opt.hidden_dim, 2*opt.hidden_dim))
self.gates.append(nn.Linear(2*opt.hidden_dim, 1))
def position_weight(self, inputs, emotion_id, doc_len):
"""
:param inputs: [32, 75, 200]
:param emotion_id: [32,]
:param doc_len: [32]
:param pos_embedding: [103, 50]
:return:[32,75,50]
"""
batch_size, max_len = inputs.shape[0], inputs.shape[1]
relative_pos = np.zeros((batch_size, max_len))
for sample in range(batch_size):
            doc_length = doc_len[sample].item()  # renamed to avoid shadowing the built-in len()
            for i in range(doc_length):
relative_pos[sample][i] = i - emotion_id[sample].item() + 69
return relative_pos
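    # Note on the offset above (comment only): with the shapes documented in the
    # docstring, clause i is mapped to index (i - emotion_id + 69), so clauses up to
    # 69 positions before / 33 positions after the emotion clause fall inside the
    # [0, 102] range covered by the [103, 50] position-embedding table.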
def emotion_encode(self, inputs, emotion_id):
"""
:param inputs: [32, 75, 200]
:param emotion_id: [32,]
:param doc_len: [32,]
:return: [32, 1, 200]
"""
batch_size, max_len, dim = inputs.shape[0], inputs.shape[1], inputs.shape[2]
emotion_clause = np.zeros((batch_size, dim))
for sample in range(batch_size):
clause = inputs[sample][emotion_id[sample]]
emotion_clause[sample] = clause.cpu().detach().numpy()
return torch.FloatTensor(emotion_clause)
def emotion_weight(self, inputs, emotion_clause):
"""
:param inputs: [32, 75, 200]
emotion_clause:[32, 1, 200]
:return: [32, 75]
"""
batch, dim = inputs.shape[0], inputs.shape[2]
emotion_clause = torch.reshape(emotion_clause, [batch, dim, 1])
alpha = torch.reshape(torch.matmul(inputs, emotion_clause.float()), [-1, self.opt.max_doc_len, 1])
return alpha
def mask(self, inputs, emotion_id):
"""
:param inputs: [32,75,200]
:param emotion_id: [32,]
:return: [32, 1, 200]
"""
batch_size, max_len = inputs.shape[0], inputs.shape[1]
emotion_idx = emotion_id.cpu().numpy()
mask = [[] for i in range(batch_size)]
for i in range(batch_size):
for j in range(emotion_idx[i]):
mask[i].append(0)
for j in range(emotion_idx[i], emotion_id[i] + 1):
mask[i].append(1)
for j in range(emotion_idx[i] + 1, max_len):
mask[i].append(0)
mask = torch.tensor(mask).unsqueeze(2).float().to(self.opt.device)
return mask * inputs
def pack_sen_len(self, sen_len):
"""
:param sen_len: [32, 75]
:return:
"""
batch_size = sen_len.shape[0]
up_sen_len = np.zeros([batch_size, self.opt.max_doc_len])
for i, doc in enumerate(sen_len):
for j, sen in enumerate(doc):
if sen == 0:
up_sen_len[i][j] = 1
else:
up_sen_len[i][j] = sen
return torch.tensor(up_sen_len)
def forward(self, inputs):
x, sen_len, doc_len, doc_id, emotion_id, adj = inputs
up_sen_len = self.pack_sen_len(sen_len)
x = torch.reshape(x, [-1, self.opt.max_sen_len])
x = self.embed(x)
x = self.text_embed_dropout(x)
up_sen_len = torch.reshape(up_sen_len, [-1])
word_encode = self.word_lstm(x, up_sen_len) #(32*75, batch_max_len, 200)
clause_encode = self.clause_encode(word_encode, sen_len)
embs = [clause_encode]
embs += [self.pos_embed(torch.LongTensor(self.position_weight(clause_encode, emotion_id, doc_len)).to(self.opt.device))]
        emotion_encode = self.emotion_encode(clause_encode, emotion_id)  # embedding of the emotion clause
        # for each GCN layer, compute a gating score between its output and emotion_encode
# x = F.relu(self.gc1(clause_encode, adj))
# x = F.relu(self.gc2(x, adj))
# x = F.relu(self.gc3(x, adj))
x = clause_encode
for i in range(3):
x = F.relu(self.gcns[i](x, adj))
            weight = torch.sigmoid(self.gates[i](emotion_encode))  # torch.sigmoid: F.sigmoid is deprecated
weight = weight.unsqueeze(dim=-1)
x = x * weight
output = self.fc2(x.float())
return output
# def forward(self, inputs, vs=False):
# attention = []
# x, sen_len, doc_len, doc_id, emotion_id, adj = inputs#(x(32,75, 45)), (32, 75)
# up_sen_len = self.pack_sen_len(sen_len)
# x = torch.reshape(x, [-1, self.opt.max_sen_len])
# x = self.embed(x)
# x = self.text_embed_dropout(x)
# up_sen_len = torch.reshape(up_sen_len, [-1])
# word_encode = self.word_lstm(x, up_sen_len) #(32*75, batch_max_len, 200)
# clause_encode = self.clause_encode(word_encode, sen_len)
# embs = [clause_encode]
# embs += [self.pos_embed(torch.LongTensor(self.position_weight(clause_encode, emotion_id, doc_len)).to(self.opt.device))]
# "concat"
# clause_encode = torch.cat(embs, dim=2)
# clause_encode = torch.reshape(clause_encode, [-1, self.opt.max_doc_len, 2 * self.opt.hidden_dim + self.opt.embedding_dim_pos])
# clause_encode = self.fc1(clause_encode)
# # 策略1 "emotion clause 与 clause的attention weight"
# # emotion_encode = self.emotion_encode(clause_encode, emotion_id)
# # batch, dim = clause_encode.shape[0], clause_encode.shape[2]
# # emotion_encode = torch.reshape(emotion_encode, [batch, dim , 1])
# # alpha = self.emotion_weight(clause_encode, emotion_encode)
# #
# # ones = torch.ones((batch, self.opt.max_doc_len, 1))
# #
# # emotion_encode = emotion_encode.expand(-1,-1,self.opt.max_doc_len).transpose(1,2)
# # clause_encode = alpha * emotion_encode + (ones-alpha)*clause_encode
# x = F.relu(self.gc1(clause_encode, adj))
# x = F.relu(self.gc2(x, adj))
# # x = F.relu(self.gc3(x, adj))
# # output = self.ga1(clause_encode, adj)
#
# batch, dim = clause_encode.shape[0], clause_encode.shape[2]
# ones = torch.ones((batch, self.opt.max_doc_len, 1)).to(self.opt.device)
# emotion_encode = self.emotion_encode(x, emotion_id).to(self.opt.device)
# alpha = self.emotion_weight(clause_encode, emotion_encode)
# # # emotion_encode = self.mask(x, emotion_id)
# # # alpha_mat = torch.matmul(emotion_encode, clause_encode.transpose(1,2))
# # # alpha = F.softmax(alpha_mat.sum(1, keepdim=True), dim=2).transpose(1,2) #(32,1,75)
# # # ones = torch.ones((batch, self.opt.max_doc_len, 1))
# # emotion_encode = torch.reshape(emotion_encode, [batch, dim, 1])
# # emotion_encode = emotion_encode.expand(-1, -1, self.opt.max_doc_len).transpose(1, 2)
# # # x = emotion_encode * alpha + (ones-alpha)*clause_encode
# emotion_encode = torch.reshape(emotion_encode, [batch, dim, 1])
# emotion_encode = emotion_encode.expand(-1, -1, self.opt.max_doc_len).transpose(1, 2)
# x = clause_encode * alpha + (ones - alpha) * emotion_encode
# x = self.text_embed_dropout(x)
# # # x = torch.matmul(alpha, clause_encode).squeeze(1)
# #
# # # 策略2 以原始的句表示为主,图卷积作为辅助
# # #
# #
# output = self.fc2(x.float())
# if vs:
# return output, attention
# return output
| LeMei/FSS-GCN | models/word2vec/ecgcn.py | ecgcn.py | py | 9,816 | python | en | code | 14 | github-code | 6 |
8670813064 |
import pathlib
def get_desanitizer(celltypes_dir):
cell_type_list = read_all_manifests(celltypes_dir)
return desanitizer_from_meta_manifest(cell_type_list)
def desanitizer_from_meta_manifest(cell_type_list):
"""
cell_type_list is the result of reading list_of_manifests
"""
desanitizer = dict()
for cell_type in cell_type_list:
m = cell_type['machine_readable']
h = cell_type['human_readable']
if m in desanitizer:
if h != desanitizer[m]:
raise RuntimeError(f"{m} occurs more than once")
desanitizer[m] = h
return desanitizer
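# Minimal illustrative example (assumed input; not from the original repo):
#   cell_type_list = [{'machine_readable': 'L2_3_IT', 'human_readable': 'L2/3 IT'}]
#   desanitizer_from_meta_manifest(cell_type_list)  # -> {'L2_3_IT': 'L2/3 IT'}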
def read_all_manifests(data_dir):
"""
Return:
valid_cell_types -- list of dicts like
         {'hierarchy': 'Level_1',
'data_path': path_to_zarr,
'human_readable': human_readable_name,
'machine_readable': machine_readable_name,
'unique': a_unique_key}
"""
sub_dirs = [n for n in data_dir.iterdir() if n.is_dir()]
list_of_manifests = []
for d in sub_dirs:
m = d / 'manifest.csv'
if m.is_file():
list_of_manifests.append(m)
return read_list_of_manifests(list_of_manifests)
def read_list_of_manifests(list_of_manifests):
found_machine = set()
valid_cell_types = []
#for child_dir in sub_dirs:
for manifest_path in list_of_manifests:
child_dir = manifest_path.parent
this_hierarchy = child_dir.name
if not manifest_path.is_file():
raise RuntimeError(
f"cannot find {manifest_path.resolve().absolute()}")
this_manifest = read_manifest(manifest_path)
for manifest_key in this_manifest:
element = this_manifest[manifest_key]
unq_key = f"{this_hierarchy}/{element['machine_readable']}"
if unq_key in found_machine:
raise RuntimeError(
f"{unq_key} occurs more than once")
found_machine.add(unq_key)
this_element = {'hierarchy': this_hierarchy,
'human_readable': element['human_readable'],
'machine_readable': element['machine_readable'],
'unique': unq_key}
valid_cell_types.append(this_element)
return valid_cell_types
def read_manifest(manifest_path):
"""
Get a lookup table from filename to
celltype name and machine readable group
name from the manifest.csv files written
by Lydia's script
"""
label_idx = None
path_idx = None
with open(manifest_path, "r") as in_file:
header = in_file.readline().strip().split(',')
for idx, val in enumerate(header):
if val == 'label':
label_idx = idx
elif val == 'file_name':
path_idx = idx
assert label_idx is not None
assert path_idx is not None
file_path_list = []
human_readable_list = []
for line in in_file:
line = line.strip().split(',')
pth = line[path_idx]
human_readable = line[label_idx]
file_path_list.append(pth)
human_readable_list.append(human_readable)
(sanitized_list,
_ ) = sanitize_cluster_name_list(human_readable_list)
result = dict()
for file_path, human_readable, sanitized in zip(file_path_list,
human_readable_list,
sanitized_list):
result[file_path] = {"human_readable": human_readable,
"machine_readable": sanitized}
return result
def sanitize_cluster_name(name):
for bad_char in (' ', '/'):
name = name.replace(bad_char, '_')
return name
def sanitize_cluster_name_list(
raw_cluster_name_list):
sanitized_name_set = set()
sanitized_name_list = []
desanitizer = dict()
for name in raw_cluster_name_list:
sanitized_name = sanitize_cluster_name(name)
        if sanitized_name in sanitized_name_set:
raise RuntimeError(
f"{sanitized_name} occurs more than once")
sanitized_name_set.add(sanitized_name)
sanitized_name_list.append(sanitized_name)
desanitizer[sanitized_name] = name
return sanitized_name_list, desanitizer
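# Example of the sanitization round-trip (illustrative values):
#   sanitize_cluster_name_list(['L2/3 IT', 'Sst Chodl'])
#   -> (['L2_3_IT', 'Sst_Chodl'], {'L2_3_IT': 'L2/3 IT', 'Sst_Chodl': 'Sst Chodl'})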
def get_class_lookup(
anno_path):
"""
returns subclass_to_clusters and class_to_clusters which
map the names of classes to lists of the names of clusters
therein
also return a set containing all of the valid cluster names
"""
anno_path = pathlib.Path(anno_path)
if not anno_path.is_file():
raise RuntimeError(f"{anno_path} is not a file")
subclass_to_clusters = dict()
class_to_clusters = dict()
valid_clusters = set()
desanitizer = dict()
with open(anno_path, "r") as in_file:
header = in_file.readline()
for line in in_file:
params = line.replace('"', '').strip().split(',')
assert len(params) == 4
cluster_name = params[1]
subclass_name = params[2]
class_name = params[3]
sanitized_cluster_name = sanitize_cluster_name(cluster_name)
sanitized_subclass_name = sanitize_cluster_name(subclass_name)
sanitized_class_name = sanitize_cluster_name(class_name)
for dirty, clean in zip((cluster_name,
subclass_name,
class_name),
(sanitized_cluster_name,
sanitized_subclass_name,
sanitized_class_name)):
if clean in desanitizer:
if desanitizer[clean] != dirty:
msg = "\nmore than one way to desanitize "
msg += f"{clean}\n"
msg += f"{dirty}\n"
msg += f"{desanitizer[clean]}\n"
raise RuntimeError(msg)
desanitizer[clean] = dirty
valid_clusters.add(sanitized_cluster_name)
if subclass_name not in subclass_to_clusters:
subclass_to_clusters[sanitized_subclass_name] = []
if class_name not in class_to_clusters:
class_to_clusters[sanitized_class_name] = []
subclass_to_clusters[sanitized_subclass_name].append(
sanitized_cluster_name)
class_to_clusters[sanitized_class_name].append(
sanitized_cluster_name)
return (subclass_to_clusters,
class_to_clusters,
valid_clusters,
desanitizer)
| AllenInstitute/neuroglancer_formatting_scripts | src/neuroglancer_interface/utils/celltypes_utils.py | celltypes_utils.py | py | 6,758 | python | en | code | 2 | github-code | 6 |
23811859933 |
from typing import List, Tuple
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import datasets, models, transforms
import time
import copy
from PIL import Image
from grid import SQUARES
class GeoModel:
"""Encapsulates the creation, training, saving, loading and evaluation
of the geographic prediction model.
The selected map region is divided up into squares, and the model predicts
the probability of the input image being in any given square.
"""
def __init__(self):
self.data_transforms = {
"train": transforms.Compose(
[
transforms.RandomResizedCrop(512),
transforms.ToTensor(),
]
),
"val": transforms.Compose(
[
transforms.Resize(512),
transforms.CenterCrop(512),
transforms.ToTensor(),
]
),
}
self.image_datasets = {
"train": datasets.ImageFolder("data", self.data_transforms["train"]),
"val": datasets.ImageFolder("valdata", self.data_transforms["val"]),
}
self.dataloaders = {
x: torch.utils.data.DataLoader(
self.image_datasets[x], batch_size=4, shuffle=True, num_workers=4
)
for x in ["train", "val"]
}
self.dataset_sizes = {x: len(self.image_datasets[x]) for x in ["train", "val"]}
self.class_names = self.image_datasets["train"].classes
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.net = models.resnet18(pretrained=True)
self.num_features = self.net.fc.in_features
# Our network doesn't use softmax as the last layer, since we use
# CrossEntropy loss which already implicitly does softmax,
# and softmax isn't idempotent. So we manually add softmax
# during inference.
self.net.fc = nn.Linear(self.num_features, len(self.class_names))
self.net = self.net.to(self.device)
self.criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
self.optimizer = optim.SGD(self.net.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
self.scheduler = lr_scheduler.StepLR(self.optimizer, step_size=7, gamma=0.1)
def _train_model(self, model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print("Epoch {}/{}".format(epoch, num_epochs - 1))
print("-" * 10)
# Each epoch has a training and validation phase
for phase in ["train", "val"]:
if phase == "train":
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in self.dataloaders[phase]:
inputs = inputs.to(self.device)
labels = labels.to(self.device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == "train"):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == "train":
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == "train":
scheduler.step()
epoch_loss = running_loss / self.dataset_sizes[phase]
epoch_acc = running_corrects.double() / self.dataset_sizes[phase]
print(
"{} Loss: {:.4f} Acc: {:.4f}".format(phase, epoch_loss, epoch_acc)
)
# deep copy the model
if phase == "val" and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
print()
time_elapsed = time.time() - since
print(
"Training complete in {:.0f}m {:.0f}s".format(
time_elapsed // 60, time_elapsed % 60
)
)
print("Best val Acc: {:4f}".format(best_acc))
# Load best model weights found during the training
model.load_state_dict(best_model_wts)
return model
def train(self, num_epochs=25):
"""Fine-tunes the pre-trained model using the parameters specified in this class's
`__init__`. The trained model is then stored in this class for usage.
Takes a handful of minutes per epoch on a 30-series Nvidia CUDA-enabled GPU.
"""
self.net = self._train_model(
self.net,
self.criterion,
self.optimizer,
self.scheduler,
num_epochs=num_epochs,
)
def save_to_disk(self, path: str = "models/resnet18v1"):
"""Saves the model parameters to disk using the specified `path`."""
torch.save(self.net.state_dict(), path)
def load_from_disk(self, path: str = "models/resnet18v1"):
"""Loads the model parameters from disk using the specified `path`."""
self.net.load_state_dict(torch.load(path))
self.net.eval()
def predict_random_image(
self,
) -> Tuple[Image.Image, List[float], Tuple[float, float]]:
"""Select a random image from the validaiton data, run inference
on it, and return the image as well as the predicted probabilities
and the correct location for the image.
"""
_, (inputs, labels) = next(enumerate(self.dataloaders["val"]))
inputs = inputs.to(self.device)
labels = labels.to(self.device)
raw_outputs = self.net(inputs)
outputs = nn.functional.softmax(raw_outputs, dim=1)
# Just take the first image + probabilities of the batch
net_probabilities = outputs.cpu().detach().numpy()[0]
# The probabilities are in the internal order of the network.
# We need to assign them the correct class names
probabilities = [None] * len(self.class_names)
for i in range(len(self.class_names)):
# Note that we assume that class names are just numbers of squares.
# If we wanted to use strings instead, we would have to use a dict.
probabilities[int(self.class_names[i])] = net_probabilities[i]
return (
transforms.ToPILImage()(inputs[0]).convert("RGB"),
probabilities,
SQUARES[int(self.class_names[int(labels[0])])].center,
)
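# Hedged usage sketch (comment only; assumes the data/ and valdata/ folders and a
# trained checkpoint exist, mirroring the __main__ flow below):
#   model = GeoModel()
#   model.load_from_disk("models/resnet18v1")
#   image, probs, true_center = model.predict_random_image()
#   # probs[i] is the predicted probability that the image lies in grid square i.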
if __name__ == "__main__":
# This main method will train the model and save it to disk.
# Load pre-trained model and finetune the weight by training it.
# The model chosen is ResNet18, which is the 18-layer version of ResNet
    # pre-trained on the ImageNet dataset.
# We just finetune the weights using our own Google Street View data.
model = GeoModel()
model.train(num_epochs=25)
# Save model weights to disk so that we can load the trained model later
model.save_to_disk()
# Load pre-trained model and load the finetuned weights from disk
model = GeoModel()
model.load_from_disk()
# Run inference on a random image from the validation dataset
    image, probs, true_location = model.predict_random_image()
| yawnston/geo-guessing | model.py | model.py | py | 8,132 | python | en | code | 0 | github-code | 6 |
15565374410 |
from pathlib import Path
WHERE_CLAUSE = "where"
# DATABASE Connection constants
DB_USERNAME = "project1user"
DB_PASSWORD = "project1pass"
DEFAULT_DB = "project1db"
VERBOSITY_DEFAULT = 2
MACHINE = "lab-machine"
# Benchmark constants
EPINIONS = "epinions"
INDEXJUNGLE = "indexjungle"
TIMESERIES = "timeseries"
BENCHMARKS = [
EPINIONS,
INDEXJUNGLE,
TIMESERIES,
]
# File Paths
TLD = Path(__file__).parent
DDL_DIRECTORY = TLD / "ddls/"
RESULTS_DIRECTORY = TLD / "benchbase_data/"
SCRIPTS_DIRECTORY = TLD / "scripts/"
TEMP_CSV = TLD / "temp.csv"
ACTIONS_SQL = TLD / "actions.sql"
STATE_DIRECTORY = TLD / "state/"
STATE_JSON = STATE_DIRECTORY / "state.json"
STATE_CANDIDATES = STATE_DIRECTORY / "candidates.txt"
KEY_TABLE_INDEXES = "table_indexes"
KEY_INDEX_COLUMNS = "column_indexes"
| karthik-ramanathan-3006/15-799-Special-Topics-in-Database-Systems | constants.py | constants.py | py | 800 | python | en | code | 0 | github-code | 6 |
71584682748 |
from bs4 import BeautifulSoup
import requests
class DHMenuScraper:
menuLink = "https://nutrition.sa.ucsc.edu/menuSamp.asp?"
dHallCodes = {
"nineten" : "locationNum=40&locationName=Colleges+Nine+%26+Ten+Dining+Hall",
"cowellstevenson" : "locationNum=05&locationName=Cowell+Stevenson+Dining+Hall"
}
def __init__(self):
return
def getFullMenu(self, dHall, mealNum):
fullUrl = self.menuLink + self.dHallCodes[dHall]
page = requests.get(fullUrl)
soup = BeautifulSoup(page.text, 'html.parser')
# finds the correct table for the meal
meal = soup.find_all('div', class_='menusampmeals')[mealNum]
# variables for loop to find the meals
current = meal
firstTableFound = True
while current is not None:
# print(current)
if current.name == 'table':
if firstTableFound:
firstTableFound = False
else:
# we are done
break
current = current.parent
rawMeals = current.find_all('div', class_='menusamprecipes')
finalMeals = []
for meal in rawMeals:
finalMeals.append(meal.string)
return finalMeals
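# Illustrative usage (comment only; performs a live request to the UCSC nutrition page):
#   scraper = DHMenuScraper()
#   items = scraper.getFullMenu("nineten", 0)   # 0 selects the first meal block on the page
#   print(items)                                # list of menu item names as strings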
| kschniedergers/DHBot | DHMenuScraper.py | DHMenuScraper.py | py | 1,281 | python | en | code | 0 | github-code | 6 |
29564758485 |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import random
from model.leverage_bracket import leverage_bracket
from model.symbol import symbol as s
from operation.contract.client.leverage_bracket.query_leverage_bracket_list import query_leverage_bracket_list
from test_cases.contract.client.conftest import *
from common.logger import logger
class Test_query_leverage_bracket_list:
    ''' Query the leverage brackets for all trading pairs
    1. Randomly pick one trading pair from the API response and compare it against the database
    2. For that trading pair, look up the corresponding bracket information in the database
    3. Compare the bracket details
    '''
@pytest.mark.single
# @pytest.mark.usefixtures("step_first")
@pytest.mark.parametrize("scene,except_result, except_returnCode, except_msg",
api_leverage_data["query_leverage_bracket_list"])
def test_query_leverage_bracket_list(self,scene,except_result,except_returnCode, except_msg):
# logger.info("*************** 开始执行用例 ***************")
logger.info(f'场景【{scene}】信息:{except_result}-{except_returnCode}-"{except_msg}"')
result = query_leverage_bracket_list()
logger.warning(f'场景-[{scene}]的返回信息是:{result.response}')
try:
            # Randomly pick one trading pair from the response
leverage_result = random.choice(result.response["result"])
symbol = leverage_result.get("symbol")
            # Fetch this trading pair's info from the database
symbol_single = s.query.filter(s.symbol == '{}'.format(symbol)).first()
            # Use the trading pair's symbol_id to fetch the detailed bracket info
symbol_list = leverage_bracket.query.filter(leverage_bracket.symbol_id == symbol_single.id).all()
if symbol_list is not None:
for symbol_ in symbol_list:
for res in leverage_result.get('leverageBrackets'):
if symbol_.bracket == res['bracket']:
                            assert float(symbol_.max_nominal_value) == \
                                   float(res['maxNominalValue'])
assert float(symbol_.maint_margin_rate) == \
float(res['maintMarginRate'])
assert float(symbol_.start_margin_rate) == \
float(res['startMarginRate'])
assert float(symbol_.max_leverage) == \
float(res['maxLeverage'])
assert float(symbol_.min_leverage) == \
float(res['minLeverage'])
else:
                # This trading pair is not in the database
                assert leverage_result is not None
                logger.error("The query-all-leverage-brackets API returned a trading pair that does not exist in the database")
except Exception as e:
logger.error(e)
assert result.status_code == 200
assert except_result == result.response["msgInfo"]
assert except_returnCode == result.response["returnCode"]
if except_returnCode == 0:
assert except_msg in str(result.response["result"])
else:
assert except_msg in result.response["error"]["msg"]
# logger.info("*************** 结束执行用例 ***************")
if __name__ == '__main__':
pytest.main(["-q", "-s", "test_query_leverage_bracket_list.py"])
| shiqilouyang/thanos_test | test_cases/contract/client/leverage_bracket/test_query_everage_bracket_list.py | test_query_everage_bracket_list.py | py | 3,595 | python | en | code | 0 | github-code | 6 |
23182257426 |
import pytest
import json
import ipaddress
from tests.common.utilities import wait_until
from tests.common import config_reload
import ptf.testutils as testutils
import ptf.mask as mask
import ptf.packet as packet
import time
pytestmark = [
pytest.mark.topology('t0'),
pytest.mark.device_type('vs')
]
def add_ipaddr(ptfhost, nexthop_addrs, prefix_len, nexthop_devs, ipv6=False):
for idx in range(len(nexthop_addrs)):
if ipv6:
ptfhost.shell("ip -6 addr add {}/{} dev eth{}".format(nexthop_addrs[idx], prefix_len, nexthop_devs[idx]), module_ignore_errors=True)
else:
ptfhost.shell("ip addr add {}/{} dev eth{}".format(nexthop_addrs[idx], prefix_len, nexthop_devs[idx]), module_ignore_errors=True)
def del_ipaddr(ptfhost, nexthop_addrs, prefix_len, nexthop_devs, ipv6=False):
for idx in range(len(nexthop_addrs)):
if ipv6:
ptfhost.shell("ip -6 addr del {}/{} dev eth{}".format(nexthop_addrs[idx], prefix_len, nexthop_devs[idx]), module_ignore_errors=True)
else:
ptfhost.shell("ip addr del {}/{} dev eth{}".format(nexthop_addrs[idx], prefix_len, nexthop_devs[idx]), module_ignore_errors=True)
def generate_and_verify_traffic(duthost, ptfadapter, ip_dst, expected_ports, ipv6=False):
if ipv6:
pkt = testutils.simple_tcpv6_packet(
eth_dst=duthost.facts["router_mac"],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ipv6_src='2001:db8:85a3::8a2e:370:7334',
ipv6_dst=ip_dst,
ipv6_hlim=64,
tcp_sport=1234,
tcp_dport=4321)
else:
pkt = testutils.simple_tcp_packet(
eth_dst=duthost.facts["router_mac"],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ip_src='1.1.1.1',
ip_dst=ip_dst,
ip_ttl=64,
tcp_sport=1234,
tcp_dport=4321)
exp_pkt = pkt.copy()
exp_pkt = mask.Mask(exp_pkt)
exp_pkt.set_do_not_care_scapy(packet.Ether, 'dst')
exp_pkt.set_do_not_care_scapy(packet.Ether, 'src')
if ipv6:
exp_pkt.set_do_not_care_scapy(packet.IPv6, 'hlim')
exp_pkt.set_do_not_care_scapy(packet.IPv6, 'chksum')
else:
exp_pkt.set_do_not_care_scapy(packet.IP, 'ttl')
exp_pkt.set_do_not_care_scapy(packet.IP, 'chksum')
testutils.send(ptfadapter, 5, pkt)
testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=expected_ports)
def run_static_route_test(duthost, ptfadapter, ptfhost, prefix, nexthop_addrs, prefix_len, nexthop_devs, ipv6=False, config_reload_test=False):
# Add ipaddresses in ptf
add_ipaddr(ptfhost, nexthop_addrs, prefix_len, nexthop_devs, ipv6=ipv6)
try:
# Add static route
duthost.shell("sonic-db-cli CONFIG_DB hmset 'STATIC_ROUTE|{}' nexthop {}".format(prefix, ",".join(nexthop_addrs)))
time.sleep(5)
# Check traffic get forwarded to the nexthop
ip_dst = str(ipaddress.ip_network(unicode(prefix))[1])
generate_and_verify_traffic(duthost, ptfadapter, ip_dst, nexthop_devs, ipv6=ipv6)
# Config save and reload if specified
if config_reload_test:
duthost.shell('config save -y')
config_reload(duthost)
generate_and_verify_traffic(duthost, ptfadapter, ip_dst, nexthop_devs, ipv6=ipv6)
finally:
# Remove static route
duthost.shell("sonic-db-cli CONFIG_DB del 'STATIC_ROUTE|{}'".format(prefix), module_ignore_errors=True)
# Delete ipaddresses in ptf
del_ipaddr(ptfhost, nexthop_addrs, prefix_len, nexthop_devs, ipv6=ipv6)
# Config save if the saved config_db was updated
if config_reload_test:
duthost.shell('config save -y')
def get_vlan_info(duthost, tbinfo, ipv6=False):
mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
vlan_intf = mg_facts['minigraph_vlan_interfaces'][1 if ipv6 else 0]
prefix_len = vlan_intf['prefixlen']
vlan_subnet = ipaddress.ip_network(vlan_intf['subnet'])
vlan_ports = mg_facts['minigraph_vlans'][mg_facts['minigraph_vlan_interfaces'][1 if ipv6 else 0]['attachto']]['members']
vlan_ptf_ports = [mg_facts['minigraph_ptf_indices'][port] for port in vlan_ports]
return prefix_len, vlan_subnet, vlan_ptf_ports
def test_static_route(duthost, ptfadapter, ptfhost, tbinfo):
prefix_len, vlan_subnet, vlan_ptf_ports = get_vlan_info(duthost, tbinfo)
run_static_route_test(duthost, ptfadapter, ptfhost, "1.1.1.0/24",
[str(vlan_subnet[11])], prefix_len, [vlan_ptf_ports[0]])
def test_static_route_ecmp(duthost, ptfadapter, ptfhost, tbinfo):
prefix_len, vlan_subnet, vlan_ptf_ports = get_vlan_info(duthost, tbinfo)
if len(vlan_ptf_ports) >= 3:
nexthops = [str(vlan_subnet[20 + idx]) for idx in range(3)]
intfs = vlan_ptf_ports[0:3]
else:
nexthops = [str(vlan_subnet[20 + idx]) for idx in range(len(vlan_ptf_ports))]
intfs = vlan_ptf_ports[0:len(vlan_ptf_ports)]
run_static_route_test(duthost, ptfadapter, ptfhost, "2.2.2.0/24",
nexthops, prefix_len, intfs, config_reload_test=True)
def test_static_route_ipv6(duthost, ptfadapter, ptfhost, tbinfo):
prefix_len, vlan_subnet, vlan_ptf_ports = get_vlan_info(duthost, tbinfo, ipv6=True)
run_static_route_test(duthost, ptfadapter, ptfhost, "2000:1::/64",
[str(vlan_subnet[11])], prefix_len, [vlan_ptf_ports[0]], ipv6=True)
def test_static_route_ecmp_ipv6(duthost, ptfadapter, ptfhost, tbinfo):
prefix_len, vlan_subnet, vlan_ptf_ports = get_vlan_info(duthost, tbinfo, ipv6=True)
if len(vlan_ptf_ports) >= 3:
nexthops = [str(vlan_subnet[20 + idx]) for idx in range(3)]
intfs = vlan_ptf_ports[0:3]
else:
nexthops = [str(vlan_subnet[20 + idx]) for idx in range(len(vlan_ptf_ports))]
intfs = vlan_ptf_ports[0:len(vlan_ptf_ports)]
run_static_route_test(duthost, ptfadapter, ptfhost, "2000:2::/64",
nexthops, prefix_len, intfs, ipv6=True, config_reload_test=True)
| SijiJ/sonic-mgmt | tests/route/test_static_route.py | test_static_route.py | py | 6,020 | python | en | code | null | github-code | 6 |
10251123501 |
class Solution:
def romanToInt(self, s: str) -> int:
# hm to match symbol to val
# tc: O(n)
# sc: O(1), hm of constant space
# summary
# largest to smallest: add them up
# smaller before larger: subtract smaller
roman = {"I": 1, "V": 5,"X": 10,"L": 50,"C": 100,"D": 500,"M": 1000 }
res = 0
for i in range(len(s)):
# i + 1 < len(s) : check if i + 1 is still in-bound
# roman[s[i]] < roman[s[i + 1]]: need to subtract roman[s[i]] from res
if i + 1 < len(s) and roman[s[i]] < roman[s[i + 1]]:
res -= roman[s[i]]
else:
res += roman[s[i]]
return res
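# Quick illustrative checks (comment only):
#   Solution().romanToInt("LVIII")    # -> 58   (L=50, V=5, III=3)
#   Solution().romanToInt("MCMXCIV")  # -> 1994 (M=1000, CM=900, XC=90, IV=4)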
| stevenwcliu/leetcode_footprints | 13-roman-to-integer/13-roman-to-integer.py | 13-roman-to-integer.py | py | 762 | python | en | code | 0 | github-code | 6 |
32171234106 |
import json
from django.views.generic import ListView
from django.conf import settings
from django.shortcuts import render
from django.urls import reverse_lazy
from django.contrib.sites.models import Site
import requests
from cart.cart import Cart
from django.views.generic import CreateView
from django.views import View
from .tasks import order_created
from orders.models import Order, OrderItem
from django.contrib.auth.mixins import LoginRequiredMixin
class CreateOrderView(LoginRequiredMixin, CreateView):
model = Order
template_name = "orders/order_create.html"
fields = [
'first_name',
'last_name',
'email',
'address',
'apartment',
'city',
'country',
'state_province',
'postal_code',
]
def form_valid(self, form):
cart = Cart(self.request)
order = form.save(commit=False)
order.user = self.request.user
order.save()
amount = int(cart.get_total_price())
email = form.cleaned_data['email']
headers = {
'Authorization': f'Bearer {settings.PS_SECRET}',
'Content-Type': 'application/json'
}
current_site = Site.objects.get_current()
if settings.DEBUG:
call_back = f'http://{current_site.domain}/payment'
else:
call_back = f'https://{current_site.domain}/payment'
data = {
'amount': amount * 100,
'email': email,
'callback_url': call_back,
'metadata': {
'order_id': str(order.id)
}
}
url = "https://api.paystack.co/transaction/initialize"
resp = requests.post(url=url, json=data, headers=headers)
respo = json.loads(resp.content)
self.success_url = str(respo['data']['authorization_url'])
for product in cart:
OrderItem.objects.create(
order=order, item=product['item'],
price=product['price'], quantity=product['quantity']
)
cart.clear()
return super().form_valid(form)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
cart = Cart(self.request)
context['cart'] = cart
return context
# class CreateCheckoutSession(View):
# def post(self, request, *args, **kwargs):
class OrderHistory(LoginRequiredMixin, ListView):
model = Order
template_name = 'orders/order_history.html'
queryset = Order.objects.all()
context_object_name = 'orders'
def get_queryset(self):
queryset = Order.objects.filter(user=self.request.user)
return queryset
def created(request):
return render(request, "orders/created.html")
| Alisjj/Shop-From-Home | orders/views.py | views.py | py | 2,802 | python | en | code | 0 | github-code | 6 |
31954675537 |
"""
The color scheme.
"""
from __future__ import unicode_literals
from prompt_toolkit.styles import PygmentsStyle, Style, Attrs
from pygments.token import Token
__all__ = (
'PymuxStyle',
)
ui_style = {
Token.Line: '#888888',
Token.Line.Focussed: '#448844',
Token.TitleBar: 'bg:#888888 #dddddd ',
Token.TitleBar.Title: '',
Token.TitleBar.Name: '#ffffff noitalic',
Token.TitleBar.Name.Focussed: 'bg:#88aa44',
Token.TitleBar.Line: '#444444',
Token.TitleBar.Line.Focussed: '#448844 noinherit',
Token.TitleBar.Focussed: 'bg:#5f875f #ffffff bold',
Token.TitleBar.Focussed.Title: '',
Token.TitleBar.Zoom: 'bg:#884400 #ffffff',
Token.TitleBar.PaneIndex: '',
Token.TitleBar.CopyMode: 'bg:#88aa88 #444444',
Token.TitleBar.CopyMode.Position: '',
Token.TitleBar.Focussed.PaneIndex: 'bg:#88aa44 #ffffff',
Token.TitleBar.Focussed.CopyMode: 'bg:#aaff44 #000000',
Token.TitleBar.Focussed.CopyMode.Position: '#888888',
Token.CommandLine: 'bg:#4e4e4e #ffffff',
Token.CommandLine.Command: 'bold',
Token.CommandLine.Prompt: 'bold',
Token.StatusBar: 'bg:#444444 #ffffff',
Token.StatusBar.Window: 'bg:#888888',
Token.StatusBar.Window.Current: '#88ff88 bold',
Token.AutoSuggestion: 'bg:#4e5e4e #88aa88',
Token.Message: 'bg:#bbee88 #222222',
Token.Background: '#888888',
Token.Clock: 'bg:#88aa00',
Token.PaneNumber: 'bg:#888888',
Token.PaneNumber.Focussed: 'bg:#aa8800',
Token.Terminated: 'bg:#aa0000 #ffffff',
Token.ConfirmationToolbar: 'bg:#880000 #ffffff',
Token.ConfirmationToolbar.Question: '',
Token.ConfirmationToolbar.YesNo: 'bg:#440000',
Token.Search: 'bg:#88aa88 #444444',
Token.Search.Text: '',
Token.Search.Focussed: 'bg:#aaff44 #444444',
Token.Search.Focussed.Text: 'bold #000000',
Token.SearchMatch: '#000000 bg:#88aa88',
Token.SearchMatch.Current: '#000000 bg:#aaffaa underline',
# Completions menu.
Token.Menu.Completions.Completion: 'bg:#88aa88 #222222',
Token.Menu.Completions.Completion.Current: 'bg:#88cc88 #000000',
Token.Menu.Completions.ProgressBar: 'bg:#889988',
Token.Menu.Completions.ProgressButton: 'bg:#004400',
}
class PymuxStyle(Style):
"""
The styling. It includes the pygments style from above. But further, in
order to proxy all the output from the processes, it interprets all tokens
    starting with ('C',) as tokens that describe their own style.
"""
def __init__(self):
self.pygments_style = PygmentsStyle.from_defaults(style_dict=ui_style)
self._token_to_attrs_dict = None
def get_attrs_for_token(self, token):
if token and token[0] == 'C':
# Token starts with ('C',). Token describes its own style.
c, fg, bg, bold, underline, italic, blink, reverse = token
return Attrs(fg, bg, bold, underline, italic, blink, reverse)
else:
# Take styles from Pygments style.
return self.pygments_style.get_attrs_for_token(token)
def invalidation_hash(self):
return None
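# Illustrative example (comment only): a process token such as
#   ('C', '#ff0000', '#000000', True, False, False, False, False)
# is unpacked by get_attrs_for_token into Attrs('#ff0000', '#000000', True, False, False, False, False),
# while ordinary tokens (e.g. Token.TitleBar) fall back to the ui_style Pygments definitions.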
| jonathanslenders/pymux-test | pymux/style.py | style.py | py | 3,589 | python | en | code | 3 | github-code | 6 |
29445747576 |
# implementation of LCS (longest common subsequence) for two given sequences
# takes two sequences as input
# returns the DP table and the length of the LCS
def length_lcs(x, y, m, n):
arr = [[0 for x in range(n + 1)] for x in range(m + 1)]
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0:
arr[i][j] = 0
elif x[i -1] == y[j -1]:
arr[i][j] = arr[i -1][j -1] + 1
else:
arr[i][j] = max(arr[i-1][j], arr[i][j-1])
return arr, arr[m][n]
# takes the two sequences, the DP table and the LCS length as input
# returns the LCS as a list of elements
def lcs(x, y, array, length):
current_index = length
lcs = ["" for i in range(current_index)]
i = len(x)
j = len(y)
while i > 0 and j > 0:
if x[i-1] == y[j-1]:
lcs[current_index -1] = x[i -1]
i -= 1
j -= 1
current_index -= 1
elif array[i-1][j] > array[i][j-1]:
i-= 1
else:
j -= 1
return lcs
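# Illustrative example (comment only):
#   x, y = "ABCBDAB", "BDCABA"
#   table, n = length_lcs(x, y, len(x), len(y))   # n == 4
#   lcs(x, y, table, n)                           # -> ['B', 'D', 'A', 'B'], one LCS of length 4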
| ssigdel/Data-Structure-and-Algorithm | LCS/lcs.py | lcs.py | py | 991 | python | en | code | 0 | github-code | 6 |
15211959630 |
"""
CNN Classification of SDSS galaxy images
----------------------------------------
Figure 9.20
The accuracy of a multi-layer Convolutional Neural Network
applied to a set of morphologically classified galaxy images taken
from the SDSS. The configuration of the network is described in
Section 9.8.4. The left panel shows the false positive rate
against the true positive rate for the resulting network. The right
side of the figure shows examples of images that were correctly
and incorrectly classified.
"""
# Author: Andrew Connolly
# License: BSD
# The code is derived from an example by Marc Huertas-Company.
# The figure produced by this code is published in the updated edition of the
# textbook "Statistics, Data Mining, and Machine Learning in Astronomy" (2019)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from sklearn.metrics import roc_curve
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
import random
from sklearn.utils import shuffle
from sklearn.metrics import accuracy_score
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
try:
from astroML.datasets import fetch_sdss_galaxy_images
HAS_ASTROML_DATASETS = True
except ImportError:
HAS_ASTROML_DATASETS = False
if "setup_text_plots" not in globals():
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
plt.rcParams['axes.xmargin'] = 0.05
plt.rcParams['axes.ymargin'] = 0.05
def read_savefile(filename):
'''Read npy save file containing images or labels of galaxies'''
return np.load(filename)
def CNN(img_channels, img_rows, img_cols, verbose=False):
'''Define CNN model for Nair and Abraham data'''
# some hyperparamters you can chage
dropoutpar = 0.5
nb_dense = 64
model = Sequential()
model.add(Convolution2D(32, 6, 6, border_mode='same',
input_shape=(img_rows, img_cols, img_channels)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Convolution2D(64, 5, 5, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 5, 5, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(128, 2, 2, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(128, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(nb_dense, activation='relu'))
model.add(Dropout(dropoutpar))
model.add(Dense(1, init='uniform', activation='sigmoid'))
print("Compilation...")
model.compile(loss='binary_crossentropy', optimizer='adam',
metrics=['accuracy'])
print("... done!")
if verbose is True:
print("Model Summary")
print("===================")
model.summary()
return model
def train_CNN(X, Y, ntrain, nval, output="test", verbose=False):
'''Train the CNN given a dataset and output model and weights'''
# train params - hardcoded for simplicity
batch_size = 30
nb_epoch = 50
data_augmentation = True # if True the data will be augmented at every iteration
ind = random.sample(range(0, ntrain+nval-1), ntrain+nval-1)
X_train = X[ind[0:ntrain], :, :, :]
X_val = X[ind[ntrain:ntrain+nval], :, :, :]
Y_train = Y[ind[0:ntrain]]
Y_val = Y[ind[ntrain:ntrain+nval]]
# input image dimensions
img_rows, img_cols = X_train.shape[1:3]
img_channels = 3
# Right shape for X
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols,
img_channels)
X_val = X_val.reshape(X_val.shape[0], img_rows, img_cols, img_channels)
# Avoid more iterations once convergence
patience_par = 10
earlystopping = EarlyStopping(monitor='val_loss', patience=patience_par,
verbose=0, mode='auto' )
modelcheckpoint = ModelCheckpoint(output+"_best.hd5", monitor='val_loss',
verbose=0, save_best_only=True)
# Define CNN
model = CNN(img_channels, img_rows, img_cols, verbose=True)
if not data_augmentation:
print('Not using data augmentation.')
history = model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
validation_data=(X_val, Y_val),
shuffle=True, verbose=verbose,
callbacks=[earlystopping, modelcheckpoint])
else:
print('Using real-time data augmentation.')
# this will do preprocessing and realtime data augmentation
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=45,
width_shift_range=0.05,
height_shift_range=0.05,
horizontal_flip=True,
vertical_flip=True,
zoom_range=[0.75, 1.3])
datagen.fit(X_train)
history = model.fit_generator(
datagen.flow(X_train, Y_train, batch_size=batch_size),
samples_per_epoch=X_train.shape[0],
nb_epoch=nb_epoch,
validation_data=(X_val, Y_val),
callbacks=[earlystopping, modelcheckpoint])
print("Saving model...")
# save weights
model.save_weights(output+".weights", overwrite=True)
def apply_CNN(X, model_name):
'''Apply a CNN to a data set'''
# input image dimensions
img_rows, img_cols = X.shape[1:3]
img_channels = 3
X = X.reshape(X.shape[0], img_rows, img_cols, img_channels)
# load model & predict
print("Loading weights", model_name)
model = CNN(img_channels, img_rows, img_cols)
model.load_weights(model_name+".weights")
Y_pred = model.predict_proba(X)
return Y_pred
def add_titlebox(ax, text):
'''Add an embedded title into figure panel'''
ax.text(.1, .85, text,
horizontalalignment='left',
transform=ax.transAxes,
bbox=dict(facecolor='white', edgecolor='none', alpha=0.8))
return ax
def plot_CNN_performance(pred, labels):
'''Plot ROC curve and sample galaxies'''
fig = plt.figure(figsize=(6, 3))
fig.subplots_adjust(wspace=0.1, hspace=0.1,
left=0.1, right=0.95,
bottom=0.15, top=0.9)
# define shape of figure
gridsize = (2, 4)
ax1 = plt.subplot2grid(gridsize, (0, 0), colspan=2, rowspan=2)
ax2 = plt.subplot2grid(gridsize, (0, 2))
ax3 = plt.subplot2grid(gridsize, (0, 3))
ax4 = plt.subplot2grid(gridsize, (1, 2))
ax5 = plt.subplot2grid(gridsize, (1, 3))
# plot ROC curve
fpr, tpr, thresholds = roc_curve(labels, pred)
ax1.plot(fpr, tpr, color='black')
ax1.set_xlabel(r'False Positive Rate')
ax1.set_ylabel(r'True Positive Rate')
# array of objects (good E, good S, bad E, bad S)
goodE = np.where((pred[:, 0] < 0.5) & (labels == 0))
goodS = np.where((pred[:, 0] > 0.5) & (labels == 1))
badE = np.where((pred[:, 0] < 0.5) & (labels == 1))
badS = np.where((pred[:, 0] > 0.5) & (labels == 0))
ax2.imshow(D[pred_index + goodE[0][1]])
add_titlebox(ax2, "Correct E")
ax2.axis('off')
ax3.imshow(D[pred_index + goodS[0][4]])
add_titlebox(ax3, "Correct Spiral")
ax3.axis('off')
ax4.imshow(D[pred_index + badE[0][1]])
add_titlebox(ax4, "Incorrect E")
ax4.axis('off')
ax5.imshow(D[pred_index + badS[0][3]])
add_titlebox(ax5, "Incorrect Spiral")
ax5.axis('off')
plt.show()
n_objects = 500
save_files = "./SDSS{}".format(n_objects)
# Read SDSS images and labels. Data is a sample from
# Nair and Abraham (2010) http://adsabs.harvard.edu/abs/2010ApJS..186..427N
# Ellipticals are class 0. Spirals are class 1
if HAS_ASTROML_DATASETS:
D, Y = fetch_sdss_galaxy_images()
else:
try:
D = read_savefile("sdss_images_1000.npy")[0:n_objects]
Y = read_savefile("sdss_labels_1000.npy")[0:n_objects]
except FileNotFoundError:
raise FileNotFoundError(
'Loading this data automatically requires astroML 1.0.2+.\n'
'For older versions please download and uncompress the files\n'
'"sdss_images_1000.npy.gz" and \n'
'"sdss_labels_1000.npy"\n'
'manually before running this script. Data URL:\n'
'https://github.com/astroML/astroML-data/tree/main/datasets')
# Train network and output to disk (keep 10% of data for test set)
ntrain = D.shape[0] * 8 // 10
nval = D.shape[0] // 10
npred = D.shape[0] - (ntrain + nval) # test sample size;
pred_index = ntrain + nval # test sample start index;
# Normalize images
mu = np.amax(D, axis=(1, 2))
for i in range(0, mu.shape[0]):
D[i, :, :, 0] = D[i, :, :, 0] / mu[i, 0]
D[i, :, :, 1] = D[i, :, :, 1] / mu[i, 1]
D[i, :, :, 2] = D[i, :, :, 2] / mu[i, 2]
# change order so that we do not use always the same objects to train/test
D, Y, = shuffle(D, Y, random_state=0)
my_file = Path(save_files + ".weights")
if my_file.is_file():
Y_pred = apply_CNN(D[pred_index:pred_index + npred, :, :, :], save_files)
Y_test=Y[pred_index:pred_index + npred]
else:
print("Training Model")
print("====================")
model_name = train_CNN(D, Y, ntrain, nval, output=save_files)
Y_pred = apply_CNN(D[pred_index:pred_index + npred, :, :, :], save_files)
Y_test = Y[pred_index:pred_index + npred]
Y_pred_class = Y_pred * 0
Y_pred_class[Y_pred > 0.5] = 1
print("Global Accuracy:", accuracy_score(Y_test, Y_pred_class))
plot_CNN_performance(Y_pred, Y_test)
| astroML/astroML_figures | book_figures/chapter9/fig_morph_nn.py | fig_morph_nn.py | py | 10,396 | python | en | code | 7 | github-code | 6 |
74126214589 |
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
import datetime
#########################################################################
## This is a sample controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
#########################################################################
@auth.requires_login()
def index():
"""
Main logged in homepage, displays users collection
"""
#If user doesn't have an Unfiled box, create one
if (db((db.box.owner_id == auth.user.id) & (db.box.name == 'Unfiled')).count()==0):
db.box.insert(name='Unfiled',
is_public='False',
owner_id=auth.user.id,
created_on = datetime.datetime.now())
        db.commit()
#Display any necessary message
if (session.message):
response.flash = session.message
session.message = None
    #Find users public boxes
public = db((db.box.owner_id==auth.user.id) & (db.box.is_public == True)).select()
#Find users private boxes
private = db((db.box.owner_id==auth.user.id) & (db.box.is_public != True)).select()
#Find how many comics user has, to offer assistance
no_of_comics = db(db.comic.owner_id == auth.user.id).count()
return dict(public_boxes = public, private_boxes = private, no_of_comics = no_of_comics)
@auth.requires_login()
def all():
comics = db((db.comic.owner_id == auth.user.id) & (auth.user.id == db.auth_user.id)).select(orderby = db.comic.title)
if len(comics)>0:
return dict(comics = comics)
else:
return dict()
@auth.requires_login()
def search():
form = FORM(DIV(LABEL('Title:', _for='title', _class="control-label col-sm-3"),
DIV(INPUT(_class = "form-control string", _name='title', _type="text"), _class="col-sm-3"),
_class="form-group"),
DIV(LABEL('Writer:', _for='writer', _class="control-label col-sm-3"),
DIV(INPUT(_class = "form-control string", _name='writer', _type="text"), _class="col-sm-3"),
_class="form-group"),
DIV(LABEL('Artist:', _for='artist', _class="control-label col-sm-3"),
DIV(INPUT(_class = "form-control string", _name='artist', _type="text"), _class="col-sm-3"),
_class="form-group"),
DIV(LABEL('Publisher:', _for='publisher', _class="control-label col-sm-3"),
DIV(INPUT(_class = "form-control string", _name='publisher', _type="text"), _class="col-sm-3"),
_class="form-group"),
DIV(DIV(INPUT(_class = "btn btn-primary", _value='Search', _type="submit"),
_class="col-sm-9 col-sm-offset-3"),
_class="form-group"),
_class="form-horizontal")
if form.accepts(request, session):
search_term = ""
if (len(request.vars.title) > 0):
title_term = "%" + request.vars.title + "%"
search_term = (db.comic.title.like(title_term))
if (len(request.vars.writer) > 0):
writer_term = "%" + request.vars.writer + "%"
if (search_term):
search_term = search_term & (db.comic.writers.like(writer_term))
else:
search_term = (db.comic.writers.like(writer_term))
if (len(request.vars.artist) > 0):
artist_term = "%" + request.vars.artist + "%"
if (search_term):
search_term = search_term & (db.comic.artists.like(artist_term))
else:
search_term = (db.comic.artists.like(artist_term))
if (len(request.vars.publisher) > 0):
publisher_term = "%" + request.vars.publisher + "%"
if (search_term):
search_term = search_term & (db.comic.publisher.like(publisher_term))
else:
search_term = (db.comic.publisher.like(publisher_term))
#Allow for a blank search to return all comics
#TODO: Disallow for when this search could overload system, i.e. lots of public comics
constraint = (db.comic_in_box.box_id == db.box.id) & ((db.box.is_public == True) | (db.box.owner_id == auth.user.id)) & (db.comic_in_box.comic_id == db.comic.id) & (db.comic.owner_id == db.auth_user.id)
if (search_term):
search_term = search_term & constraint
else:
search_term = constraint
results = db(search_term).select()
#Filter out duplicate results caused by comics being in public boxes
#Not able to get select query do this due to complexity in use of distinct
distinct = dict()
for result in results:
if result.comic.id not in distinct:
distinct[result.comic.id] = result.comic_in_box.id
#Output success indicated by number of distinct result(s)
output = "Search complete: " + str(len(distinct)) + " result"
if(len(distinct) != 1): output += "s"
response.flash = output
else:
if form.errors:
response.flash = 'One or more of the entries is incorrect'
results = dict()
distinct = dict()
return dict(form = form, results = results, distinct = distinct)
| tylrbvn/longboxes | controllers/collection.py | collection.py | py | 5,360 | python | en | code | 0 | github-code | 6 |
38779549924 |
import asyncio
import datetime
import time
import random
import discord
from discord import Member, Guild, User, message
from discord.ext import commands
from datetime import datetime
client = discord.Client()
client = discord.Client(intents=discord.Intents.all())
bot = commands.Bot(command_prefix='!')
autoroles = {
842130432462946315: {'memberroles': [842133392375021569], 'botroles': [842502664032878672]}
}
# List of forbidden words
verboten = ['penis', 'hure', 'fotze', 'arschloch', 'depp', 'bastard', 'schlampe', 'dick', 'cock', 'pussy', 'penner', 'pute', 'sucker']
# AGOKI status replies
wieGehtEsDir = ['**Es geht mir bestens, danke für die Nachfrage.**', '**Daten zu analysieren ist anstrengend,dennoch tue ich meine Pflicht.**',
'**Gut, wie geht es Ihnen ?**', '**Meine programmierung ist zwar sehr fortschritlich, jedoch besitze ich keinen körperlichen oder geistigen Zustand um die Frage adequat zu beantworten.**',
'**Das weiß ich nicht. Ich hoffe dennoch dass es Ihnen bestens geht.**']
# !help command
hilfeListe = ['**Mit dem Befehl "!befehle" können Sie eine Liste mit den Verfügbaren befehlen auslesen. \r\n '
'Ich hoffe ich konnte Ihnen weiter helfen !**',
'**Wenden Sie sich an Director Keres oder Director Bolgorov für detaillierte Fragen.**', '**Ich brauche auch hilfe.**',
'**Nicht jetzt bitte. Versuchen Sie es später nochmals.**']
@client.event
async def on_ready():
print('Logging in als User {}'.format(client.user.name))
client.loop.create_task(status_task())
async def status_task():
while True:
await client.change_presence(activity=discord.Game('Empfange Daten...'), status=discord.Status.online)
await asyncio.sleep(10)
await client.change_presence(activity=discord.Game('Verarbeite Daten...'), status=discord.Status.online)
await asyncio.sleep(10)
def is_not_pinned(mess):
return not mess.pinned
# New members
@client.event
async def on_member_join(member):
guild: Guild = member.guild
if not member.bot:
        embed = discord.Embed(title='Willkommen bei AGO {}'.format(member.name),
description='Ich bin **AGOKI**, die Künstliche Intelligenz erschaffen von Keres & Bolgorov. Ich bin hier, um euch zu leiten und zu helfen. \r \n'
'Es ist eine große Ehre, unserer Organisation beizutreten und wir erwarten Respektvollen Umgang untereinander. \r \n'
'Unsere Organisation wird in verschiedenen Rängen unterteilt. \r \n'
'Alle Neuankömmlige haben den Rang **"Privates"** und bilden die unterste Stufe.\r \n'
'Für weitere Informationen, steht die beschreibung der Ränge im Textkanal "Allgemein", in der Beschreibung zur verfügung. \r \n'
'Des weiteren können Sie mit dem Befehl "!help" und "!befehle" noch mehr Informationen finden. \r\n'
'Viel Erfolg Soldat. \r \n'
'**Transmission End**'
'', color=0x51998C)
try:
if not member.dm_channel:
await member.create_dm()
await member.dm_channel.send(embed=embed)
except discord.errors.Forbidden:
print('Es konnte keine Willkommensnachricht an {} gesendet werden'.format(member.name))
autoguild = autoroles.get(guild.id)
if autoguild and autoguild['memberroles']:
for roleId in autoguild['memberroles']:
role = guild.get_role(roleId)
if role:
await member.add_roles(role, reason='AutoRoles', atomic=True)
else:
autoguild = autoroles.get(guild.id)
if autoguild and autoguild['botroles']:
for roleId in autoguild['botroles']:
role = guild.get_role(roleId)
if role:
await member.add_roles(role, reason='AutoRoles', atomic=True)
    # Public greeting in the #allgemein channel (skipped if no such channel exists)
    kanal = discord.utils.get(member.guild.channels, name='allgemein')
    if kanal:
        await kanal.send(f'**{member.mention}** ist uns beigetreten ! Willkommen Private.')
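# Central message handler: simple prefix commands (!ping, !befehle, !zeit, !help,
# the moderation commands, !userinfo, !clear) plus the banned-word filter.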
@client.event
async def on_message(message):
if message.content.startswith('!ping'):
await message.channel.send(f'Die Ping zwischen den AGO Servern und Ihnen beträgt {round(client.latency * 1000)}ms.')
    # !befehle: embed listing the available commands
if message.content.startswith('!befehle'):
#await message.channel.send('Ich habe folgende Befehle aus meiner Datenbank gefunden: \r\n')
befehlListe = discord.Embed(title='Ich habe folgende Befehle aus meiner Datenbank gefunden: ',
color=0x51998C)
befehlListe.add_field(name='!zeit',
value='Zeigt das Datum und die Uhrzeit an.',
inline=False)
befehlListe.add_field(name='!userinfo',
value='Ermöglicht es Informationen über einen bestimmten Benutzer zu erhalten.',
inline=False)
befehlListe.set_author(name='AGOKI',
icon_url='https://cdn.discordapp.com/app-icons/842427779002007613/457e0c63c8a70e962306a5399657cb33.png?size=256"')
await message.channel.send(embed=befehlListe)
    # Respond when a message mentions Agoki and asks how it is doing
    if 'wie geht es dir' in message.content and 'agoki' in message.content:
await message.channel.send(random.choice(wieGehtEsDir))
    # Chat filter: delete messages that contain a banned word and warn the author
content_raw = message.content.lower()
for word in verboten:
        if word in content_raw:
            await message.delete()
            await message.channel.send(f'**Warnung** ! Diese Wortwahl wird hier nicht geduldet. '
                                       f'Bei mehrmaligem Vorfall wird dieses Verhalten Konsequenzen haben.')
            break  # the message is already deleted; a second match must not try to delete it again
    # !zeit: current date and time
if '!zeit' in message.content:
today = datetime.now()
date = today.strftime('%d/%m/%Y')
zeit = today.strftime('%H:%M:%S')
await message.channel.send(f'Wir sind der **{date}** und es ist **{zeit}** Uhr.')
    # !help: reply with a random canned help message
if '!help' in message.content:
await message.channel.send(random.choice(hilfeListe))
    # !ban <name>: ban a member (requires the Ban Members permission)
if message.content.startswith('!ban') and message.author.guild_permissions.ban_members:
args = message.content.split(' ')
if len(args) == 2:
member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
if member:
await member.ban()
await message.channel.send(f'Auf Grund von Verstößen gegen den AGBs, wurde **{member.name}** von der Organisation gebannt.')
else:
await message.channel.send(f'Ich habe keinen User mit dem Namen **{args[1]}** gefunden.')
    # !unban <name>: lift a ban (requires the Ban Members permission)
if message.content.startswith('!unban') and message.author.guild_permissions.ban_members:
args = message.content.split(' ')
if len(args) == 2:
user: User = discord.utils.find(lambda banentry: args[1] in banentry.user.name,
await message.guild.bans()).user
if user:
await message.guild.unban(user)
await message.channel.send(
                    f'Nach einer gründlichen Überprüfung der Akte des Users **{user.name}** wurde dieser entbannt.')
else:
await message.channel.send(f'Ich habe keinen User mit dem Namen **{args[1]}** gefunden.')
    # !kick <name>: kick a member (requires the Kick Members permission)
if message.content.startswith('!kick') and message.author.guild_permissions.kick_members:
args = message.content.split(' ')
if len(args) == 2:
member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
if member:
await member.kick()
await message.channel.send(f'Auf Grund von Verstößen gegen den AGBs, wurde **{member.name}** von der Organisation gekickt.')
else:
await message.channel.send(f'Ich habe keinen User mit dem Namen **{args[1]}** gefunden.')
    # !userinfo <name>: show join dates and roles for a member
if message.content.startswith('!userinfo'):
args = message.content.split(' ')
if len(args) == 2:
member: Member = discord.utils.find(lambda m: args[1] in m.name, message.guild.members)
if member:
embed = discord.Embed(title='Userinformationen für {}'.format(member.name),
description='Hierbei Informationen zum User {}'.format(member.mention),
color=0x51998C)
embed.add_field(name='Server beigetreten', value=member.joined_at.strftime('%d/%m/%Y, %H:%M:%S'),
inline=True)
embed.add_field(name='Discord beigetreten', value=member.created_at.strftime('%d/%m/%Y, %H:%M:%S'),
inline=True)
rollen = ''
for role in member.roles:
if not role.is_default():
rollen += '{} \r\n'.format(role.mention)
if rollen:
embed.add_field(name='Rollen', value=rollen, inline=True)
embed.set_thumbnail(url=member.avatar_url)
embed.set_footer(text='Datenbank Vollständig')
await message.channel.send(embed=embed)
    # !clear <n>: bulk-delete the last n messages (pinned messages are kept)
if message.content.startswith('!clear'):
if message.author.permissions_in(message.channel).manage_messages:
args = message.content.split(' ')
if len(args) == 2:
if args[1].isdigit():
count = int(args[1]) + 1
deleted = await message.channel.purge(limit=count, check=is_not_pinned)
await message.channel.send('Ich habe {} Nachrichten gelöscht.'.format(len(deleted) - 1))
client.run('')
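# A minimal sketch (not part of the original file, which deliberately omits the token):
# assuming the token is exported as the environment variable DISCORD_TOKEN, it could be
# loaded instead of the empty string above, e.g.
#
#   import os
#   client.run(os.environ['DISCORD_TOKEN'])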
|
Bolgorov/Agoki
|
agoki code (without token).py
|
agoki code (without token).py
|
py
| 10,312 |
python
|
de
|
code
| 0 |
github-code
|
6
|
22504436543
|
import scrapy
from scrapy import Request
class TrilhasTDC(scrapy.Spider):
name = "trilhas_tdc"
start_urls = [
"http://www.thedevelopersconference.com.br/tdc/2018/saopaulo/trilhas"
]
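    # The tracks page groups tracks into one column per conference day;
    # follow every track link and carry the day along in the request meta.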
def parse(self, response):
colunas = response.xpath('//div[contains(@class, "col-sp")]')
for coluna in colunas:
dia = coluna.xpath('./h4/text()').extract_first()
links_trilhas = coluna.xpath('./a/@href').extract()
for link_trilha in links_trilhas:
yield Request(
url=response.urljoin(link_trilha),
callback=self.parse_trilha,
meta={
'dia' : dia,
}
)
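    # An individual track page yields one item with the track's metadata.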
    def parse_trilha(self, response):
yield{
'dia' : response.meta.get('dia'),
'titulo' : response.xpath('//h1[@class="titulo-trilha"]/text()').extract_first(),
'subtitulo': response.xpath('//h1[@class="titulo-trilha"]/small/text()').extract_first(),
'descricao': response.xpath('//div[@class="lead"]//p/text()').extract(),
'link' : response.url,
}
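# A minimal usage sketch (assuming this spider sits inside a standard Scrapy project):
#
#   scrapy crawl trilhas_tdc -o trilhas.json
#
# which writes one JSON item per track with dia, titulo, subtitulo, descricao and link.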
|
anacls/scrapy-study
|
tdc_examples/scrapy_study/spiders/trilhas_tdc.py
|
trilhas_tdc.py
|
py
| 1,172 |
python
|
en
|
code
| 0 |
github-code
|
6
|