seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
---|---|---|---|---|---|---|---|---|---|---|---|---|
29325722402
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
To read the data sequentially from the hard disk, it is converted to the TFRecord format.
"""
import re
from pathlib import Path
import pandas as pd
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from sklearn.utils import shuffle
__date__ = '2021/04/06'
def load_data(npy_path, csv_path):
images = np.load(npy_path)
df = pd.read_csv(csv_path, index_col=None, header=0)
d = {'artifact': 0, 'galx_artificial_real': 1, 'rand_artificial_real': 1}
labels = df['object_type'].map(d).values
return images, labels
def _bytes_feature(value):
"""Returns byte_list type from string / byte type."""
if isinstance(value, type(tf.constant(0))):
# BytesList won't unpack a string from an EagerTensor.
value = value.numpy()
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def _float_feature(value):
"""Returns float_list type from float / double type."""
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _int64_feature(value):
"""Return Int64_list type from bool / enum / int / uint type."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def make_example(image, label, detector_id, sample_index, unique_index):
"""Convert data formats."""
feature = {
'image': _float_feature(image.reshape(-1)),
'label': _int64_feature([label]),
'detector_id': _int64_feature([detector_id]),
'sample_index': _int64_feature([sample_index]),
'unique_index': _int64_feature([unique_index])
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def main():
data_dir = Path('../../data/raw/real_bogus1')
npy_list = list(data_dir.glob('images*.npy'))
npy_list.sort()
output_dir = Path('../../data/processed/real_bogus1')
if not output_dir.exists():
output_dir.mkdir(parents=True)
r = re.compile(r'images(\d+)')
unique_id = 0
# Size and start of unique index for each detector.
data_info = {'detector_id': [], 'size': [], 'start_index': []}
for npy_path in npy_list:
m = r.search(npy_path.stem)
detector_id = int(m.group(1))
csv_path = data_dir / 'params{}.csv'.format(detector_id)
images, labels = load_data(npy_path=npy_path, csv_path=csv_path)
n = len(images)
indices = np.arange(n)
# Unique index across the entire data set.
unique_indices = indices + unique_id
data_info['detector_id'].append(detector_id)
data_info['size'].append(n)
data_info['start_index'].append(unique_id)
unique_id += n
images, labels, indices, unique_indices = shuffle(
images, labels, indices, unique_indices
)
# Write TFRecord.
record_path = str(output_dir / 'data{}.tfrecord'.format(detector_id))
with tf.io.TFRecordWriter(
record_path,
tf.io.TFRecordOptions(compression_type='GZIP')) as writer:
for image, label, index, unique_index in zip(
tqdm(images, desc=str(detector_id)), labels, indices,
unique_indices):
example = make_example(
image=image, label=label, detector_id=detector_id,
sample_index=index, unique_index=unique_index
)
writer.write(example.SerializeToString())
# Save the information of each file.
df = pd.DataFrame(data_info)
df.to_csv(output_dir / 'data_info.csv')
if __name__ == '__main__':
main()
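# --- Added reading sketch (not part of the original script) ---
# A minimal example of how the records written above could be read back with
# tf.data. The feature names mirror make_example(); images are stored
# flattened, so restoring their original shape is left to the caller.
def parse_example(serialized):
    feature_description = {
        'image': tf.io.VarLenFeature(tf.float32),
        'label': tf.io.FixedLenFeature([1], tf.int64),
        'detector_id': tf.io.FixedLenFeature([1], tf.int64),
        'sample_index': tf.io.FixedLenFeature([1], tf.int64),
        'unique_index': tf.io.FixedLenFeature([1], tf.int64),
    }
    parsed = tf.io.parse_single_example(serialized, feature_description)
    parsed['image'] = tf.sparse.to_dense(parsed['image'])
    return parsed
# Hypothetical usage with one of the files written by main():
# ds = tf.data.TFRecordDataset('data1.tfrecord', compression_type='GZIP')
# ds = ds.map(parse_example)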
|
ichiro-takahashi/tomoe-realbogus
|
src/data/make_record.py
|
make_record.py
|
py
| 3,756 |
python
|
en
|
code
| 2 |
github-code
|
6
|
27251363866
|
"""
File: Code/Chapter07/C02_RNNImgCla/FashionMNISTRNN.py
Created: 2023/4/27 8:08 PM
Author: @空字符
WeChat official account: @月来客栈
Zhihu: @月来客栈 https://www.zhihu.com/people/the_lastest
"""
import torch
import torch.nn as nn
import sys
sys.path.append('../../')
from Chapter06.C04_LN.layer_normalization import LayerNormalization
class FashionMNISTRNN(nn.Module):
def __init__(self, input_size=28, hidden_size=128,
num_layers=2, num_classes=10):
super(FashionMNISTRNN, self).__init__()
self.rnn = nn.RNN(input_size, hidden_size,nonlinearity='relu',
num_layers=num_layers, batch_first=True)
self.classifier = nn.Sequential(LayerNormalization(hidden_size),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(inplace=True),
nn.Linear(hidden_size, num_classes))
def forward(self, x, labels=None):
x = x.squeeze(1) # [batch_size,1,28,28] --> [batch_size,28,28]
x, _ = self.rnn(x) # input: [batch_size, time_steps, input_size]
# x: [batch_size, time_steps, hidden_size]
        logits = self.classifier(x[:, -1].squeeze(1))
        # take the last time step for classification:
        # x[:, -1]: [batch_size, hidden_size]
        # logits: [batch_size, num_classes]
if labels is not None:
loss_fct = nn.CrossEntropyLoss(reduction='mean')
loss = loss_fct(logits, labels)
return loss, logits
else:
return logits
if __name__ == '__main__':
model = FashionMNISTRNN()
x = torch.rand([32, 1, 28, 28])
y = model(x)
print(y.shape)
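    # --- Added sketch (not part of the original file) ---
    # The labels branch of forward() returns (loss, logits); the random labels
    # below are purely illustrative.
    labels = torch.randint(0, 10, (32,))
    loss, logits = model(x, labels=labels)
    print(loss.item(), logits.shape)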
|
moon-hotel/DeepLearningWithMe
|
Code/Chapter07/C02_RNNImgCla/FashionMNISTRNN.py
|
FashionMNISTRNN.py
|
py
| 1,754 |
python
|
en
|
code
| 116 |
github-code
|
6
|
75282287228
|
import re
import sys
def part1():
recepies = [3, 7]
a_index, b_index = 0, 1
in_data = 440231
while len(recepies) < in_data + 10:
s = recepies[a_index] + recepies[b_index]
for l in str(s):
recepies.append(int(l))
recepies_len = len(recepies)
a_index = (a_index + recepies[a_index] + 1) % recepies_len
b_index = (b_index + recepies[b_index] + 1) % recepies_len
return ''.join(map(str, recepies[in_data:in_data+10]))
def part2():
recepies = [3, 7]
a_index, b_index = 0, 1
in_data = list(map(int, list('01245')))
in_data = list(map(int, list('59414')))
in_data = list(map(int, list('440231')))
in_data_len = len(in_data)
build_index = 0
while True:
s = recepies[a_index] + recepies[b_index]
for l in str(s):
l = int(l)
recepies.append(int(l))
if l == in_data[build_index]:
build_index += 1
            else:
                # restart the match, re-checking whether this digit starts a new one
                build_index = 1 if l == in_data[0] else 0
            if build_index >= in_data_len:
                print('FOUND!')
                return len(recepies) - in_data_len
recepies_len = len(recepies)
a_index = (a_index + recepies[a_index] + 1) % recepies_len
b_index = (b_index + recepies[b_index] + 1) % recepies_len
def main():
print(part1())
print(part2())
if __name__ == '__main__':
main()
|
elitan/adventofcode
|
2018/14/main.py
|
main.py
|
py
| 1,209 |
python
|
en
|
code
| 1 |
github-code
|
6
|
30590549707
|
import numpy as np
from pathlib import Path
from PIL import Image, ImageDraw, ImageFont
from tqdm import tqdm
SQSIZE = 60
SPACING = 15
def make_col(start, end, n=5):
"""Create a column of numbers."""
nums = np.random.choice(np.arange(start, end+1), size=n, replace=False)
return nums
def generate_card():
"""Create a bingo card and save it as PNG image."""
# Create five columns
cols = np.array([make_col(15*i + 1, 15*i + 15) for i in range(5)])
    # Replace the center cell by the median of the other values in the middle
    # column, so that it ends up in the middle row when the columns are sorted
cols[2, 2] = np.median(np.r_[
cols[2, :2],
cols[2, 3:]
])
# Sort the columns
rows = np.sort(cols.T, axis=0)
rows[2, 2] = -1
cols = rows.T
# Create the bingo image and fill the background with a light color
bgcolor = tuple(np.random.randint(200, 255) for _ in range(3))
textcolor = tuple(np.random.randint(50, 150) for _ in range(3))
img_width = 5 * SQSIZE + 6 * SPACING
img_height = 6 * SQSIZE + 7 * SPACING
img = Image.new("RGB", (img_width, img_height), color=bgcolor)
draw = ImageDraw.Draw(img)
topfont = ImageFont.truetype(r"C:\Windows\Fonts\CALIST.TTF", size=int(SQSIZE * 0.75))
numfont = ImageFont.truetype(r"C:\Windows\Fonts\CALIST.TTF", size=SQSIZE // 2)
for rowidx in range(5):
# Show one letter from 'BINGO' at the top of the column
x0 = SPACING + SQSIZE // 4 + (SPACING + SQSIZE) * rowidx
y0 = SPACING
draw.text((x0, y0), "BINGO"[rowidx], font=topfont, fill=textcolor)
for colidx in range(5):
# Create a square to put the number in
x0 = SPACING + (SPACING + SQSIZE) * rowidx
y0 = SPACING + (SPACING + SQSIZE) * (colidx + 1)
x1 = x0 + SQSIZE
y1 = y0 + SQSIZE
draw.rectangle([x0, y0, x1, y1], outline=(0, 0, 0))
# Create the text for the number
text = str(rows[colidx, rowidx])
textcoords = (x0+SPACING, y0+SPACING)
# For single-digit numbers, move the text to center it
if rows[colidx, rowidx] < 10:
textcoords = (x0 + int(SPACING * 1.5), y0 + SPACING)
font = numfont
# For the center box: other text and font size
if rowidx == colidx == 2:
text = "BONUS"
font = ImageFont.truetype(r"C:\Windows\Fonts\CALIST.TTF", size=SQSIZE // 5)
textcoords = (x0 + SPACING // 2 + 1, y0 + int(SPACING * 1.5))
# Put the number in the square
            draw.text(textcoords, text, font=font, fill=textcolor)
# Create a filename with a number that doesn't exist yet
bingodir = Path(__file__).parent
volgnr = 0
while True:
fn = bingodir / f"kaart{volgnr:03d}.png"
if not fn.is_file():
break
volgnr += 1
# Finally, save the image
img.save(fn)
if __name__ == "__main__":
for _ in tqdm(range(150)):
generate_card()
|
Lewistrick/bingogenerator
|
bingo.py
|
bingo.py
|
py
| 3,060 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21884308147
|
"""Setting up various cosmic populations."""
from frbpoppy import CosmicPopulation
# You can set up a population with arguments ...
pop = CosmicPopulation(1e4, n_days=1, name='my_own_population', repeaters=True,
generate=False)
# ... but also adapt specific components:
# The number density / distance parameters
pop.set_dist(model='vol_co', z_max=0.01, alpha=-1.5,
H_0=67.74, W_m=0.3089, W_v=0.6911)
# Which dispersion measure components to include
pop.set_dm(mw=True, igm=True, host=True)
# Dispersion measure properties
pop.set_dm_host(model='gauss', mean=100, std=200)
pop.set_dm_igm(model='ioka', slope=1000, std=None)
pop.set_dm_mw(model='ne2001')
# Emission range of FRB sources
pop.set_emission_range(low=100e6, high=10e9)
# Luminosity of FRBs
# See the per_source argument? That allows you to give different properties
# to different bursts from the same source. You can do that for the luminosity,
# or any of the following parameters
pop.set_lum(model='powerlaw', low=1e38, high=1e38, power=0,
per_source='different')
# Pulse width
pop.set_w(model='uniform', low=10, high=10)
# Spectral index
pop.set_si(model='gauss', mean=0, std=0)
# If repeaters, how they repeat
pop.set_time(model='regular', rate=2)
# And then generate the population!
pop.generate()
# Or simply use some predefined models
pop_simple = CosmicPopulation.simple(1e4, generate=True)
pop_complex = CosmicPopulation.complex(1e4, generate=True)
|
TRASAL/frbpoppy
|
examples/setting_up_populations.py
|
setting_up_populations.py
|
py
| 1,478 |
python
|
en
|
code
| 26 |
github-code
|
6
|
9830880946
|
# -*- coding: utf-8 -*-
##
# @file __init__.py
# @brief Contain paths to information files
# @author Gabriel H Riqueti
# @email [email protected]
# @date 06/05/2021
#
import os
from pathlib import Path
PATH_NERNST_EQUATION_INFO = Path(os.path.abspath(__file__)).parent / 'nernst_equation.txt'
PATH_GOLDMAN_EQUATION_INFO = Path(os.path.abspath(__file__)).parent / 'goldman_equation.txt'
if not PATH_NERNST_EQUATION_INFO.exists():
raise FileNotFoundError(PATH_NERNST_EQUATION_INFO.as_posix() + ' not found!')
if not PATH_GOLDMAN_EQUATION_INFO.exists():
raise FileNotFoundError(PATH_GOLDMAN_EQUATION_INFO.as_posix() + ' not found!')
|
gabrielriqu3ti/biomedical_signal_processing
|
biomedical_signal_processing/info/__init__.py
|
__init__.py
|
py
| 670 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34913449577
|
import numpy as np
import scipy.integrate
import scipy.optimize
import bokeh.plotting
from bokeh.plotting import figure, output_file, show
import bokeh.io
from bokeh.models import Span
def dilute(molecule_diluted, molecules_0, DR=0.2):  # input: the molecule object to dilute and the state vector holding its concentration
molecules_0[molecule_diluted.idx] *= (1-DR)
def replenish(molecule_replenished, molecules_0, DR=0.2):  # input: the molecule object to replenish and the state vector holding its concentration
molecules_0[molecule_replenished.idx] += molecule_replenished.lc * DR
def dilute_species(molecules_diluted,molecules_0,DR=0.2): #dilute a list of molecules
for molecule in (molecules_diluted):
dilute(molecule,molecules_0,DR)
def replenish_species(molecules_replenished, molecules_0, DR=0.2) : #replenish a list of molecules
for molecule in (molecules_replenished):
replenish(molecule,molecules_0,DR)
def run_model(model,t,parameters_list,molecules_0,dilute_list,replenish_list,result_all,DR=0.2):
start_cycle,end_cycle = np.array(t)*4
for n in range (start_cycle,end_cycle):
#define time
t_start= n*15
t_end = (n+1)*15
t= np.linspace(t_start,t_end,2)
#solve equation and save result
result = scipy.integrate.odeint(model, molecules_0, t, args=parameters_list)
result_all = np.append(result_all,result[1])
#update parameter
molecules_0 = result.transpose()[:,-1]
#dilution
        ### dilute out
dilute_species((dilute_list),molecules_0,DR)
###replenish
replenish_species((replenish_list),molecules_0,DR)
return result_all,molecules_0
def plot_result(molecule):
t = np.linspace(0, 15*(len(molecule)-1), len(molecule))
p = bokeh.plotting.figure(
plot_width=800,
plot_height=400,
x_axis_label="t",
y_axis_type="linear",
)
colors = bokeh.palettes.d3["Category10"][3]
# Populate glyphs
p.line(
t/60, molecule, line_width=2, color=colors[0]
)
vline1 = Span(location=4, dimension='height', line_color='black', line_width=1,line_dash='dashed')
vline2 = Span(location=16, dimension='height', line_color='black', line_width=1,line_dash='dashed')
p.add_layout(vline1)
p.add_layout(vline2)
show(p)
def plot_result_two_state(molecule):
t = np.linspace(0, 15*(len(molecule)-1), len(molecule))
p = bokeh.plotting.figure(
plot_width=800,
plot_height=400,
x_axis_label="t",
y_axis_type="linear",
)
colors = bokeh.palettes.d3["Category10"][3]
# Populate glyphs
p.line(
t/60, molecule, line_width=2, color=colors[0]
)
vline1 = Span(location=4, dimension='height', line_color='black', line_width=1,line_dash='dashed')
#vline2 = Span(location=16, dimension='height', line_color='black', line_width=1,line_dash='dashed')
p.add_layout(vline1)
#p.add_layout(vline2)
show(p)
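# --- Added usage sketch (hypothetical, not from the original repo) ---
# run_model() expects an ODE right-hand side with the odeint signature
# model(molecules, t, *parameters). A toy one-species decay model, run over
# the first hour (t = (0, 1), i.e. four 15-minute cycles) with no dilution:
def _toy_decay(molecules, t, k):
    return [-k * molecules[0]]

# result, final_state = run_model(_toy_decay, (0, 1), (0.1,), np.array([1.0]),
#                                 dilute_list=[], replenish_list=[],
#                                 result_all=np.array([]), DR=0.0)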
|
william831015/GRN-in-chemostat
|
scripts/functions.py
|
functions.py
|
py
| 3,004 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4992423632
|
from __future__ import annotations
import typing
from homeassistant.components.switch import (
DOMAIN as PLATFORM_SWITCH,
SwitchEntity,
)
try:
from homeassistant.components.switch import SwitchDeviceClass
DEVICE_CLASS_OUTLET = SwitchDeviceClass.OUTLET
DEVICE_CLASS_SWITCH = SwitchDeviceClass.SWITCH
except:
from homeassistant.components.switch import DEVICE_CLASS_OUTLET, DEVICE_CLASS_SWITCH
from .merossclient import const as mc # mEROSS cONST
from . import meross_entity as me
if typing.TYPE_CHECKING:
from homeassistant.core import HomeAssistant
from homeassistant.config_entries import ConfigEntry
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_devices
):
me.platform_setup_entry(hass, config_entry, async_add_devices, PLATFORM_SWITCH)
class MLSwitch(me.MerossToggle, SwitchEntity):
"""
generic plugs (single/multi outlet and so)
"""
PLATFORM = PLATFORM_SWITCH
@staticmethod
def build_for_device(device: me.MerossDevice, channel: object, namespace: str):
return MLSwitch(device, channel, None, DEVICE_CLASS_OUTLET, None, namespace)
class ToggleXMixin(
me.MerossDevice if typing.TYPE_CHECKING else object
):
def __init__(self, api, descriptor, entry):
super().__init__(api, descriptor, entry)
        # we build the switches here after everything else has been
        # set up, since the togglex verb might refer to a more specialized
        # entity than switches
togglex = descriptor.digest.get(mc.KEY_TOGGLEX)
if isinstance(togglex, list):
for t in togglex:
channel = t.get(mc.KEY_CHANNEL)
if channel not in self.entities:
MLSwitch.build_for_device(
self, channel, mc.NS_APPLIANCE_CONTROL_TOGGLEX
)
elif isinstance(togglex, dict):
channel = togglex.get(mc.KEY_CHANNEL)
if channel not in self.entities:
MLSwitch.build_for_device(
self, channel, mc.NS_APPLIANCE_CONTROL_TOGGLEX
)
        # This is a heuristic for legacy firmwares: when we cannot init any
        # entity from system.all.digest, we guess we should have at least a switch
        # edit: I guess ToggleX firmwares and onward already support the
        # system.all.digest status broadcast
if not self.entities:
MLSwitch.build_for_device(self, 0, mc.NS_APPLIANCE_CONTROL_TOGGLEX)
def _handle_Appliance_Control_ToggleX(self, header: dict, payload: dict):
self._parse__generic(mc.KEY_TOGGLEX, payload.get(mc.KEY_TOGGLEX))
def _parse_togglex(self, payload: dict):
self._parse__generic(mc.KEY_TOGGLEX, payload)
class ToggleMixin(
me.MerossDevice if typing.TYPE_CHECKING else object
):
def __init__(self, api, descriptor, entry):
super().__init__(api, descriptor, entry)
        # older firmwares (MSS110 with 1.1.28) don't seem to really have 'digest'
        # but have 'control', and the toggle payload doesn't seem to carry 'channel'
p_control = descriptor.all.get(mc.KEY_CONTROL)
if p_control:
p_toggle = p_control.get(mc.KEY_TOGGLE)
if isinstance(p_toggle, dict):
MLSwitch.build_for_device(
self,
p_toggle.get(mc.KEY_CHANNEL, 0),
mc.NS_APPLIANCE_CONTROL_TOGGLE,
)
if not self.entities:
MLSwitch.build_for_device(self, 0, mc.NS_APPLIANCE_CONTROL_TOGGLE)
def _handle_Appliance_Control_Toggle(self, header: dict, payload: dict):
self._parse_toggle(payload.get(mc.KEY_TOGGLE))
def _parse_toggle(self, payload):
"""
toggle doesn't have channel (#172)
"""
if isinstance(payload, dict):
entity: MLSwitch = self.entities[payload.get(mc.KEY_CHANNEL, 0)] # type: ignore
entity._parse_toggle(payload)
|
ZioTitanok/HomeAssistant-Configuration
|
custom_components/meross_lan/switch.py
|
switch.py
|
py
| 4,016 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71780096828
|
from colorfield.fields import ColorField
from django.core.validators import MinValueValidator, RegexValidator
from django.db import models
from users.models import User
class Ingredient(models.Model):
"""Класс интредиент"""
name = models.CharField(
verbose_name='Наименование ингредиента',
max_length=150,
help_text='Наименование ингредиента',
)
measurement_unit = models.CharField(
verbose_name='Единица измерения',
max_length=150,
help_text='Единица измерения',
)
class Meta:
ordering = ('name',)
verbose_name = 'Ингредиент'
verbose_name_plural = 'Ингредиенты'
def __str__(self):
return self.name
class Tag(models.Model):
"""Класс тег"""
name = models.CharField(
max_length=50,
verbose_name='Hазвание',
unique=True,
db_index=True
)
color = ColorField(
default='#17A400',
max_length=7,
verbose_name='Цвет',
unique=True,
help_text='Цвет в формате HEX кода',
)
slug = models.SlugField(
max_length=50,
verbose_name='slug',
unique=True,
validators=[RegexValidator(
regex=r'^[-a-zA-Z0-9_]+$',
message='Использован недопустимый символ'
)]
)
class Meta:
verbose_name = 'Тег'
verbose_name_plural = 'Теги'
ordering = ('name', )
def __str__(self):
return self.name
class Recipe(models.Model):
"""Класс рецепт"""
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
verbose_name='Автор',
related_name='recipes',
help_text='Автор рецепта',
)
name = models.CharField(
verbose_name='Название рецепта',
max_length=150,
help_text='Название рецепта',
)
image = models.ImageField(
verbose_name='Картинка',
upload_to='recipes/images',
help_text='Картинка',
)
text = models.TextField(
verbose_name='Описание',
help_text='Описание рецепта',
)
ingredients = models.ManyToManyField(
Ingredient,
verbose_name='Ингредиенты рецепта',
through='RecipeIngredient',
related_name='recipes',
help_text='Ингредиенты в составе рецепта',
)
tags = models.ManyToManyField(
Tag,
verbose_name='Тег рецепта',
related_name='recipes',
help_text='Тег рецепта',
)
cooking_time = models.IntegerField(
verbose_name='Время приготовления',
validators=[
MinValueValidator(
1,
message='Минимальное время приготовления 1 мин.'
)
],
help_text='Время приготовления',
)
pub_date = models.DateTimeField(
verbose_name='Дата публикации',
auto_now_add=True,
help_text='Дата публикации',
)
class Meta:
ordering = ('-pub_date', )
verbose_name = 'Рецепт'
verbose_name_plural = 'Рецепты'
def __str__(self):
return self.name
class RecipeIngredient(models.Model):
"""Класс рецепт-интредиент"""
recipe = models.ForeignKey(
Recipe,
on_delete=models.CASCADE,
verbose_name='Рецепт',
related_name='ingredient',
help_text='Рецепт',
)
ingredient = models.ForeignKey(
Ingredient,
on_delete=models.CASCADE,
verbose_name='Ингредиент',
related_name='ingredient',
help_text='Ингредиент',
)
amount = models.IntegerField(
verbose_name='Количество',
validators=[
MinValueValidator(
1,
message='Минимальное количество 1'
)
],
help_text='Количество',
)
class Meta:
ordering = ('recipe',)
verbose_name = 'Количество ингредиента'
verbose_name_plural = 'Количество ингредиентов'
constraints = [
models.UniqueConstraint(
fields=('recipe', 'ingredient', ),
name='unique_ingredient',
),
]
def __str__(self):
return f'{self.ingredient} в {self.ingredient.measurement_unit}'
class Follow(models.Model):
"""Класс подписки"""
follower = models.ForeignKey(
User,
on_delete=models.CASCADE,
verbose_name='Подписчик',
related_name='follower',
help_text='Подписчик',
)
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
verbose_name='Автор',
related_name='author',
help_text='Автор',
)
class Meta:
ordering = ('-id',)
verbose_name = 'Подписка'
verbose_name_plural = 'Подписки'
constraints = [
models.UniqueConstraint(
fields=('follower', 'author', ),
name='unique_follow',
),
]
def __str__(self):
return f'{self.follower} подписался на: {self.author}'
class FavoriteRecipe(models.Model):
"""Класс избранное"""
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
verbose_name='Пользователь',
related_name='favorite',
help_text='Пользователь добавивший рецепт',
)
recipe = models.ForeignKey(
Recipe,
verbose_name='Избранное',
on_delete=models.CASCADE,
related_name='favorite',
help_text='Избранный рецепт',
)
class Meta:
ordering = ('id',)
verbose_name = 'Избранное'
verbose_name_plural = 'Избранные рецепты'
constraints = [
models.UniqueConstraint(
fields=('user', 'recipe', ),
name='unique_favorite',
),
]
def __str__(self):
return f'{self.recipe} добавлен в избранное'
class ShoppingCart(models.Model):
"""Класс покупок"""
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
verbose_name='Пользователь',
related_name='shopping',
help_text='Пользователь добавивший покупки',
)
recipe = models.ForeignKey(
Recipe,
on_delete=models.CASCADE,
verbose_name='Покупки',
related_name='shopping',
help_text='Рецепт для покупок',
)
class Meta:
ordering = ('id',)
verbose_name = 'Покупка'
verbose_name_plural = 'Покупки'
constraints = [
models.UniqueConstraint(
fields=('user', 'recipe', ),
name='unique_shopping',
),
]
def __str__(self):
return f'{self.recipe} добавлен в покупки.'
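# --- Added query sketch (illustrative, not part of the original models) ---
# Example of walking the RecipeIngredient through-model to sum the amounts
# used by a single recipe; `some_recipe` is a placeholder object.
# from django.db.models import Sum
# total = RecipeIngredient.objects.filter(recipe=some_recipe).aggregate(
#     total_amount=Sum('amount'))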
|
GirzhuNikolay/foodgram-project-react
|
backend/recipes/models.py
|
models.py
|
py
| 7,583 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
73811427708
|
from torch.utils.data import DataLoader
from sklearn.model_selection import KFold
from .datasets import get_cifar10_datasets, get_cifar100_datasets, get_mnist_datasets, get_image_net_dataset, TruncatedDataset, MergedDataset
from .partition import partition_by_class, partition_with_dirichlet_distribution
data_path = '/home/hansel/developer/embedding/data/'
def get_datasets(data_dir, dataset, use_hdf5=False):
if dataset == 'cifar10':
trn_dataset, val_dataset = get_cifar10_datasets(data_dir=data_dir, use_hdf5=use_hdf5)
elif dataset == 'cifar100':
trn_dataset, val_dataset = get_cifar100_datasets(data_dir=data_dir)
elif dataset == 'mnist':
trn_dataset, val_dataset = get_mnist_datasets(data_dir=data_dir, use_hdf5=use_hdf5)
elif dataset == 'imagenet':
trn_dataset, val_dataset = get_image_net_dataset(data_dir=data_dir)
return trn_dataset, val_dataset
def get_dl_lists(dataset, batch_size, partition=None, n_site=None, alpha=None, net_dataidx_map_train=None, net_dataidx_map_test=None, shuffle=True, k_fold_val_id=None, seed=None, site_indices=None, use_hdf5=True):
trn_dataset, val_dataset = get_datasets(data_dir=data_path, dataset=dataset, use_hdf5=use_hdf5)
if partition == 'regular':
trn_ds_list = [TruncatedDataset(trn_dataset, dataset) for _ in range(n_site)]
val_ds_list = [TruncatedDataset(val_dataset, dataset) for _ in range(n_site)]
elif partition == 'by_class':
(net_dataidx_map_train, net_dataidx_map_test) = partition_by_class(data_dir=data_path, dataset=dataset, n_sites=n_site, seed=seed)
trn_ds_list = [TruncatedDataset(trn_dataset, dataset, idx_map) for idx_map in net_dataidx_map_train.values()]
val_ds_list = [TruncatedDataset(val_dataset, dataset, idx_map) for idx_map in net_dataidx_map_test.values()]
elif partition == 'dirichlet':
(net_dataidx_map_train, net_dataidx_map_test) = partition_with_dirichlet_distribution(data_dir=data_path, dataset=dataset, n_sites=n_site, alpha=alpha, seed=seed)
trn_ds_list = [TruncatedDataset(trn_dataset, dataset, idx_map) for idx_map in net_dataidx_map_train.values()]
val_ds_list = [TruncatedDataset(val_dataset, dataset, idx_map) for idx_map in net_dataidx_map_test.values()]
elif partition == 'given':
trn_ds_list = [TruncatedDataset(trn_dataset, dataset, idx_map) for idx_map in net_dataidx_map_train.values()]
val_ds_list = [TruncatedDataset(val_dataset, dataset, idx_map) for idx_map in net_dataidx_map_test.values()]
elif partition == '5foldval':
trn_ds_list = [TruncatedDataset(trn_dataset, dataset, idx_map) for idx_map in net_dataidx_map_train.values()]
val_ds_list = [TruncatedDataset(val_dataset, dataset, idx_map) for idx_map in net_dataidx_map_test.values()]
merged_ds_list = [MergedDataset(trn_ds_list[i], val_ds_list[i], dataset) for i in range(len(trn_ds_list))]
kfold = KFold(n_splits=5, shuffle=True, random_state=1)
splits = [list(kfold.split(range(len(merged_ds)))) for merged_ds in merged_ds_list]
indices = [split[k_fold_val_id] for split in splits]
trn_ds_list = [TruncatedDataset(merged_ds_list[i], dataset, idx_map[0]) for i, idx_map in enumerate(indices)]
val_ds_list = [TruncatedDataset(merged_ds_list[i], dataset, idx_map[1]) for i, idx_map in enumerate(indices)]
trn_dl_list = [DataLoader(dataset=trn_ds, batch_size=batch_size, shuffle=shuffle, pin_memory=True, num_workers=0) for trn_ds in trn_ds_list]
val_dl_list = [DataLoader(dataset=val_ds, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=0) for val_ds in val_ds_list]
if site_indices is not None:
trn_dl_list = [trn_dl_list[i] for i in site_indices]
val_dl_list = [val_dl_list[i] for i in site_indices]
return trn_dl_list, val_dl_list
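# --- Added usage sketch (illustrative, not from the original repo) ---
# Example call using the signature above; the dataset name and parameters are
# placeholder values, and data_path must point at an existing data directory.
# trn_dls, val_dls = get_dl_lists('cifar10', batch_size=128, partition='dirichlet',
#                                 n_site=4, alpha=0.5, seed=0)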
|
somcogo/embedding
|
utils/data_loader.py
|
data_loader.py
|
py
| 3,860 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22610043366
|
from django import forms
from django.forms.models import inlineformset_factory
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Field, Fieldset, Div, HTML, Submit, Button
from hybridjango.custom_layout_object import *
from hybridjango.mixins import BootstrapFormMixin
from .models import *
class EventCommentForm(forms.ModelForm):
class Meta:
model = EventComment
fields = ['event', 'text']
class EventForm(forms.ModelForm):
class Meta:
model = Event
fields = [
'title',
'type',
'ingress',
'text', # TODO: Make this field a HTMLField in form
'image',
'event_start',
'event_end',
'weight',
'location',
'hidden',
'news',
'public',
'signoff_close_on_signup_close',
'signoff_close',
]
widgets = {
'ingress': forms.Textarea(attrs={'rows': 3}),
}
def __init__(self, *args, **kwargs):
super(EventForm, self).__init__(*args, **kwargs)
for field in self.fields:
if self.fields[field] == self.fields['event_start'] or self.fields[field] == self.fields['event_end']:
self.fields[field].widget.attrs.update({'class': 'form_datetime', 'autocomplete': 'off'})
else:
self.fields[field].widget.attrs.update({'class': 'form-control'})
class MarkPunishmentForm(forms.ModelForm, BootstrapFormMixin):
class Meta:
model = MarkPunishment
exclude = [
'rules',
'delays',
'duration',
]
def __init__(self, *args, **kwargs):
super(MarkPunishmentForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = True
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-md-3 create-label'
self.helper.field_class = 'col-md-9'
self.helper.layout = Layout(
Div(
Field('goes_on_secondary'),
Field('too_many_marks'),
Field('signoff_close'),
HTML("<br>"),
Field('mark_on_late_signoff'),
HTML("<br>"),
Field('remove_on_too_many_marks'),
HTML("<br>"),
HTML("<br>"),
Fieldset('Add delays',
Formset('delays')),
HTML("<br>"),
Fieldset('Add rules',
Formset('rules')),
HTML("<br>"),
Submit('submit', 'Lagre'),
Button('back', "Tilbake", css_class='btn btn-default pull-right', onclick="goBack()"),
)
)
class RuleForm(forms.ModelForm, BootstrapFormMixin):
class Meta:
model = Rule
exclude = [
'punishment',
]
RuleFormSet = inlineformset_factory(
MarkPunishment, Rule, form=RuleForm,
fields=['rule'], extra=1, can_delete=True
)
class DelayForm(forms.ModelForm, BootstrapFormMixin):
class Meta:
model = Delay
exclude = [
'punishment',
]
DelayFormSet = inlineformset_factory(
MarkPunishment, Delay, form=DelayForm,
fields=['marks', 'minutes'], extra=1, can_delete=True
)
|
hybrida/hybridjango
|
apps/events/forms.py
|
forms.py
|
py
| 3,382 |
python
|
en
|
code
| 4 |
github-code
|
6
|
14205447601
|
#!/usr/bin/env python
# coding: utf-8
# ## Single Dimension Array
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
# In[2]:
data = [10, 23, 34, 35, 45, 59]
df = pd.DataFrame(data, columns=['Score'])
df
# In[3]:
#plt.pie(df)
plt.pie(df, labels=df['Score'])
plt.title("Students Score")
plt.show()
# In[4]:
label_name = ['John', 'Tim', 'Kenny', 'AK', 'Helvetica', 'Bryan']
# In[5]:
plt.pie(df, labels=label_name)
plt.title("Students Score")
plt.show()
# In[6]:
plt.pie(df, labels=label_name, autopct='%1.1f%%')
plt.title("Students Score")
plt.show()
# In[7]:
plt.pie(df, labels=label_name, autopct='%1.2f%%')
plt.title("Students Score")
plt.show()
# In[8]:
plt.pie(df, labels=label_name, autopct='%1.3f%%')
plt.title("Students Score")
plt.show()
# ## Two Dimension Array
# In[9]:
new_data = [['John', 10], ['Tim', 24], ['AK', 34]]
# In[10]:
new_data
# In[11]:
newdf = pd.DataFrame(new_data)
# In[12]:
newdf
# In[13]:
newdf = pd.DataFrame(new_data, columns=['Name', 'Score'])
# In[14]:
newdf
# In[15]:
newdf['Score']
# In[16]:
newdf['Name']
# In[17]:
plt.pie(newdf['Score'], labels=newdf['Name'], autopct='%1.1f%%')
plt.show()
# In[ ]:
# In[ ]:
|
AileshC/PythonLearning
|
python_notebooks/MatPlotLib_Pie_Demo.py
|
MatPlotLib_Pie_Demo.py
|
py
| 1,233 |
python
|
en
|
code
| 1 |
github-code
|
6
|
38116639709
|
print("'0' for exit")
#take ch input from the user
ch = input('ch: ')
if (ch == '0'):
exit()
elif ch.isnumeric():
print('digit')
elif ch.isalpha():
print('alphabet')
else:
print('neither alphabet nor digit')
|
3Sangeetha3/python
|
if_elif_else.py
|
if_elif_else.py
|
py
| 224 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6629686746
|
import tensorflow as tf
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
#Configure GPU
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
print(tf.config.experimental.get_memory_growth(gpu))
from tensorflow.keras import (models, layers, datasets, callbacks, optimizers,
initializers, regularizers)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
import os
import time
import numpy as np
from six import iteritems
from time import perf_counter
import ml_genn as mlg
from ml_genn import Model
from ml_genn.utils import parse_arguments, raster_plot
#Separable convolutional components MobilenetV1
def SeparableConv( x , num_filters , strides , alpha=1.0 ):
planes = int(num_filters*alpha)
x.append(layers.Conv2D(planes, kernel_size=1, strides=1, padding="same", activation='relu', use_bias=False, kernel_initializer=initializer))
x.append(layers.Conv2D(planes, kernel_size=3, strides=strides, padding="same", groups=planes, activation='relu', use_bias=False, kernel_initializer=initializer))
x.append(layers.Conv2D(planes, kernel_size=1, strides=1, padding="same", activation='relu', use_bias=False, kernel_initializer=initializer))
return x
#Convolutional components MobilenetV1
def Conv( x , num_filters , kernel_size , strides=1 , alpha=1.0 ):
x.append(layers.Conv2D( int(num_filters * alpha ) , kernel_size=kernel_size , strides=strides , activation='relu', use_bias=False , padding='same', kernel_initializer=initializer))
return x
if __name__ == '__main__':
    args = parse_arguments('MobileNetV1 classifier model')
print('arguments: ' + str(vars(args)))
#check if tensorflow is running on GPU
print(tf.test.is_gpu_available())
print(tf.test.is_built_with_cuda())
n_norm_samples=1000
#Load Dataset
(x_train, y_train), (x_test, y_test) = datasets.cifar10.load_data()
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
#Normalize data
x_train = x_train / 255.0
x_test = x_test / 255.0
encoder = OneHotEncoder()
encoder.fit(y_train)
y_train = encoder.transform(y_train).toarray()
y_test = encoder.transform(y_test).toarray()
index_norm=np.random.choice(x_train.shape[0], n_norm_samples, replace=False)
x_norm = x_train[index_norm]
y_norm = y_train[index_norm]
# Create L2 regularizer
regularizer = regularizers.l2(0.01)
# Create image data generator
data_gen = ImageDataGenerator(width_shift_range=0.3,height_shift_range=0.8,rotation_range=30,zoom_range=0.1,
shear_range=0.01)
# Get training iterator
iter_train = data_gen.flow(x_train, y_train, batch_size=256)
initializer="he_uniform"
#Creation Model
layers_mobilenetv1 =[
layers.Conv2D(32,3, strides=1, activation='relu', padding="same", use_bias=False, input_shape=x_train.shape[1:])
]
layers_mobilenetv1 = Conv(layers_mobilenetv1,num_filters=32 , kernel_size=3 , strides=1 )
layers_mobilenetv1 = SeparableConv( layers_mobilenetv1, num_filters=32 , strides=1 )
layers_mobilenetv1 = Conv( layers_mobilenetv1, num_filters=64 , kernel_size=1 )
layers_mobilenetv1 = SeparableConv( layers_mobilenetv1, num_filters=64 , strides=1 )
layers_mobilenetv1 = Conv( layers_mobilenetv1, num_filters=128 , kernel_size=1 )
layers_mobilenetv1 = SeparableConv( layers_mobilenetv1, num_filters=128 , strides=1 )
layers_mobilenetv1 = Conv(layers_mobilenetv1, num_filters=128 , kernel_size=1 )
layers_mobilenetv1 = SeparableConv( layers_mobilenetv1, num_filters=128 , strides=2 )
layers_mobilenetv1 = Conv( layers_mobilenetv1, num_filters=256 , kernel_size=1 )
layers_mobilenetv1 = SeparableConv( layers_mobilenetv1, num_filters=256 , strides=1 )
layers_mobilenetv1 = Conv( layers_mobilenetv1, num_filters=256 , kernel_size=1 )
layers_mobilenetv1 = SeparableConv( layers_mobilenetv1, num_filters=256 , strides=2 )
layers_mobilenetv1 = Conv( layers_mobilenetv1, num_filters=512 , kernel_size=1 )
for i in range( 5 ):
layers_mobilenetv1 = SeparableConv( layers_mobilenetv1, num_filters=256 , strides=1 )
layers_mobilenetv1 = Conv( layers_mobilenetv1, num_filters=512 , kernel_size=1 )
layers_mobilenetv1 = SeparableConv(layers_mobilenetv1, num_filters=512 , strides=2 )
layers_mobilenetv1 = Conv(layers_mobilenetv1, num_filters=1024 , kernel_size=1 )
layers_mobilenetv1 = SeparableConv(layers_mobilenetv1, num_filters=1024 , strides=2 )
layers_mobilenetv1 = Conv(layers_mobilenetv1, num_filters=1024 , kernel_size=1 )
layers_mobilenetv1.append(layers.GlobalAveragePooling2D())
layers_mobilenetv1.append(layers.Dense(10,activation='softmax', use_bias=False))
tf_model = models.Sequential(layers_mobilenetv1,name="mobilenetv1")
tf_model.summary()
if args.reuse_tf_model:
tf_model = models.load_model('mobilenetv1')
else:
optimizer = optimizers.SGD(lr=0.05, momentum=0.9)
tf_model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
save_weights_only=True,
verbose=1,
save_best_only=True,
monitor='val_accuracy')
#train TensorFlow model
steps_per_epoch = x_train.shape[0] // 256
tf_model.fit(iter_train, steps_per_epoch=steps_per_epoch, epochs=200, callbacks=cp_callback, validation_data=(x_test,y_test))
#Save Mobilenetv1_tf_model
models.save_model(tf_model, 'mobilenetv1', save_format='h5')
#Evaluate TensorFlow model
tf_model.evaluate(x_test, y_test)
tf_eval_start_time = perf_counter()
tf_model.evaluate(x_test, y_test)
print("TF evaluation:%f" % (perf_counter() - tf_eval_start_time))
    # Build the ML GeNN converter from the normalisation samples
converter = args.build_converter(x_norm, K=10, norm_time=500)
# Convert and compile ML GeNN model
mlg_model = Model.convert_tf_model(
tf_model, converter=converter, connectivity_type=args.connectivity_type,
input_type=args.input_type, dt=args.dt, batch_size=args.batch_size, rng_seed=args.rng_seed,
kernel_profiling=args.kernel_profiling)
time = 10 if args.converter == 'few-spike' else 500
mlg_eval_start_time = perf_counter()
acc, spk_i, spk_t = mlg_model.evaluate([x_test], [y_test], time, save_samples=args.save_samples)
print("MLG evaluation:%f" % (perf_counter() - mlg_eval_start_time))
if args.kernel_profiling:
print("Kernel profiling:")
for n, t in iteritems(mlg_model.get_kernel_times()):
print("\t%s: %fs" % (n, t))
# Report ML GeNN model results
print('Accuracy of MobileNetv1 GeNN model: {}%'.format(acc[0]))
if args.plot:
neurons = [l.neurons.nrn for l in mlg_model.layers]
raster_plot(spk_i, spk_t, neurons, time=time)
|
jfgf11/ml_genn_examples_ssh
|
Sequential API/mobilenetv1.py
|
mobilenetv1.py
|
py
| 7,524 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43095956138
|
import plugins
import sys
import data
import model
plugins.load_all('config.json')
target = sys.argv[1]
start_node = model.Node('person', target)
d = data.Storage(target)
d.add_node(start_node)
def handle(tokens):
if tokens[0].lower() == 'list':
# show list of nodes
if len(tokens) > 1:
if tokens[1].lower() == 'nodes':
nodes = d.get_nodes()
print('\n'.join(map(lambda x: str(x), nodes)))
elif tokens[1].lower() == 'actions':
if len(tokens) > 2:
actions = plugins.fetch_actions(tokens[2])
print('\n'.join(map(lambda x: str(x), actions)))
else:
print('USAGE: list actions NODE_TYPE')
else:
print('USAGE: list (nodes | actions NODE_TYPE)')
elif tokens[0].lower() == 'get':
if len(tokens) > 1:
id = int(tokens[1])
result = d.get_node(id)
print(result)
print(result.data_json)
else:
print('USAGE: get NODE_ID')
elif tokens[0].lower() == 'run':
# run plugin
if len(tokens) > 2:
action_name = tokens[1]
action = plugins.fetch(action_name)
target_id = int(tokens[2])
target = d.get_node(target_id)
result = action['func'](target)
for n in result:
d.add_node(n)
connection = model.Connection(target.id, n.id, action_name, 'concrete', '')
d.add_connection(connection)
d.add_node(target)
print(result)
else:
print('USAGE: run ACTION NODE_ID')
else:
print('< Unknown command: ' + command)
while True:
command = raw_input('> ')
tokens = command.split(' ')
if tokens[0].lower() == 'quit':
break
else:
try:
handle(tokens)
except Exception as e:
print(e)
|
tracer-sec/osint
|
console.py
|
console.py
|
py
| 2,022 |
python
|
en
|
code
| 8 |
github-code
|
6
|
39540715020
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 15 2021
@author: sagrana
"""
from rest_framework import status
from rest_framework.generics import CreateAPIView, RetrieveAPIView
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from .models import User
from .serializers import UserRegistrationSerializer
from .serializers import UserLoginSerializer
class UserRegistrationView(CreateAPIView):
""""UserRegistrationView
"""
serializer_class = UserRegistrationSerializer
permission_classes = (AllowAny,)
def post(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
response = {
'success': 'True',
'status code': status.HTTP_200_OK,
'message': 'User registered successfully',
}
status_code = status.HTTP_200_OK
return Response(response, status=status_code)
class UserLoginView(RetrieveAPIView):
"""UserLoginView
"""
permission_classes = (AllowAny,)
serializer_class = UserLoginSerializer
queryset = User.objects.all()
def post(self, request):
"""post
:param request:
:return:
"""
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
response = {
'success': 'True',
'status code': status.HTTP_200_OK,
'message': 'User logged in successfully',
'token': serializer.data['token'],
}
status_code = status.HTTP_200_OK
return Response(response, status=status_code)
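# --- Added routing sketch (hypothetical, not part of the original app) ---
# The two views above are plain DRF generic views, so they could be wired up
# in a urls.py along these lines; the route names are assumptions.
# from django.urls import path
# urlpatterns = [
#     path('register/', UserRegistrationView.as_view()),
#     path('login/', UserLoginView.as_view()),
# ]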
|
theRuthless/stark_ly3000_web_app
|
backend/users/views.py
|
views.py
|
py
| 1,740 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35007798704
|
from src.main.python.Solution import Solution
# Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0?
# Find all unique triplets in the array which gives the sum of zero.
#
# Note:
# Elements in a triplet (a,b,c) must be in non-descending order. (ie, a ≤ b ≤ c)
# The solution set must not contain duplicate triplets.
#
# For example, given array S = {-1 0 1 2 -1 -4},
# A solution set is:
# (-1, 0, 1)
# (-1, -1, 2)
class Q015(Solution):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
ans = []
if nums and len(nums) >= 3:
nums.sort()
i = 0
while i < len(nums)-2:
j, k = i+1, len(nums)-1
while j < k:
sum = nums[i] + nums[j] + nums[k]
if sum == 0:
ans.append([nums[i], nums[j], nums[k]])
k -= 1
while j < k < len(nums)-1 and nums[k] == nums[k+1]:
k -= 1
j += 1
while k > j and nums[j] == nums[j-1]:
j += 1
elif sum < 0:
j += 1
else:
k -= 1
i += 1
while i < len(nums)-2 and nums[i] == nums[i-1]:
i += 1
return ans
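# --- Added quick check (not part of the original solution) ---
# Uses the example from the problem statement above; assumes the imported
# Solution base class can be instantiated with no arguments.
# print(Q015().threeSum([-1, 0, 1, 2, -1, -4]))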
|
renkeji/leetcode
|
python/src/main/python/Q015.py
|
Q015.py
|
py
| 1,482 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27811540343
|
"""
Chapter: day 5
Topic: recursive functions (recursion)
    A function that calls itself
Problem:
    A. Define the factorial function fact recursively and print the result of calling fact(5)
Author: 윤경환
Date: 18 10 10
"""
def fact(a):  # factorial
    if a == 1:  # when a equals 1
        return a  # return a
    else:  # otherwise
        return a*fact(a-1)  # recursive call
print(fact(5))  # print the result
|
younkyounghwan/python_class
|
lab5_13.py
|
lab5_13.py
|
py
| 427 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
70264789629
|
import django, os
from django.core.management import call_command
from dotenv import load_dotenv
def init_db():
"""Method to initialize the database with sample data"""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FriendsLessonsSystem.settings')
load_dotenv()
django.setup()
from FriendsLessonsAPI.models import User, Course, Enrollment
call_command('flush')
call_command('makemigrations')
call_command('migrate')
joe = User.objects.create(first_name='Joe', last_name='Smith', username='joe123', birth_date='2000-01-01')
mark = User.objects.create(first_name='Mark', last_name='Johnson', username='mark456', birth_date='1999-12-31')
jody = User.objects.create(first_name='Jody', last_name='Williams', username='jody789', birth_date='1998-12-30')
rachel = User.objects.create(first_name='Rachel', last_name='Smith', username='rachel246', birth_date='1997-12-29')
jane = User.objects.create(first_name='Jane', last_name='Doe', username='jane512', birth_date='1995-05-01')
joe.friends.add(mark, jody, rachel)
jane.friends.add(joe, rachel)
math = Course.objects.create(name='Math', description='Math course')
spanish = Course.objects.create(name='Spanish', description='Spanish course')
history = Course.objects.create(name='History', description='History course')
Enrollment.objects.create(user=rachel, course=math, lessons_taken=3)
Enrollment.objects.create(user=rachel, course=spanish, lessons_taken=2)
Enrollment.objects.create(user=jane, course=history, lessons_taken=10)
Enrollment.objects.create(user=jane, course=math, lessons_taken=1)
Enrollment.objects.create(user=jane, course=spanish, lessons_taken=5)
Enrollment.objects.create(user=joe, course=spanish, lessons_taken=1)
if __name__ == '__main__':
init_db()
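# --- Added verification sketch (illustrative, not part of the original script) ---
# After init_db() has run, the sample data can be checked, e.g.:
# print(User.objects.get(username='joe123').friends.count())          # -> 3
# print(Enrollment.objects.filter(user__username='jane512').count())  # -> 3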
|
ValentinGiorgetti/Desafio-Backend
|
FriendsLessonsSystem/init_db.py
|
init_db.py
|
py
| 1,830 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14254446016
|
from __future__ import absolute_import, division, print_function, unicode_literals
from _GTW import GTW
from _TFL._Meta.Once_Property import Once_Property
import _GTW._RST._TOP._elFinder
class Error (Exception) :
"""elFinder error message"""
def __init__ (self, code, data = None) :
self.code = code
self.data = data
# end def __init__
@Once_Property
def json_cargo (self) :
if self.data :
return [self.code, self.data]
return self.code
# end def json_cargo
# end class Error
if __name__ != "__main__" :
GTW.RST.TOP.elFinder._Export ("*")
### __END__ GTW.RST.TOP.elFinder.Error
|
xiaochang91/tapyr
|
_GTW/_RST/_TOP/_elFinder/Error.py
|
Error.py
|
py
| 689 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2415322860
|
import gtk
import gobject
from tryton.gui.window.view_form.view.form import ViewForm
from tryton.gui.window.view_form.view.form_gtk.widget import Widget
from tryton.gui.window.view_form.screen import Screen
from tryton.common.selection import SelectionMixin
from tryton.common.treeviewcontrol import MOVEMENT_KEYS
def get_plugins(model):
return []
class Many2ManySelection(Widget, SelectionMixin):
expand = True
def __init__(self, view, attrs):
super(Many2ManySelection, self).__init__(view, attrs)
self.widget = gtk.VBox(homogeneous=False, spacing=5)
hbox = gtk.HBox(homogeneous=False, spacing=0)
hbox.set_border_width(2)
label = gtk.Label(attrs.get('string', ''))
label.set_alignment(0.0, 0.5)
hbox.pack_start(label, expand=True, fill=True)
frame = gtk.Frame()
frame.add(hbox)
frame.set_shadow_type(gtk.SHADOW_OUT)
self.widget.pack_start(frame, expand=False, fill=True)
self.screen = Screen(attrs['relation'],
view_ids=attrs.get('view_ids', '').split(','),
mode=['tree'], views_preload=attrs.get('views', {}))
self.screen.new_group()
self.treeview = self.screen.current_view.treeview
self.treeview.get_selection().connect('changed', self.changed)
self.treeview.connect('focus-out-event', lambda *a: self._focus_out())
self.treeview.connect('button-press-event', self.button_press_event)
self.treeview.connect('key-press-event', self.key_press_event)
self.widget.pack_start(self.screen.widget, expand=True, fill=True)
self.nullable_widget = False
self.init_selection()
@property
def modified(self):
if self.record and self.field:
group = set(r.id for r in self.field.get_client(self.record))
value = set(self.get_value())
return value != group
return False
def changed(self, selection):
def focus_out():
if self.widget.props.window:
self._focus_out()
# Must be deferred because it triggers a display of the form
gobject.idle_add(focus_out)
def button_press_event(self, treeview, event):
        # grab focus because it doesn't happen automatically with CONTROL_MASK
treeview.grab_focus()
if event.button == 1:
event.state ^= gtk.gdk.CONTROL_MASK
def key_press_event(self, treeview, event):
if event.keyval in MOVEMENT_KEYS:
event.state ^= gtk.gdk.CONTROL_MASK
def get_value(self):
return [r.id for r in self.screen.selected_records]
def set_value(self, record, field):
field.set_client(record, self.get_value())
def display(self, record, field):
selection = self.treeview.get_selection()
selection.handler_block_by_func(self.changed)
try:
self.update_selection(record, field)
super(Many2ManySelection, self).display(record, field)
if field is None:
self.screen.clear()
self.screen.current_record = None
self.screen.parent = None
else:
self.screen.parent = record
current_ids = [r.id for r in self.screen.group]
new_ids = [s[0] for s in self.selection]
if current_ids != new_ids:
self.screen.clear()
self.screen.load(new_ids)
group = field.get_client(record)
nodes = [[r.id] for r in group
if r not in group.record_removed
and r not in group.record_deleted]
selection.unselect_all()
self.screen.current_view.select_nodes(nodes)
self.screen.display()
finally:
selection.handler_unblock_by_func(self.changed)
ViewForm.WIDGETS['many2many_selection'] = Many2ManySelection
|
PierreCookie/tryton_pre_mono
|
tryton/plugins/many2many_selection.py
|
many2many_selection.py
|
py
| 3,934 |
python
|
en
|
code
| null |
github-code
|
6
|
2825738960
|
# https://www.codewars.com/kata/51c8e37cee245da6b40000bd/train/python
# Complete the solution so that it strips all text that follows any of a set of comment markers passed in. Any whitespace at the end of the line should also be stripped out.
# Example:
# Given an input string of:
# apples, pears # and bananas
# grapes
# bananas !apples
# The output expected would be:
# apples, pears
# grapes
# bananas
# The code would be called like so:
# result = solution("apples, pears # and bananas\ngrapes\nbananas !apples", ["#", "!"])
# # result should == "apples, pears\ngrapes\nbananas"
import re
def solution(string, markers):
    # split each line at the first occurrence of any marker,
    # then strip trailing whitespace
    pattern = re.compile('|'.join(map(re.escape, markers))) if markers else None
    lines = string.split('\n')
    final = [pattern.split(line, 1)[0].rstrip() if pattern else line.rstrip()
             for line in lines]
    return '\n'.join(final)
print(solution('apples, pears # and bananas\ngrapes\nbananas #!apples', ['#','!']))
|
Tadiuz/PythonPrograms
|
PP/CodeWars/Strip_Comment.py
|
Strip_Comment.py
|
py
| 998 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7930998505
|
import numpy as np
import matplotlib.pyplot as plt
# only two states
def epidemie_1(temps = 50, population = 10**6):
    propagation = np.array([[0.9, 0.1], [0.3, 0.7]])  # 0 -> infected, 1 -> healthy
popu = np.array([0, 1])
X_temps = np.linspace(0, temps, temps)
Y_infectes = []
for t in range(temps):
Y_infectes.append(popu[0]*population)
popu = np.dot(popu, propagation)
plt.plot(X_temps, Y_infectes)
# illustration of a two-state Markov chain
# unrealistic model because of the slope; see worldometers.info just to get an impression of what it should look like
#epidemie_1()
#plt.show()
# six states this time:
def epidemie_2(temps = 100, population = 10**6):
    propagation = np.array([
        [0.7, 0.2, 0, 0, 0.0001, 0.0999],  # 0 -> infected, vaccinated
        [0.2, 0.8, 0, 0, 0, 0],            # 1 -> healthy, vaccinated
        [0, 0.2, 0.1, 0.7, 0, 0],          # 2 -> healthy, unvaccinated
        [0, 0.2, 0, 0.7, 0.001, 0.099],    # 3 -> infected, unvaccinated
        [0, 0, 0, 0, 1, 0],                # 4 -> dead
        [0, 0, 0, 0, 0, 1]                 # 5 -> immune
    ])
popu = np.array([0, 0, 1, 0, 0, 0])
X_temps = np.linspace(0, temps, temps)
Y_infectes = []
for t in range(temps):
Y_infectes.append(popu[0]*population)
popu = np.dot(popu, propagation)
plt.plot(X_temps, Y_infectes)
#epidemie_2()
#plt.show()
# a much more complicated model because of the many states, but
# much more satisfying results
# one can play with the propagation matrix of the epidemic
# to take into account the reaction of people and governments, and lockdowns (notably an infection probability that goes down and a vaccination probability that goes up, but also random mutation)
# Hidden Markov chain
# change of domain
def max_arg(liste):
""" renvoie le max de la liste et le plus petit indice ou il a ete realise"""
m = liste[0]
i_max = 0
for n in range(len(liste)):
if liste[n] > m:
m = liste[n]
i_max = n
return m, i_max
def Viterbi(A, B, Obs):
    # A: transition matrix
    # B: observation probabilities such that b_j(o_t) = B[j][t]
    # we work with logarithms
logA = np.log(A)
logB = np.log(B)
N = len(A)
T = len(Obs)
    pointeurs = np.reshape(np.zeros(T*N), (N, T))  # used to trace the path back at the end
alpha_prec = np.array(B[:][Obs[0]])
alpha_suiv = np.zeros(N)
for t in range(T):
nouv_alpha = np.zeros(N)
for j in range(N):
nouv_alpha[j], pointeurs[j][t] = max_arg( np.array( np.log(alpha_suiv[i]) + logA[i][j] + logB[j][Obs[t]] for i in range(N)))
            # log is increasing, so it preserves the max
            # the pointer stores the state i achieving the maximum: it was the previous state
alpha_prec = alpha_suiv[:]
alpha_suiv = nouv_alpha[:]
pmax, i_final = max_arg(alpha_suiv)
pmax = np.exp(pmax)
etats_successifs = np.zeros(T)
i = i_final
for t in range(1, T+1, -1):
etats_successifs[t] = i
i = pointeurs[i][t-1]
return pmax, etats_successifs
def forward(A, B, Obs):
N = len(A)
T = len(B)
alpha_prec = np.array(B[:][Obs[0]])
alpha_suiv = np.zeros(N)
for t in range(T):
nouv_alpha = np.zeros(N)
for j in range(N):
nouv_alpha[j] = B[j][Obs[t]] * sum( alpha_suiv[i] * A[i][j] for i in range(N))
alpha_prec = alpha_suiv[:]
alpha_suiv = nouv_alpha[:]
return sum(alpha_suiv)
# def baum-welch(A,B, Obs):
# if # convergence condition
# else:
# N = len(A)
# T = len(Obs)
# alphas = np.reshape(np.zeros(N*T), (N, T))
# betas = np.reshape(np.zeros(N*T), (N, T))
# # initialise properly with a for loop HERE
# C = sum(alphas)
# D = sum(beta)
# alphas = alphas/C
# beta = beta/D
# for t in range(1, T): # build up
# for i in range(N):
# alphas[i][t] = B[i][Obs[t]] *
# betas[i][t] =
# TEMPORARILY ABANDONED
def baum_welch_naif(A, B, Obs):
N = len(A)
T = len(Obs)
alphas = np.reshape(np.zeros(N*T), (T, N))
betas = np.reshape(np.zeros(N*T), (T, N))
    # compute all the alpha and beta values
alphas[:][0] = B[:][Obs[0]]
betas[T-1][:] = np.ones(N)
for t in range(1, T-2):
for j in range(N):
alphas[t][j] = B[j][Obs[t]] * sum( alphas[t-1][i] * A[i][j] for i in range(N))
betas[T-1-t][j] = B[Obs[T-t]][j] * sum( betas[T-t][i] * A[j][i] for i in range(N))
Pobs = sum(alphas[T-1][:])
# step Expectations
zeta = np.reshape(np.zeros(N*N*T), (T,N, N))
gamma = np.reshape(np.zeros(N*T), (T,N))
for t in range(T-1):
for i in range(N):
for j in range(N):
zeta[t][i][j] = alphas[t][i] * betas[t+1][j] * A[i][j] * B[Obs[t]][j] / Pobs
for t in range(T):
for i in range(N):
            gamma[t][i] = (alphas[t][i] * betas[t][i]) / Pobs
    # M step: re-estimate A and B
nouvA = np.reshape(np.zeros(N**2), (N,N))
nouvB = np.reshape(np.zeros(N * len(B[0])), (N, len(B[0])))
for i in range(N):
denom = sum( sum( zeta[t][i][k] for k in range(N)) for t in range(T))
for j in range(N):
nouvA[i][j] = sum( zeta[t][i][j] for t in range(T)) / denom
for j in range(N):
for k in range(len(B)):
denom = sum(gamma[t][j] for t in range(T))
for t in range(T):
if Obs[t] == k:
nouvB[j][k] = nouvB[j][k] + gamma[t][j] / denom
return nouvA, nouvB
def traite_fichier_adn():
nucleotide = open("adn_pur.txt", "r")
nombres = open("adn_traite", "a")
lignes = nucleotide.readlines()
N = ['a', 'c', 't', 'g']
for l in lignes:
for carac in l:
if carac == 'a':
nombres.write("0 ")
if carac == 'c':
nombres.write("1 ")
if carac == 't':
nombres.write("2 ")
if carac == 'g':
nombres.write("3 ")
nucleotide.close()
nombres.close()
adn = open("adn_traite", "r")
sequence = adn.readlines()
Ob = []
for ligne in sequence:
for nclt in ligne:
if nclt in ['0', '1', '2', '3']:
Ob.append(int(nclt))
adn.close()
def sequencageADN(Obs):
precision = 0.1
A = 0.25 * np.reshape(np.ones(16), (4, 4))
B = 0.25 * np.reshape(np.ones(16), (4, 4))
Ap, Bp = baum_welch_naif(A, B, Obs)
    while np.linalg.norm(A - Ap) > precision or np.linalg.norm(B - Bp) > precision:
A = Ap
B = Bp
Ap, Bp = baum_welch_naif(A, B, Obs)
return A, B
#print(sequencageADN(Ob))
def ruine_du_joueur(N, p, T = 100):
X_t = np.zeros(2*N+1)
X_t[N] = 1.0
T = list(range(1, T))
A = np.reshape(np.zeros((2*N+1)**2), ((2*N+1),(2*N+1)))
A[0][0] = 1
A[-1][-1] = 1
for i in range(1, 2*N):
A[i][i-1] = 1-p
A[i][i+1] = p
print(A)
Argent = []
for t in T:
m = max(X_t)
for k in range(2*N+1):
if X_t[k] == m:
Argent.append(k)
break
X_t = np.dot(X_t, A)
plt.plot(T, Argent)
import random as rd
def vol_du_joueur(N, p):
for _ in range(3):
X = [0]
Y = [N]
temps = 0
A = N
while A > 0 and A < 2*N:
temps += 1
if rd.random()< p:
A += 1
else:
A -= 1
X.append(temps)
Y.append(A)
plt.plot(X, Y)
plt.xlabel("temps")
plt.ylabel("Pieces")
plt.show()
#vol_du_joueur(20, 0.5)
def temps_de_vol(N,p):
Y = []
nb_essais = 100000
for k in range(nb_essais):
temps = 0
A = N
while A > 0 and A < 2*N:
temps += 1
if rd.random()< p:
A += 1
else:
A -= 1
Y.append(temps)
Yp = [0]*(max(Y)+1)
for y in Y:
Yp[y] += 1
plt.bar(list(range(max(Y)+1)), Yp, width=1.0, edgecolor = "#981FFA")
plt.show()
#temps_de_vol(200, 0.7)
import cmath
def mouvement_brownien(N):
position = 0 + 0j
X = [position]
t = 1
i = 1j
direction = 1
while t < N:
dir = rd.random()
dist = rd.random()
if dir < 0.05 or 0.50> dir > 0.45:
if dist <0.01:
direction *= cmath.exp(dir*2*np.pi*1j)
position = position + dist * direction
X.append(position)
t += 1
plt.plot( [ z.real for z in X], [z.imag for z in X])
plt.show()
mouvement_brownien(10000)
def baum_welch(A, B, Obs):
N = len(A)
T = len(Obs)
alphas = np.reshape(np.zeros(N*T), (T, N))
betas = np.reshape(np.zeros(N*T), (T, N))
    # compute all the alpha and beta values
alphas[:][0] = B[:][Obs[0]]
betas[T-1][:] = np.ones(N)
for t in range(1, T-2):
for j in range(N):
alphas[t][j] = B[j][Obs[t]] * sum( alphas[t-1][i] * A[i][j] for i in range(N))
betas[T-1-t][j] = B[Obs[T-t]][j] * sum( betas[T-t][i] * A[j][i] for i in range(N))
constantesC = []
for t in range(T):
C = sum(alphas[t][:])**(-1)
        constantesC.append(C)
        alphas[t][:] = alphas[t][:] * C  # normalise the row: C is the inverse of its sum
    constantesc = []
for t in range(T):
c = 0
for y in range(N):
c += np.dot(alphas[t][:], A[y]) * B[t][y]
c = 1 / c
constantesc.append(c)
Pobs = sum(alphas[T-1][:])
# step Expectations
zeta = np.reshape(np.zeros(N*N*T), (T,N, N))
gamma = np.reshape(np.zeros(N*T), (T,N))
for t in range(T-1):
for i in range(N):
for j in range(N):
zeta[t][i][j] = alphas[t][i] * betas[t+1][j] * A[i][j] * B[Obs[t]][j] / Pobs
for t in range(T):
for i in range(N):
            gamma[t][i] = (alphas[t][i] * betas[t][i]) / Pobs
#step S
nouvA = np.reshape(np.zeros(N**2), (N,N))
nouvB = np.reshape(np.zeros(N * len(B[0])), (N, len(B[0])))
for i in range(N):
denom = sum( sum( zeta[t][i][k] for k in range(N)) for t in range(T))
for j in range(N):
nouvA[i][j] = sum( zeta[t][i][j] for t in range(T)) / denom
for j in range(N):
        for k in range(len(B[0])):  # iterate over observation symbols (columns of B)
denom = sum(gamma[t][j] for t in range(T))
for t in range(T):
if Obs[t] == k:
nouvB[j][k] = nouvB[j][k] + gamma[t][j] / denom
return nouvA, nouvB
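# Hedged sanity-check idea (added, not from the original author): by construction each row
# of nouvA is normalised by its own denominator, so after one update the rows should sum to
# (approximately) 1; the same holds for the rows of nouvB when every symbol of Obs is in
# range. A0, B0 and Obs below are placeholders for matrices and data defined elsewhere.
#A1, B1 = baum_welch(A0, B0, Obs)
#assert np.allclose(A1.sum(axis=1), 1.0)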
|
kmlst/TIPE-Coding-regions-in-DNA-with-Hidden-Markov-Model
|
tipe_code.py
|
tipe_code.py
|
py
| 10,617 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6043134873
|
import logging
import certifi
import random, string
from elasticsearch import Elasticsearch
from flask import Flask, render_template, request, redirect, url_for, flash
from datetime import datetime
from quiz import quiz
app = Flask(__name__)
app.secret_key = 'dfuy48yerhfjdbsklueio'
es = Elasticsearch(
['https://host:port'],
http_auth=('user', 'pass'),
send_get_body_as='POST', # needed for GAE
use_ssl=True,
ca_certs=certifi.where()
)
@app.route('/')
def index():
return render_template('index.html', quiz=quiz)
@app.route('/submit', methods=['POST'])
def submit():
form = request.form.to_dict()
doc = {
'timestamp': datetime.utcnow(),
'email' : form['email'],
'remote_addr' : request.remote_addr,
'user_agent' : request.headers.get('User-Agent'),
'correct': True
}
for q in quiz:
doc[q['name']] = {
'question' : q['question'],
'answer' : form[q['name']]
}
if form[q['name']] != [i for i in q['options'] if i['correct']][0]['answer']:
doc['correct'] = False
es.index(index='esquiz', doc_type='answer', pipeline='esquiz', body=doc)
flash('Thanks for your response')
return redirect(url_for('index'))
@app.route('/draw', methods=['GET'])
def draw():
    seed = ''.join(random.choice(string.ascii_lowercase) for i in range(20))  # ascii_lowercase works on both Python 2 and 3
query = {
"query": {
"function_score": {
"query": { "term" : { "correct" : True } },
"functions": [{
"random_score": { "seed": seed }
}]
}
}
}
email = None
res = es.search(index='esquiz', body=query, size=1, _source_include="email")
if res['hits']['total'] > 0:
email = res['hits']['hits'][0]['_source']['email']
return render_template('draw.html', winner=email)
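# Hedged aside (added, not part of the original app): /draw picks one document among the
# correct answers by wrapping a term query in a function_score with a random_score seed,
# which is intended to make the draw reproducible for a given seed. An interactive check
# with the module-level `es` client could look like:
#
#   q = {"query": {"function_score": {
#           "query": {"term": {"correct": True}},
#           "functions": [{"random_score": {"seed": "fixed-seed"}}]}}}
#   es.search(index='esquiz', body=q, size=1, _source_include="email")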
@app.errorhandler(500)
def server_error(e):
# For Google App Engine
logging.exception('An error occurred during a request.')
return 'An internal error occurred.', 500
if __name__ == '__main__':
app.run()
|
mcascallares/esquiz
|
main.py
|
main.py
|
py
| 2,109 |
python
|
en
|
code
| 1 |
github-code
|
6
|
910310900
|
import os, sys
from glob import glob
__all__ = ['context', 'Context']
class Context(object):
'''Finds out where the data directory is located etc.
The data directory contains data files with standard basis sets and
pseudo potentials.
'''
def __init__(self):
# Determine data directory (also for in-place build)
self.data_dir = os.getenv('HORTONDATA')
if self.data_dir is None:
fn_data_dir = os.path.join(os.path.dirname(__file__), 'data_dir.txt')
if os.path.isfile(fn_data_dir):
with open(fn_data_dir) as f:
self.data_dir = os.path.join(f.read().strip(), 'share/horton')
if self.data_dir is None:
self.data_dir = './data'
self.data_dir = os.path.abspath(self.data_dir)
# Determine include directory
self.include_dir = os.getenv('HORTONINCLUDE')
if self.include_dir is None:
fn_data_dir = os.path.join(os.path.dirname(__file__), 'data_dir.txt')
if os.path.isfile(fn_data_dir):
with open(fn_data_dir) as f:
self.include_dir = os.path.join(
f.read().strip(),
'include/python%i.%i' % (sys.version_info.major, sys.version_info.minor))
if not os.path.isdir(self.data_dir):
raise IOError('Can not find the data files. The directory %s does not exist.' % self.data_dir)
def get_fn(self, filename):
'''Return the full path to the given filename in the data directory.'''
return os.path.join(self.data_dir, filename)
def glob(self, pattern):
'''Return all files in the data directory that match the given pattern.'''
return glob(self.get_fn(pattern))
def get_include(self):
'''Return the list with directories containing header files (.h and .pxd)'''
return self.include_dir
context = Context()
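if __name__ == '__main__':
    # Hedged demo (added, not part of the original module): show where the data and include
    # directories were resolved. Concrete file names for get_fn()/glob() depend on what is
    # installed under data_dir, so only the directories are printed here.
    print(context.data_dir)
    print(context.get_include())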
|
theochem/horton
|
horton/context.py
|
context.py
|
py
| 1,945 |
python
|
en
|
code
| 83 |
github-code
|
6
|
26538766731
|
from backend.common.exceptions.common import RuleValidationException, ParameterException
from backend.common.exceptions.mquery import MqueryException
from backend.helpers.async_elastic_helper import AsyncElasticHelper
from backend.helpers.mquery_helper import MqueryHelper
from backend.helpers.yara_helper import YaraHelper
from backend.schema.add_retrohunt_schema import AddRetrohuntTaskSchema, PriorityTypes
class YaraRetrohuntFacade:
yara_helper = YaraHelper
mquery_helper = MqueryHelper
async_elastic_helper = AsyncElasticHelper
@classmethod
async def add_retrohunt_task(cls, rule_id: str, task: AddRetrohuntTaskSchema):
if not await cls.yara_helper.is_yara_valid(task.yara):
raise RuleValidationException(f"Syntax error for new Yara rule")
resp_json, resp_status = await cls.mquery_helper.submit_task(task.yara, task.priority.value)
if resp_status == 200:
task_id = resp_json["query_hash"]
resp_json, resp_status = await cls.mquery_helper.view_task_status(task_id)
if resp_status != 200:
raise MqueryException(f"Problem submitting yara rule. Internal status code: {resp_status}")
# Update rule in Elastic with a reference of the task_uid
await cls.async_elastic_helper.add_retrohunt_task_id_to_rule(rule_id, task_id)
elif resp_status == 400:
raise ParameterException("Yara rule is not correctly formatted")
else:
raise MqueryException("Problem submitting yara rule")
@classmethod
async def remove_retrohunt_task(cls, task_id: str):
resp_json, resp_status = await cls.mquery_helper.remove_task(task_id)
if resp_status != 200 or resp_json["status"] != "ok":
raise MqueryException(
f'Error removing retrohunt task {task_id}, status code: {resp_status}', status_code=resp_status
)
rule = (await cls.async_elastic_helper.get_rules_by_retrohunt_task(task_id))[0]
await cls.async_elastic_helper.add_retrohunt_task_id_to_rule(rule['_id'], '')
@classmethod
async def resubmit_task(cls, rule_id: str):
rule = await cls.async_elastic_helper.get_rule_by_id(rule_id)
await cls.add_retrohunt_task(
str(rule.id), AddRetrohuntTaskSchema(yara=rule.body, priority=PriorityTypes.medium)
)
resp_json, resp_status = await cls.mquery_helper.remove_task(rule.retrohunt_task_id)
if resp_status != 200 or resp_json["status"] != "ok":
raise MqueryException(
f'Error removing retrohunt task {rule.retrohunt_task_id}, status code: {resp_status}', status_code=resp_status
)
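# Hedged usage sketch (added, not part of the original module): submitting a retrohunt task
# for an existing rule. The rule id and the yara body are placeholders; the schema and
# priority enum are the ones imported above.
#
#   import asyncio
#
#   async def _demo():
#       task = AddRetrohuntTaskSchema(yara='rule demo { condition: true }',
#                                     priority=PriorityTypes.medium)
#       await YaraRetrohuntFacade.add_retrohunt_task('rule-id-placeholder', task)
#
#   asyncio.run(_demo())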
|
CorraMatte/malstream
|
backend/facades/yara_retrohunt_facade.py
|
yara_retrohunt_facade.py
|
py
| 2,715 |
python
|
en
|
code
| 3 |
github-code
|
6
|
16421467025
|
# Test the models with LG_chem stock
# If the prediction is success, Expand the number of stock
import math
import os
import pdb
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from sklearn.preprocessing import MinMaxScaler
base = os.path.abspath(__file__)
base = base.split('/')
def save_stock_plot(rawdata, stock_name="LG_chme"):
global base
try:
plt.figure(figsize=(20,5))
plt.plot(range(len(rawdata)), rawdata['Close'])
path = "/".join(base[:-2]+["models"])
file_name = f"/{stock_name}.jpg"
path += file_name
plt.savefig(path)
print("Save Success!!")
except Exception as e:
print(f"Save Stock plot Failed!!: {e}")
class windowDataset(Dataset):
def __init__(self, y, input_window=80, output_window=20, stride=5, n_attr=1):
        # total number of data points
L = y.shape[0]
        # total number of samples produced when sliding by `stride`
num_samples = (L - input_window - output_window) // stride + 1
if n_attr == 1:
            # input and output arrays
X = np.zeros([input_window, num_samples])
Y = np.zeros([output_window, num_samples])
for i in np.arange(num_samples):
start_x = stride*i
end_x = start_x + input_window
X[:,i] = y[start_x:end_x]
start_y = stride*i + input_window
end_y = start_y + output_window
Y[:,i] = y[start_y:end_y]
X = X.reshape(X.shape[0], X.shape[1], n_attr)
Y = Y.reshape(Y.shape[0], Y.shape[1], n_attr)
X = X.transpose((1,0,2))
Y = Y.transpose((1,0,2))
self.x = X
self.y = Y
else:
            # input and output arrays
X = np.zeros([input_window, n_attr, num_samples])
Y = np.zeros([output_window, n_attr, num_samples])
for i in np.arange(num_samples):
start_x = stride*i
end_x = start_x + input_window
X[:,:,i] = y[start_x:end_x]
start_y = stride*i + input_window
end_y = start_y + output_window
Y[:,:,i] = y[start_y:end_y]
X = X.reshape(X.shape[2], X.shape[0], X.shape[1])
Y = Y.reshape(Y.shape[2], Y.shape[0], Y.shape[1])
self.x = X
self.y = Y
self.len = len(X)
def __getitem__(self, i):
return self.x[i], self.y[i]
#return self.x[i], self.y[i, :-1], self.y[i,1:]
def __len__(self):
return self.len
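# Hedged illustration (added, not in the original file): with input_window=4, output_window=2
# and stride=1, a 1-D series of length 10 yields (10 - 4 - 2)//1 + 1 = 5 (input, target)
# pairs, each shaped (window, n_attr):
#
#   demo = windowDataset(np.arange(10, dtype=np.float32),
#                        input_window=4, output_window=2, stride=1, n_attr=1)
#   demo[0]  # -> (array of shape (4, 1), array of shape (2, 1))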
class TFModel(nn.Module):
def __init__(self,iw, ow, d_model, nhead, nlayers, dropout=0.5, n_attr=1):
super(TFModel, self).__init__()
self.encoder_layer = nn.TransformerEncoderLayer(d_model=d_model, nhead=nhead, dropout=dropout)
self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=nlayers)
self.pos_encoder = PositionalEncoding(d_model, dropout)
self.encoder = nn.Sequential(
nn.Linear(n_attr, d_model//2),
nn.ReLU(),
nn.Linear(d_model//2, d_model)
)
self.linear = nn.Sequential(
nn.Linear(d_model, d_model//2),
nn.ReLU(),
nn.Linear(d_model//2, n_attr)
)
self.linear2 = nn.Sequential(
nn.Linear(iw, (iw+ow)//2),
nn.ReLU(),
nn.Linear((iw+ow)//2, ow)
)
def generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def forward(self, src, srcmask):
src = self.encoder(src)
src = self.pos_encoder(src)
output = self.transformer_encoder(src.transpose(0,1), srcmask).transpose(0,1)
output = self.linear(output)[:,:,0]
output = self.linear2(output)
return output
class TFModel2(nn.Module):
def __init__(self,d_model, nhead, nhid, nlayers, dropout=0.5, n_attr=7):
super(TFModel2, self).__init__()
self.transformer = nn.Transformer(d_model=d_model, nhead=nhead, dim_feedforward=nhid, num_encoder_layers=nlayers, num_decoder_layers=nlayers,dropout=dropout)
self.pos_encoder = PositionalEncoding(d_model, dropout)
self.pos_encoder_d = PositionalEncoding(d_model, dropout)
self.linear = nn.Linear(d_model, n_attr)
self.encoder = nn.Linear(n_attr, d_model)
self.encoder_d = nn.Linear(n_attr, d_model)
def generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def forward(self, src, tgt, srcmask, tgtmask):
src = self.encoder(src)
src = self.pos_encoder(src)
tgt = self.encoder_d(tgt)
tgt = self.pos_encoder_d(tgt)
output = self.transformer(src.transpose(0,1), tgt.transpose(0,1), srcmask, tgtmask)
output = self.linear(output)
return output
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
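# Hedged note (added, not in the original file): PositionalEncoding registers a
# (max_len, 1, d_model) buffer of sine/cosine values and adds the first x.size(0) rows to
# the input, so it expects sequence-first tensors of shape (seq_len, batch, d_model):
#
#   pe = PositionalEncoding(d_model=8, dropout=0.0)
#   out = pe(torch.zeros(5, 2, 8))   # -> shape (5, 2, 8)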
def gen_attention_mask(x):
mask = torch.eq(x, 0)
return mask
def evaluate(data_train, device, model, iw, n_attr, length):
    # feed the last 30*2 days as input and obtain the prediction for the following 30 days
input = torch.tensor(data_train[-iw:]).reshape(1,-1,n_attr).to(device).float().to(device)
model.eval()
src_mask = model.generate_square_subsequent_mask(input.shape[1]).to(device)
predictions = model(input, src_mask)
return predictions.detach().cpu().numpy()
"""
input = torch.tensor(data_train[-iw:]).reshape(1,-1,n_attr).to(device).float().to(device)
output = torch.tensor(data_train[-1].reshape(1,-1,n_attr)).float().to(device)
model.eval()
for i in range(length):
src_mask = model.generate_square_subsequent_mask(input.shape[1]).to(device)
tgt_mask = model.generate_square_subsequent_mask(output.shape[1]).to(device)
predictions = model(input, output, src_mask, tgt_mask).transpose(0,1)
predictions = predictions[:, -1:, :]
output = torch.cat([output, predictions.to(device)], axis=1)
return torch.squeeze(output, axis=0).detach().cpu().numpy()[1:]
"""
def predict(stock, period):
global base
print(f"Notice: Since it is in the initial stage of the service, \
we predict only the stock price of LG Chem, not the stock price \
of the designated company.\n\n")
    # Instead of this block, rawdata should come from the data that Jisu fetches via spl.
    # Without the additional features there are 1729 days of data.
print(f"Loading Stock Data ...")
n_attr = 1
path = "/".join(base[:-3]+["data","lg_chem_closing_prices.csv"])
model_path = "/".join(base[:-2]+["Prediction", f"{stock}_{datetime.now().date()}.pth"])
rawdata = pd.read_csv(path)
print(f"Saving Stock data as .png ...")
save_stock_plot(rawdata, stock)
#pdb.set_trace()
print(f"Preprocessing Data with MinMaxScaling ...")
min_max_scaler = MinMaxScaler()
rawdata["Close"] = min_max_scaler.fit_transform(rawdata["Close"].to_numpy().reshape(-1,n_attr))
print(f"Spliting Data ...")
iw = 30*7
ow = 10
train = rawdata[:-iw]
data_train = train["Close"].to_numpy()
test = rawdata[-iw:]
data_test = test["Close"].to_numpy()
print(f"Preparing Dataset ...")
train_dataset = windowDataset(data_train, input_window=iw, output_window=ow, stride=1, n_attr=n_attr)
train_loader = DataLoader(train_dataset, batch_size=64)
#test_dataset = windowDataset(data_test, input_window=iw, output_window=ow, stride=1, n_attr=n_attr)
#test_loader = DataLoader(test_dataset)
"""
    # To improve performance, feed other features in addition to the closing price.
    # There are 1720 days of data in total.
print(f"Loading Stock Data ...")
n_attr = 7
path = "/".join(base[:-3]+["data","lg_chem_prices.csv"])
rawdata = pd.read_csv(path)
#print(f"Saving Stock data as .png ...")
#save_stock_plot(rawdata, stock)
print(f"Preprocessing Data with MinMaxScaling ...")
min_max_scaler = MinMaxScaler()
rawdata.loc[:,rawdata.columns] = min_max_scaler.fit_transform(rawdata.to_numpy())
print(f"Spliting Data ...")
iw = 60
ow = 5
#pdb.set_trace()
train = rawdata[:-(iw)]
data_train = train.to_numpy()
test = rawdata[-(iw):]
data_test = test.to_numpy()
print(f"Preparing Dataset ...")
train_dataset = windowDataset(data_train, input_window=iw, output_window=ow, stride=1, n_attr=n_attr)
train_loader = DataLoader(train_dataset, batch_size=64)
#test_dataset = windowDataset(data_test, input_window=iw, output_window=ow, stride=1, n_attr=n_attr)
#test_loader = DataLoader(test_dataset)
"""
print(f"Model Constructing ...")
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
lr = 1e-4
#model = TFModel2(256, 8, 256, 2, 0.1, n_attr).to(device)
model = TFModel(iw, ow, 512, 8, 4, 0.4, n_attr).to(device)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
if not os.path.exists(model_path):
print("Trainig ...")
epoch = 10
model.train()
for i in range(epoch):
batchloss = 0.0
for (inputs, outputs) in tqdm(train_loader):
optimizer.zero_grad()
src_mask = model.generate_square_subsequent_mask(inputs.shape[1]).to(device)
result = model(inputs.float().to(device), src_mask)
loss = criterion(result, outputs[:,:,0].float().to(device))
loss.backward()
optimizer.step()
batchloss += loss
print(f"{i+1}th epoch MSEloss:" + "{:0.6f}".format(batchloss.cpu().item() / len(train_loader)))
            torch.save(model.state_dict(), model_path)  # per-epoch checkpoint, kept consistent with the final state_dict save below
"""
model.train()
progress = tqdm(range(epoch))
for i in progress:
batchloss = 0.0
for (inputs, dec_inputs, outputs) in train_loader:
optimizer.zero_grad()
src_mask = model.generate_square_subsequent_mask(inputs.shape[1]).to(device)
tgt_mask = model.generate_square_subsequent_mask(dec_inputs.shape[1]).to(device)
result = model(inputs.float().to(device), dec_inputs.float().to(device), src_mask, tgt_mask)
loss = criterion(result.permute(1,0,2), outputs.float().to(device))
loss.backward()
optimizer.step()
batchloss += loss
progress.set_description("{:0.5f}".format(batchloss.cpu().item() / len(train_loader)))
"""
        torch.save(model.state_dict(), model_path)
    else:
        # A model saved earlier today already exists: load its weights instead of retraining.
        print("Loading existing model ...")
        model.load_state_dict(torch.load(model_path, map_location=device))
print("Predicting ...")
result = evaluate(data_test, device, model, iw, n_attr, ow)
result = min_max_scaler.inverse_transform(result)[0]
real = rawdata["Close"].to_numpy()
real = min_max_scaler.inverse_transform(real.reshape(-1,1))[:,0]
#pdb.set_trace()
"""
tmp = np.zeros((10,7))
tmp[:,:] = result.reshape(10,-1)
result = tmp
result = min_max_scaler.inverse_transform(result).reshape(-1,10)[3]
real = rawdata.to_numpy()
real = min_max_scaler.inverse_transform(real)[:,3]
"""
plt.figure(figsize=(20,5))
#plt.plot(range(1419,1719),real[1420:], label="real")
plt.plot(range(1419,1719),real[1418:],label="real")
plt.plot(range(1719-ow,1719),result, label="predict")
plt.legend()
path = "/".join(base[:-2]+["models","prediction2.jpg"])
plt.savefig(path)
print(f"Complete!!")
    # Compare the mean of the predicted prices with the last observed value: if the mean is larger, advise buying; otherwise, advise not buying.
mean_pred = np.mean(result)
if mean_pred >= real[-1]:
answer = f"""You should buy the stock you want to know the price, because we predict the price will rise.
Maybe it will be {mean_pred}won."""
else:
answer = f"""You shouldn't buy the stock you want to know the price, because we predict the price will go down.
Maybe it will be {mean_pred}won."""
return answer
if __name__=="__main__":
print(predict("", ""))
|
groundwater98/Miraeasset_Bigdata_Festival
|
ML/Prediction/predict.py
|
predict.py
|
py
| 13,225 |
python
|
en
|
code
| 1 |
github-code
|
6
|
28031383283
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import rospy
import cv2
import cv_bridge
import sensor_msgs.msg
import argparse
import numpy as np
class image_converter:
def __init__(self, input_topic, output_topic):
self.image_pub = rospy.Publisher(
output_topic, sensor_msgs.msg.Image, queue_size=10)
self.bridge = cv_bridge.CvBridge()
self.image_sub = rospy.Subscriber(
input_topic, sensor_msgs.msg.Image,
self.callback)
def callback(self, data):
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
#
# DO SOMETHING
#
cv_image = self.canny_edge(cv_image)
try:
ros_img = self.bridge.cv2_to_imgmsg(cv_image, "mono8") # canny: mono8(8UC1)
        except cv_bridge.CvBridgeError as e:
            print(e)
            return  # conversion failed, nothing to publish
        self.image_pub.publish(ros_img)
def canny_edge(self, img):
"""
return canny edge image
"""
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
canny = cv2.Canny(gray, 100, 150)
return canny
# Usage:
# rosrun PACKAGE image_converter.py input:=/camera/image_raw_throttle output:=/test
#
def main(args):
rospy.init_node('image_converter', anonymous=True)
input_topic = rospy.resolve_name("input")
output_topic = rospy.resolve_name("output")
print("input_topic: %s" % (input_topic,))
print("output_topic: %s" % (output_topic,))
sys.stdout.flush()
ic = image_converter(input_topic, output_topic)
try:
print("Invoke rospy.spin().")
sys.stdout.flush()
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
|
kargenk/image_converter
|
image_converter.py
|
image_converter.py
|
py
| 1,762 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9842073923
|
import redneuronal
import random
import time
import statistics
class RedNeuronalGA:
def __init__(self, size, config:list, inputsize, mut_rate = 0.01, lastbest_rate = 0.5, tour_size = 10):
'''
:param size:
:param n_genes:
        :param config: list of the form [number of layers, [number of neurons per layer]]
:param mut_rate:
:param lastbest_rate:
:param tour_size:
'''
self.totalfitness = []
self.inputsize = inputsize
self.lastbest_rate = lastbest_rate
self.pop_size = size
self.mutation_rate = mut_rate
self.tournament_size = tour_size
        self.current_generation = [] # list of networks
        self.current_fitness = [] # fitness values obtained after playing
self.final_ind = None
self.config = config
def set_tournamentsize(self, size):
self.tournament_size = size
def set_mutationrate(self, rate):
self.mutation_rate = rate
def set_survivalrate(self, rate):
self.lastbest_rate = rate
def initialize(self):
for i in range(self.pop_size):
red = []
n_layers = self.config[0]
            for j in range(n_layers): # number of layers
layer = []
n_neuronasj = self.config[1][j]
for k in range(n_neuronasj):
neuron = []
if j == 0:
for p in range(self.inputsize):
neuron.append(random.random()*2)
else:
for p in range(self.config[1][j-1]):
neuron.append(random.random()*2)
neuron.append(0.8)
layer.append(neuron)
red.append(layer)
self.current_generation.append(redneuronal.RedN(red, i))
def tournament_selection(self, population: list, k):
''' Randomly select the best individual after k iterations'''
N = len(population)
best = None
for i in range(k):
ind = population[random.randint(0, N - 1)]
if best == None or self.fitness(ind) > self.fitness(best):
best = ind
return best
def reproduce(self, red1:redneuronal.RedN, red2:redneuronal.RedN, index):
nlayers = len(red1.red)
new = []
for i in range(nlayers):
layer1 = red1.red[i]
layer2 = red2.red[i]
l = len(layer1)
r = random.randint(1, l - 1)
rep = layer1[0:r] + layer2[r:l]
baby = []
for i in range(l):
if random.random() < self.mutation_rate:
baby.append(self.mutneuron(rep[i]))
else:
baby.append(rep[i])
new.append(baby)
return redneuronal.RedN(new, index)
def find(self):
# best individuals of last generation
best = []
size = self.pop_size
        # select the candidate best individuals
while (len(best) < size*self.lastbest_rate):
sel = self.tournament_selection(self.current_generation, self.tournament_size)
if sel not in best:
best.append(sel)
self.current_generation.remove(sel)
        # build the new generation from the best of the previous one
gen = []
count = 0
while (len(gen) < size):
ind1, ind2 = random.sample(best, 2)
baby = self.reproduce(ind1, ind2,count)
count+=1
gen.append(baby)
self.current_generation = gen
self.savefitness()
self.current_fitness = []
def fitness(self, ind:redneuronal.RedN):
return self.current_fitness[ind.index]
def savefitness(self):
self.totalfitness.append(statistics.mean(self.current_fitness))
def mutneuron(self, neuron:list):
print('mutante!')
new = []
for i in range(len(neuron)-1):
if random.random()<0.5:
new.append(random.random() * 2)
else:
new.append(neuron[i])
new.append((neuron[-1]+random.random())%2)
return new
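# Hedged usage sketch (added, not part of the original module): wiring the GA to an external
# evaluation loop. Fitness values are supplied from outside by filling current_fitness (one
# value per network, indexed by its `index` attribute) before calling find(); the numbers
# below are placeholders.
#
#   ga = RedNeuronalGA(size=20, config=[2, [4, 2]], inputsize=3)
#   ga.initialize()
#   ga.current_fitness = [random.random() for _ in range(ga.pop_size)]
#   ga.find()  # builds the next generation from tournament selection + crossover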
|
plt1994/cc5114ne
|
genalg.py
|
genalg.py
|
py
| 4,234 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72873485629
|
import fileinput;
import os;
path = "E:\pythonProjects\Simple Examples\phpFiles";
data = os.listdir(path);
i=0;
wordtofind = input("Enter Word To Find");
wordtoreplace = input("Enter Word To Replace");
def manipulate(param):
rfile = open(param).read()
if rfile.__contains__(wordtofind):
rfile = rfile.replace(wordtofind,wordtoreplace)
wfile = open(param,'w')
wfile.write(rfile)
wfile.close()
while (i<len(data)):
if not data[i].startswith("Python.py"):
        manipulate(os.path.join(path, data[i]))  # join with the folder path so files outside the CWD are found
i=i+1;
|
ebuddiess/pythonTheSnake
|
Simple Examples/ContentRenamer.py
|
ContentRenamer.py
|
py
| 527 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44713652316
|
import sys
import xmltodict
color_names = {
'Foreground Color': 'ForegroundColour',
'Background Color': 'BackgroundColour',
'Cursor Text Color': 'CursorColour',
'Ansi 0 Color': 'Black',
'Ansi 1 Color': 'Red',
'Ansi 2 Color': 'Green',
'Ansi 3 Color': 'Yellow',
'Ansi 4 Color': 'Blue',
'Ansi 5 Color': 'Magenta',
'Ansi 6 Color': 'Cyan',
'Ansi 7 Color': 'White',
'Ansi 8 Color': 'BoldBlack',
'Ansi 9 Color': 'BoldRed',
'Ansi 10 Color': 'BoldGreen',
'Ansi 11 Color': 'BoldYellow',
'Ansi 12 Color': 'BoldBlue',
'Ansi 13 Color': 'BoldMagenta',
'Ansi 14 Color': 'BoldCyan',
'Ansi 15 Color': 'BoldWhite'
}
def get_color(data, name):
color_data = data['dict'][data['key'].index(name)]
red = get_component(color_data, 'Red Component')
green = get_component(color_data, 'Green Component')
blue = get_component(color_data, 'Blue Component')
return (red, green, blue)
def get_component(color_data, component_name):
component_index = color_data['key'].index(component_name)
component_value = color_data['real'][component_index]
    return round(float(component_value) * 255)  # scale to 0-255 (multiplying by 256 overflows for a component of 1.0)
input_filename = sys.argv[1]
with open(input_filename) as fd:
iterm = xmltodict.parse(fd.read())['plist']['dict']
fg_data = get_color(iterm, 'Foreground Color')
for iterm_color in color_names.keys():
mintty_color = color_names[iterm_color]
color = get_color(iterm, iterm_color)
print("{} = {}, {}, {}".format(mintty_color, color[0], color[1], color[2]))
|
arcadecoffee/iterm-to-mintty
|
iterm-to-mintty.py
|
iterm-to-mintty.py
|
py
| 1,706 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25008840061
|
from osv import fields, osv
import ir
class partner_wh_rebate(osv.osv):
_name = "res.partner"
_inherit = "res.partner"
_columns = {
'rebate': fields.float('Rebate (%)', digits=(5, 2)),
}
partner_wh_rebate()
class sale_order_rebate(osv.osv):
_name = "sale.order"
_inherit = "sale.order"
def _amount_wo_rebate(self, cr, uid, ids, field_name, arg, context):
return super(sale_order_rebate, self)._amount_untaxed(cr, uid, ids, field_name, arg, context)
def _amount_rebate(self, cr, uid, ids, field_name, arg, context):
wo_rebate = self._amount_wo_rebate(cr, uid, ids, field_name, arg, context)
orders = self.read(cr, uid, ids, ['rebate_percent'], context)
rebates = dict([(o['id'], o['rebate_percent']) for o in orders])
res = {}
for id in ids:
res[id] = wo_rebate.get(id, 0.0) * (rebates.get(id, 0.0) / 100.0)
return res
def _amount_untaxed(self, cr, uid, ids, field_name, arg, context):
wo_rebate = self._amount_wo_rebate(cr, uid, ids, field_name, arg, context)
rebate = self._amount_rebate(cr, uid, ids, field_name, arg, context)
res = {}
for id in ids:
res[id] = wo_rebate.get(id, 0.0) - rebate.get(id, 0.0)
return res
def _amount_tax(self, cr, uid, ids, field_name, arg, context):
res = {}
cur_obj=self.pool.get('res.currency')
for order in self.browse(cr, uid, ids):
val = 0.0
cur=order.pricelist_id.currency_id
for line in order.order_line:
for c in self.pool.get('account.tax').compute(cr, uid, line.tax_id, line.price_unit, line.product_uom_qty, order.partner_invoice_id.id):
val += cur_obj.round(cr, uid, cur, (c['amount'] * (100.0 - order.rebate_percent) / 100.0))
res[order.id] = cur_obj.round(cr, uid, cur, val)
return res
_columns = {
'rebate_percent': fields.float('Rebate (%)', digits=(5, 2), readonly=True, states={'draft':[('readonly',False)]}),
# 'rebate_account': fields.many2one('account.account', 'Rebate account', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'amount_wo_rebate': fields.function(_amount_wo_rebate, method=True, string='Intermediate sum'),
'amount_rebate': fields.function(_amount_rebate, method=True, string='Rebate'),
'amount_untaxed': fields.function(_amount_untaxed, method=True, string='Untaxed Amount'),
'amount_tax': fields.function(_amount_tax, method=True, string='Taxes'),
}
_defaults = {
'rebate_percent': lambda *a: 0.0,
}
#
# Why not using super().onchange_partner_id ?
#
def onchange_partner_id(self, cr, uid, ids, partner_id):
if not partner_id:
return {'value': {'partner_invoice_id': False, 'partner_shipping_id': False, 'partner_order_id': False}}
partner = self.pool.get('res.partner').browse(cr, uid, partner_id)
addr = self.pool.get('res.partner').address_get(cr, uid, [partner_id], ['delivery', 'invoice', 'contact'])
pricelist = partner.property_product_pricelist.id
return {
'value': {
'rebate_percent': partner.rebate or 0.0,
'partner_invoice_id': addr['invoice'],
'partner_order_id': addr['contact'],
'partner_shipping_id': addr['delivery'],
'pricelist_id': pricelist
}
}
def action_invoice_create(self, cr, uid, ids, grouped=False, states=['confirmed','done']):
assert len(ids)==1, "Can only invoice one sale order at a time"
invoice_id = super(sale_order_rebate, self).action_invoice_create(cr, uid, ids, grouped, states)
if invoice_id:
order = self.browse(cr, uid, ids[0])
inv_obj = self.pool.get('account.invoice')
inv_obj.write(cr, uid, [invoice_id], {'rebate_percent': order.rebate_percent})
inv_obj.button_compute(cr, uid, [invoice_id])
return invoice_id
sale_order_rebate()
class account_invoice_wh_rebate(osv.osv):
_name = "account.invoice"
_inherit = "account.invoice"
def _amount_wo_rebate(self, cr, uid, ids, field_name, arg, context):
return super(account_invoice_wh_rebate, self)._amount_untaxed(cr, uid, ids, field_name, arg, context)
def _amount_untaxed(self, cr, uid, ids, field_name, arg, context):
un_taxed = super(account_invoice_wh_rebate, self)._amount_untaxed(cr, uid, ids, field_name, arg, context)
res = {}
for invoice in self.browse(cr, uid, ids):
res[invoice.id] = un_taxed[invoice.id] - invoice.rebate_amount
return res
_columns = {
'amount_wo_rebate': fields.function(_amount_wo_rebate, method=True, string='Intermediate sum'),
'amount_untaxed': fields.function(_amount_untaxed, method=True, string='Untaxed Amount'),
'rebate_percent': fields.float('Rebate (%)', digits=(5, 2), readonly=True),
'rebate_amount': fields.float('Rebate amount', digits=(14, 2), readonly=True)
}
account_invoice_wh_rebate()
class account_invoice_line_wh_rebate(osv.osv):
_name = "account.invoice.line"
_inherit = "account.invoice.line"
def move_line_get(self, cr, uid, invoice_id):
invoice = self.pool.get('account.invoice').browse(cr, uid, invoice_id)
res = []
tax_grouped = {}
tax_obj = self.pool.get('account.tax')
#TODO: rewrite using browse instead of the manual SQL queries
cr.execute('SELECT * FROM account_invoice_line WHERE invoice_id=%s', (invoice_id,))
lines = cr.dictfetchall()
rebate_percent = invoice.rebate_percent
rebate_amount = 0.0
for line in lines:
price_unit = line['price_unit'] * (100.0 - rebate_percent) / 100.0
res.append({'type':'src', 'name':line['name'], 'price_unit':price_unit, 'quantity':line['quantity'], 'price':round(line['quantity']*price_unit, 2), 'account_id':line['account_id']})
cr.execute('SELECT tax_id FROM account_invoice_line_tax WHERE invoice_line_id=%s', (line['id'],))
rebate_amount += (line['price_unit'] * rebate_percent / 100.0) * line['quantity']
for (tax_id,) in cr.fetchall():
# even though we pass only one tax id at a time to compute, it can return several results
# in case a tax has a parent tax
sequence = tax_obj.read(cr, uid, [tax_id], ['sequence'])[0]['sequence']
for tax in tax_obj.compute(cr, uid, [tax_id], price_unit, line['quantity'], invoice.address_invoice_id.id):
tax['sequence'] = sequence
if invoice.type in ('out_invoice','in_refund'):
tax['account_id'] = tax['account_collected_id']
else:
tax['account_id'] = tax['account_paid_id']
key = tax['account_id']
if not key in tax_grouped:
tax_grouped[key] = tax
else:
tax_grouped[key]['amount'] += tax['amount']
# delete automatic tax lines for this invoice
cr.execute("DELETE FROM account_invoice_tax WHERE NOT manual AND invoice_id=%s", (invoice_id,))
# (re)create them
ait = self.pool.get('account.invoice.tax')
for t in tax_grouped.values():
ait.create(cr, uid, {'invoice_id':invoice_id, 'name':t['name'], 'account_id':t['account_id'], 'amount':t['amount'], 'manual':False, 'sequence':t['sequence']})
# update rebate amount for this invoice
self.pool.get('account.invoice').write(cr, uid, [invoice_id], {'rebate_amount': rebate_amount})
return res
account_invoice_line_wh_rebate()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
factorlibre/openerp-extra-6.1
|
sale_rebate/sale.py
|
sale.py
|
py
| 7,924 |
python
|
en
|
code
| 9 |
github-code
|
6
|
33967529914
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
# @Time :2020/7/7 15:39
# @Author :Sheng Chen
# @Email :[email protected]
import sys
sys.path.append(r'/home/chensheng/likou')
from typing import List, Tuple
class Solution:
rows = [{} for i in range(9)]
columns = [{} for i in range(9)]
boxes = [{} for i in range(9)]
fillIndex = []
isValid = False
ini = False
def isValidSudoku(self, board: List[List[str]]):
# global rows,columns,boxes,fillIndex,isValid
for i in range(9):
for j in range(9):
num = board[i][j]
if num != '.':
self.rows[i][num] = self.rows[i].get(num, 0) + 1
self.columns[j][num] = self.columns[j].get(num, 0) + 1
boxIndex = (i // 3) * 3 + j // 3
self.boxes[boxIndex][num] = self.boxes[boxIndex].get(num, 0) + 1
if self.rows[i][num] > 1 or self.columns[j][num] > 1 or self.boxes[boxIndex][num] > 1:
return
else:
self.fillIndex.append((i, j))
self.isValid = True
self.ini = True
def solveSudoku(self, board: List[List[str]]) -> None:
if not self.ini:
self.isValidSudoku(board)
if not self.isValid:
return
if len(self.fillIndex) == 0:
return True
i, j = self.fillIndex.pop(0)
row = self.rows[i]
column = self.columns[j]
box = self.boxes[(i // 3) * 3 + j // 3]
dic = {**row, **column, **box}
candidate_num = [str(num) for num in range(1, 10) if str(num) not in dic]
if len(candidate_num) == 0:
self.fillIndex.insert(0, (i, j))
return False
else:
for num in candidate_num:
board[i][j] = num
self.rows[i][num] = 1
self.columns[j][num] = 1
self.boxes[(i // 3) * 3 + j // 3][num] = 1
a = self.solveSudoku(board)
if not a:
board[i][j] = '.'
del self.rows[i][num]
del self.columns[j][num]
del self.boxes[(i // 3) * 3 + j // 3][num]
else:
return True
self.fillIndex.insert(0, (i, j))
return False
if __name__ == '__main__':
board = [[".", ".", "9", "7", "4", "8", ".", ".", "."], ["7", ".", ".", ".", ".", ".", ".", ".", "."],
[".", "2", ".", "1", ".", "9", ".", ".", "."], [".", ".", "7", ".", ".", ".", "2", "4", "."],
[".", "6", "4", ".", "1", ".", "5", "9", "."], [".", "9", "8", ".", ".", ".", "3", ".", "."],
[".", ".", ".", "8", ".", "3", ".", "2", "."], [".", ".", ".", ".", ".", ".", ".", ".", "6"],
[".", ".", ".", "2", "7", "5", "9", ".", "."]]
obj = Solution()
obj.solveSudoku(board)
print(board)
print(4//2*2)
|
fqlovetu/likou_python
|
37解数独/solution1.py
|
solution1.py
|
py
| 2,979 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22959918169
|
# Write a program that reads an integer and asks the user to choose the CONVERSION BASE:
# 1 -> binary; 2 -> octal & 3 -> hexadecimal
import math
print('\n\t Base de Conversão!')
num = int(input('\n Informe um número => '))
numConv = str(input(' Informe: \033[1;33m1 - binário, 2 - octal & 3 - hexadecimal\033[m => '))
if numConv == '1':
transfBin = bin(num)
print(' O número {} convertido para binário fica \033[1;33m{}\033[m'.format(num,transfBin))
elif numConv=='2':
transfOctal = oct(num)
print(' O número {} convertido para octal fica \033[1;33m{}\033[m'.format(num, transfOctal))
elif numConv=='3':
transfHex = hex(num)
print( 'O número {} convertido para hexadecimal fica \033[1;33m{}\033[m'.format(num, transfHex))
else:
print('\033[1;31m O número informado não existe na tabela\033[m')
|
eduardabenevenutti77/curso_em_video.py
|
mundo2 - python/if_else/BaseConversao.py
|
BaseConversao.py
|
py
| 891 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
1293789301
|
import numpy as np
import onnx
from tests.tools import expect
class Sqrt:
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Sqrt',
inputs=['x'],
outputs=['y'],
)
x = np.array([1, 4, 9]).astype(np.float32)
y = np.sqrt(x) # expected output [1., 2., 3.]
expect(node, inputs=[x], outputs=[y], name='test_sqrt_example')
x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
y = np.sqrt(x)
expect(node, inputs=[x], outputs=[y], name='test_sqrt')
if __name__ == '__main__':
Sqrt.export()
|
gglin001/onnx-jax
|
tests/node/test_sqrt.py
|
test_sqrt.py
|
py
| 632 |
python
|
en
|
code
| 7 |
github-code
|
6
|
70005437309
|
from __future__ import with_statement
from fabric.api import *
import os, glob, socket
import fabric.contrib.project as project
PROD = 'spreadwebm.org'
PROD_PATH = 'domains/spreadwebm.com/web/public/'
ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
DEPLOY_PATH = os.path.join(ROOT_PATH, 'deploy')
def clean():
local('rm -rf ./deploy/*')
def regen():
clean()
local('hyde.py -g -s .')
def pushcss():
"""
For pushing CSS-only changes to the local docroot during testing.
"""
local('cp -r media/css/* deploy/media/css/')
def serve():
## Kill any heel process
local('heel --kill')
## Start webserver on local hostname
#local('heel --daemonize --address ' + socket.gethostbyaddr(socket.gethostname())[0] + ' --root ./deploy')
## Start webserver on development hostname
local('heel --daemonize --address localhost --root ./deploy')
def reserve():
regen()
local('heel --kill')
serve()
@hosts(PROD)
def publish():
regen()
project.rsync_project(
remote_dir=PROD_PATH,
local_dir=DEPLOY_PATH.rstrip('/') + '/',
delete=True
)
|
louquillio/spreadwebm.com
|
fabfile.py
|
fabfile.py
|
py
| 1,127 |
python
|
en
|
code
| 1 |
github-code
|
6
|
19617294623
|
# _*_ coding: utf-8 _*_
import os
import csv
import time
import json
import logging
import numpy as np
import tensorflow as tf
from sklearn.metrics import auc, roc_curve
# calculate_auc : calculate AUC rate
def calculate_auc(labels, predicts):
fpr, tpr, _ = roc_curve(labels, predicts, pos_label=1)
AUC = auc(fpr, tpr)
return fpr, tpr, AUC
def contrastive_loss(labels, distance):
loss = tf.to_float(tf.reduce_sum(tf.square(distance - labels)))
return loss
def compute_accuracy(prediction, labels, threshold=0.5):
accu = 0.0
for i in xrange(len(prediction)):
if labels[i][0] == 1:
if prediction[i][0] > threshold:
accu += 1.0
else:
if prediction[i][0] < threshold:
accu += 1.0
acc = accu / len(prediction)
return acc
# read_and_decode : generate a queue based on filename
def read_and_decode(filename):
filename_queue = tf.train.string_input_producer([filename])
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(serialized_example, features={
'label': tf.FixedLenFeature([], tf.int64),
'cfg_1': tf.FixedLenFeature([], tf.string),
'cfg_2': tf.FixedLenFeature([], tf.string),
'dfg_1': tf.FixedLenFeature([], tf.string),
'dfg_2': tf.FixedLenFeature([], tf.string),
'fea_1': tf.FixedLenFeature([], tf.string),
'fea_2': tf.FixedLenFeature([], tf.string),
'num1': tf.FixedLenFeature([], tf.int64),
'num2': tf.FixedLenFeature([], tf.int64),
'max': tf.FixedLenFeature([], tf.int64)})
label = tf.cast(features['label'], tf.int32)
cfg_1 = features['cfg_1']
cfg_2 = features['cfg_2']
dfg_1 = features['dfg_1']
dfg_2 = features['dfg_2']
num1 = tf.cast(features['num1'], tf.int32)
fea_1 = features['fea_1']
num2 = tf.cast(features['num2'], tf.int32)
fea_2 = features['fea_2']
max_num = tf.cast(features['max'], tf.int32)
return label, cfg_1, cfg_2, dfg_1, dfg_2, fea_1, fea_2, num1, num2, max_num
# GoCloneTfHandler : handler using tensorflow to detect code clone in Golang
class GoCloneTfHandler(object):
    def __init__(self, iteration_times=5, embedding_depth=2, embedding_size=64,
                 feature_num=10, mini_batch=10, learning_rate=0.0001, max_iter=1,
                 decay_steps=10, decay_rate=0.0001, snapshot=1, test_num=1000,
                 train_tfrecord="tfrecord/train.tfrecord",
                 test_tfrecord="tfrecord/test.tfrecord",
                 valid_tfrecord="tfrecord/valid.tfrecord",
                 exist_model="", ckpt_file="", test_file="", result_file="",
                 func_info_path=""):
# self.iteration_times = iteration_times # T
self.embedding_depth = embedding_depth # N
self.embedding_size = embedding_size # P
self.feature_num = feature_num # D
self.mini_batch = mini_batch # B
self.learning_rate = learning_rate # lr
self.max_iter = max_iter
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.snapshot = snapshot
self.test_file = test_file
self.result_file = result_file
self.test_num = test_num
self.train_tfrecord = train_tfrecord
self.test_tfrecord = test_tfrecord
self.valid_tfrecord = valid_tfrecord
self.exist_model = exist_model
self.ckpt_file = ckpt_file
self.func_info_path = func_info_path
self.pair_list = []
self.logger = logging.getLogger("default")
self.logger_init()
def load_csv_as_pair(self, pair_label_file):
with open(pair_label_file, "r") as fp:
pair_label = csv.reader(fp)
for line in pair_label:
self.pair_list.append((line[0], line[1]))
# logger_init : initialize logger for console and file
def logger_init(self):
self.logger.setLevel(logging.DEBUG)
log_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(log_format)
self.logger.addHandler(console_handler)
log_file_name = "logs/log%s.txt" % time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())
file_handler = logging.FileHandler(log_file_name, mode='w', encoding='utf-8')
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(log_format)
self.logger.addHandler(file_handler)
# structure2vec : Construct pairs dataset to train the model.
def structure2vec(self, mu_prev, cfg, dfg, x, name="structure2vec"):
with tf.variable_scope(name):
W_1 = tf.get_variable('W_1', [self.feature_num, self.embedding_size], tf.float32, tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
param_cfg = tf.get_variable('param_cfg', 1, tf.float32, tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
P_CFG_1 = tf.get_variable('P_CFG_1', [self.embedding_size, self.embedding_size], tf.float32, tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
P_CFG_2 = tf.get_variable('P_CFG_2', [self.embedding_size, self.embedding_size], tf.float32, tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
L_CFG = tf.reshape(tf.matmul(cfg, mu_prev, transpose_a=True), (-1, self.embedding_size))
S_CFG =param_cfg*tf.reshape(tf.matmul(tf.nn.relu(tf.matmul(L_CFG, P_CFG_2)), P_CFG_1), (-1, self.embedding_size))
param_dfg = tf.get_variable('param_dfg', 1, tf.float32, tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
P_DFG_1 = tf.get_variable('P_DFG_1', [self.embedding_size, self.embedding_size], tf.float32, tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
P_DFG_2 = tf.get_variable('P_DFG_2', [self.embedding_size, self.embedding_size], tf.float32, tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
L_DFG = tf.reshape(tf.matmul(dfg, mu_prev, transpose_a=True), (-1, self.embedding_size))
S_DFG = param_dfg*tf.reshape(tf.matmul(tf.nn.relu(tf.matmul(L_DFG, P_DFG_2)), P_DFG_1), (-1, self.embedding_size))
return tf.tanh(tf.add(tf.add(tf.reshape(tf.matmul(tf.reshape(x, (-1, self.feature_num)), W_1), (-1, self.embedding_size)), S_CFG), S_DFG))
def structure2vec_net(self, cfgs, dfgs, x, v_num):
with tf.variable_scope("structure2vec_net") as structure2vec_net:
B_mu_5 = tf.Variable(tf.zeros(shape = [0, self.embedding_size]), trainable=False)
w_2 = tf.get_variable('w_2', [self.embedding_size, self.embedding_size], tf.float32, tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
for i in range(self.mini_batch):
cur_size = tf.to_int32(v_num[i][0])
mu_0 = tf.reshape(tf.zeros(shape = [cur_size, self.embedding_size]), (cur_size, self.embedding_size))
cfg = tf.slice(cfgs[i], [0, 0], [cur_size, cur_size])
dfg = tf.slice(dfgs[i], [0, 0], [cur_size, cur_size])
fea = tf.slice(x[i],[0,0], [cur_size, self.feature_num])
mu_1 = self.structure2vec(mu_0, cfg, dfg, fea)
structure2vec_net.reuse_variables()
mu_2 = self.structure2vec(mu_1, cfg, dfg, fea)
mu_3 = self.structure2vec(mu_2, cfg, dfg, fea)
mu_4 = self.structure2vec(mu_3, cfg, dfg, fea)
mu_5 = self.structure2vec(mu_4, cfg, dfg, fea)
B_mu_5 = tf.concat([B_mu_5,tf.matmul(tf.reshape(tf.reduce_sum(mu_5, 0), (1, self.embedding_size)), w_2)],0)
return B_mu_5
def cal_distance(self, model1, model2):
a_b = tf.reduce_sum(tf.reshape(tf.reduce_prod(tf.concat([tf.reshape(model1,(1,-1)), tf.reshape(model2,(1,-1))],0),0),(self.mini_batch,self.embedding_size)),1,keep_dims=True)
a_norm = tf.sqrt(tf.reduce_sum(tf.square(model1),1,keep_dims=True))
b_norm = tf.sqrt(tf.reduce_sum(tf.square(model2),1,keep_dims=True))
distance = a_b/tf.reshape(tf.reduce_prod(tf.concat([tf.reshape(a_norm,(1,-1)), tf.reshape(b_norm,(1,-1))],0),0),(self.mini_batch,1))
return distance
def get_batch(self, label, cfg_str1, cfg_str2, dfg_str1, dfg_str2, fea_str1, fea_str2, num1, num2, max_num):
y = np.reshape(label, [self.mini_batch, 1])
v_num_1 = []
v_num_2 = []
for i in range(self.mini_batch):
v_num_1.append([int(num1[i])])
v_num_2.append([int(num2[i])])
cfg_1 = []
cfg_2 = []
dfg_1 = []
dfg_2 = []
for i in range(self.mini_batch):
cfg_arr = np.array(cfg_str1[i].split(','))
cfg_adj = np.reshape(cfg_arr, (int(num1[i]), int(num1[i])))
cfg_ori1 = cfg_adj.astype(np.float32)
cfg_ori1.resize(int(max_num[i]), int(max_num[i]), refcheck=False)
cfg_1.append(cfg_ori1.tolist())
cfg_arr = np.array(cfg_str2[i].split(','))
cfg_adj = np.reshape(cfg_arr, (int(num2[i]), int(num2[i])))
cfg_ori2 = cfg_adj.astype(np.float32)
cfg_ori2.resize(int(max_num[i]), int(max_num[i]), refcheck=False)
cfg_2.append(cfg_ori2.tolist())
dfg_arr = np.array(dfg_str1[i].split(','))
dfg_adj = np.reshape(dfg_arr, (int(num1[i]), int(num1[i])))
dfg_ori1 = dfg_adj.astype(np.float32)
dfg_ori1.resize(int(max_num[i]), int(max_num[i]), refcheck=False)
dfg_1.append(dfg_ori1.tolist())
dfg_arr = np.array(dfg_str2[i].split(','))
dfg_adj = np.reshape(dfg_arr, (int(num2[i]), int(num2[i])))
dfg_ori2 = dfg_adj.astype(np.float32)
dfg_ori2.resize(int(max_num[i]), int(max_num[i]), refcheck=False)
dfg_2.append(dfg_ori2.tolist())
fea_1 = []
fea_2 = []
for i in range(self.mini_batch):
fea_arr = np.array(fea_str1[i].split(','))
fea_ori = fea_arr.astype(np.float32)
fea_vec1 = np.resize(fea_ori, (np.max(v_num_1), self.feature_num))
fea_1.append(fea_vec1)
fea_arr = np.array(fea_str2[i].split(','))
fea_ori= fea_arr.astype(np.float32)
fea_vec2 = np.resize(fea_ori, (np.max(v_num_2), self.feature_num))
fea_2.append(fea_vec2)
return y, cfg_1, cfg_2, dfg_1, dfg_2, fea_1, fea_2, v_num_1, v_num_2
def run(self):
tf.global_variables_initializer()
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(self.learning_rate, global_step, self.decay_steps, self.decay_rate, staircase=True)
v_num_left = tf.placeholder(tf.float32, shape=[self.mini_batch, 1], name='v_num_left')
cfg_left = tf.placeholder(tf.float32, shape=([self.mini_batch, None, None]), name='cfg_left')
dfg_left = tf.placeholder(tf.float32, shape=([self.mini_batch, None, None]), name='dfg_left')
fea_left = tf.placeholder(tf.float32, shape=([self.mini_batch, None, self.feature_num]), name='fea_left')
v_num_right = tf.placeholder(tf.float32, shape=[self.mini_batch, 1], name='v_num_right')
cfg_right = tf.placeholder(tf.float32, shape=([self.mini_batch, None, None]), name='cfg_right')
dfg_right = tf.placeholder(tf.float32, shape=([self.mini_batch, None, None]), name='dfg_right')
fea_right = tf.placeholder(tf.float32, shape=([self.mini_batch, None, self.feature_num]), name='fea_right')
labels = tf.placeholder(tf.float32, shape=([self.mini_batch, 1]), name='gt')
dropout_f = tf.placeholder("float")
with tf.variable_scope("siamese") as siamese:
model1 = self.structure2vec_net(cfg_left, dfg_left, fea_left, v_num_left)
siamese.reuse_variables()
model2 = self.structure2vec_net(cfg_right, dfg_right, fea_right, v_num_right)
dis = self.cal_distance(model1, model2)
loss = contrastive_loss(labels, dis)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
list_test_label, list_test_cfg_1, list_test_cfg_2, list_test_dfg_1, list_test_dfg_2, list_test_fea_1, \
list_test_fea_2, list_test_num1, list_test_num2, list_test_max = read_and_decode(self.test_tfrecord)
batch_test_label, batch_test_cfg_1, batch_test_cfg_2, batch_test_dfg_1, batch_test_dfg_2, batch_test_fea_1, \
batch_test_fea_2, batch_test_num1, batch_test_num2, batch_test_max \
= tf.train.batch([list_test_label, list_test_cfg_1, list_test_cfg_2, list_test_dfg_1, list_test_dfg_2,
list_test_fea_1, list_test_fea_2, list_test_num1, list_test_num2, list_test_max],
batch_size=self.mini_batch, capacity=10)
init_opt = tf.global_variables_initializer()
saver = tf.train.Saver()
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# read json from func_info_path
with open(self.func_info_path, 'r') as f:
func_info_dic = json.load(f)
result_dic = {}
with tf.Session() as sess:
writer = tf.summary.FileWriter('logs/', sess.graph)
# check whether to load exist models
if self.exist_model == "":
sess.run(init_opt)
else:
saver = tf.train.import_meta_graph(self.ckpt_file)
saver.restore(sess, tf.train.latest_checkpoint(self.exist_model))
self.logger.info("Loading models from %s" % self.ckpt_file)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# Training cycle
iter=0
self.load_csv_as_pair(self.test_file)
while iter < self.max_iter:
iter += 1
total_batch = int(self.test_num / self.mini_batch)
if iter % self.snapshot == 0:
total_labels = []
total_predicts = []
avg_loss = 0.
avg_acc = 0.
test_total_batch = int(self.test_num / self.mini_batch)
start_time = time.time()
for m in range(test_total_batch):
test_label, test_cfg_1, test_cfg_2, test_dfg_1, test_dfg_2, \
test_fea_1, test_fea_2, test_num1, test_num2, test_max = sess.run(
[batch_test_label, batch_test_cfg_1, batch_test_cfg_2, batch_test_dfg_1, batch_test_dfg_2, batch_test_fea_1, batch_test_fea_2, batch_test_num1, batch_test_num2, batch_test_max])
y, cfg_1, cfg_2, dfg_1, dfg_2, fea_1, fea_2, v_num_1, v_num_2 \
= self.get_batch(test_label, test_cfg_1, test_cfg_2, test_dfg_1, test_dfg_2,
test_fea_1, test_fea_2, test_num1, test_num2, test_max)
predict = dis.eval(
feed_dict={cfg_left: cfg_1, dfg_left: dfg_1, fea_left: fea_1, v_num_left: v_num_1, cfg_right: cfg_2,
dfg_right: dfg_2, fea_right: fea_2, v_num_right: v_num_2, labels: y, dropout_f: 1.0})
for k, p in enumerate(predict):
(id1, id2) = self.pair_list[y[k][0]]
result_dic[(func_info_dic[id1], func_info_dic[id2])] = p[0]
if m % 20 == 0:
self.logger.info("Testing: %s/%s" % (m, test_total_batch))
coord.request_stop()
coord.join(threads)
result_desc = sorted(result_dic.items(), key=lambda item:-item[1])
with open(self.result_file, "w") as f:
for r in result_desc:
f.write("%s\n%s\n%.4f\n\n" % (r[0][0], r[0][1], r[1]))
|
wangcong15/go-clone
|
Go-CloneF/src/tfrecord2test.py
|
tfrecord2test.py
|
py
| 15,894 |
python
|
en
|
code
| 5 |
github-code
|
6
|
27214868635
|
from enum import IntEnum, auto
from typing import List, Mapping, Union, Tuple, Optional
from .aetg import AETGGenerator
from .matrix import MatrixGenerator
from ...model import int_enum_loads
from ...reflection import progressive_for
__all__ = ['tmatrix']
@int_enum_loads(enable_int=False, name_preprocess=str.upper)
class MatrixMode(IntEnum):
AETG = auto()
MATRIX = auto()
def tmatrix(ranges: Mapping[Union[str, Tuple[str, ...]], List],
mode='aetg', seed: Optional[int] = 0, level: int = 2) -> Tuple[List[str], List[Tuple]]:
"""
Overview:
    Test matrix generator, which can be used with ``pytest.mark.parametrize``.
:param ranges: Ranges of the values
:param mode: Mode of the matrix, should be one of the ``aetg`` or ``matrix``. Default is ``aetg``.
:param seed: Random seed, default is ``0`` which means the result is fixed (recommended).
    :param level: Level of the AETG generating algorithm, default is ``2``.
:returns: A tuple - ``(names, values)``.
Examples::
>>> from hbutils.testing import tmatrix
>>> names, values = tmatrix(
... {
... 'a': [2, 3],
... 'e': ['a', 'b', 'c'],
... ('b', 'c'): [(1, 7), (4, 6), (9, 12)],
... }
... )
>>> print(names)
['a', 'e', 'b', 'c']
>>> for i, v in enumerate(values):
... print(i, v)
0 (2, 'c', 9, 12)
1 (3, 'c', 4, 6)
2 (2, 'c', 1, 7)
3 (3, 'b', 9, 12)
4 (2, 'b', 4, 6)
5 (3, 'b', 1, 7)
6 (3, 'a', 9, 12)
7 (2, 'a', 4, 6)
8 (3, 'a', 1, 7)
.. note::
        This can be used directly with ``pytest.mark.parametrize``.
>>> @pytest.mark.unittest
... class TestTestingGeneratorFunc:
... @pytest.mark.parametrize(*tmatrix({
... 'a': [2, 3],
... 'e': ['a', 'b', 'c'],
... ('b', 'c'): [(1, 7), (4, 6), (9, 12)],
... }))
... def test_tmatrix_usage(self, a, e, b, c):
... print(a, e, b, c)
"""
mode = MatrixMode.loads(mode)
key_map = {}
final_names = []
final_values = {}
for ki, (key, value) in enumerate(ranges.items()):
kname = f'key-{ki}'
key_map[kname] = key
final_names.append(kname)
final_values[kname] = value
names = []
for key in ranges.keys():
if isinstance(key, str):
names.append(key)
elif isinstance(key, tuple):
for k in key:
names.append(k)
if mode == MatrixMode.MATRIX:
generator = MatrixGenerator(final_values, final_names)
elif mode == MatrixMode.AETG:
generator = AETGGenerator(
final_values, final_names, rnd=seed,
pairs=list(progressive_for(final_names, min(level, len(names)))),
)
else:
raise ValueError(f'Invalid mode - {mode!r}.') # pragma: no cover
pairs = []
for case in generator.cases():
_v_case = {}
for name in final_names:
key = key_map[name]
if isinstance(key, str):
_v_case[key] = case[name]
elif isinstance(key, tuple):
for ikey, ivalue in zip(key, case[name]):
_v_case[ikey] = ivalue
pairs.append(tuple(_v_case[name] for name in names))
return names, pairs
|
HansBug/hbutils
|
hbutils/testing/generator/func.py
|
func.py
|
py
| 3,442 |
python
|
en
|
code
| 7 |
github-code
|
6
|
26113055515
|
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "28/06/2018"
import logging
import numpy
import weakref
import functools
from typing import Optional
from ....utils.weakref import WeakList
from ... import qt
from .. import items
from ..items import core
from ...colors import rgba
logger = logging.getLogger(__name__)
class _RegionOfInterestBase(qt.QObject):
"""Base class of 1D and 2D region of interest
:param QObject parent: See QObject
:param str name: The name of the ROI
"""
sigAboutToBeRemoved = qt.Signal()
"""Signal emitted just before this ROI is removed from its manager."""
sigItemChanged = qt.Signal(object)
"""Signal emitted when item has changed.
It provides a flag describing which property of the item has changed.
See :class:`ItemChangedType` for flags description.
"""
def __init__(self, parent=None):
qt.QObject.__init__(self)
if parent is not None:
self.setParent(parent)
self.__name = ''
def getName(self):
"""Returns the name of the ROI
:return: name of the region of interest
:rtype: str
"""
return self.__name
def setName(self, name):
"""Set the name of the ROI
:param str name: name of the region of interest
"""
name = str(name)
if self.__name != name:
self.__name = name
self._updated(items.ItemChangedType.NAME)
def _updated(self, event=None, checkVisibility=True):
"""Implement Item mix-in update method by updating the plot items
See :class:`~silx.gui.plot.items.Item._updated`
"""
self.sigItemChanged.emit(event)
def contains(self, position):
"""Returns True if the `position` is in this ROI.
:param tuple[float,float] position: position to check
:return: True if the value / point is consider to be in the region of
interest.
:rtype: bool
"""
return False # Override in subclass to perform actual test
class RoiInteractionMode(object):
"""Description of an interaction mode.
    An interaction mode provides a specific kind of interaction for a ROI.
    A ROI can implement several interaction modes.
"""
def __init__(self, label, description=None):
self._label = label
self._description = description
@property
def label(self):
"""Short name"""
return self._label
@property
def description(self):
"""Longer description of the interaction mode"""
return self._description
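# Hedged illustration (added, not part of silx): RoiInteractionMode instances are plain
# value objects describing one way of interacting with a ROI, e.g.
#
#   _EDIT_MODE = RoiInteractionMode("Edition", description="Edit the ROI handles")
#   _EDIT_MODE.label        # -> "Edition"
#   _EDIT_MODE.description  # -> "Edit the ROI handles"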
class InteractionModeMixIn(object):
"""Mix in feature which can be implemented by a ROI object.
This provides user interaction to switch between different
interaction mode to edit the ROI.
This ROI modes have to be described using `RoiInteractionMode`,
and taken into account during interation with handles.
"""
sigInteractionModeChanged = qt.Signal(object)
def __init__(self):
self.__modeId = None
def _initInteractionMode(self, modeId):
"""Set the mode without updating anything.
Must be one of the returned :meth:`availableInteractionModes`.
:param RoiInteractionMode modeId: Mode to use
"""
self.__modeId = modeId
def availableInteractionModes(self):
"""Returns the list of available interaction modes
Must be implemented when inherited to provide all available modes.
:rtype: List[RoiInteractionMode]
"""
raise NotImplementedError()
def setInteractionMode(self, modeId):
"""Set the interaction mode.
:param RoiInteractionMode modeId: Mode to use
"""
self.__modeId = modeId
self._interactiveModeUpdated(modeId)
self.sigInteractionModeChanged.emit(modeId)
def _interactiveModeUpdated(self, modeId):
"""Called directly after an update of the mode.
The signal `sigInteractionModeChanged` is triggered after this
call.
Must be implemented when inherited to take care of the change.
"""
raise NotImplementedError()
def getInteractionMode(self):
"""Returns the interaction mode.
Must be one of the returned :meth:`availableInteractionModes`.
:rtype: RoiInteractionMode
"""
return self.__modeId
def createMenuForInteractionMode(self, parent: qt.QWidget) -> qt.QMenu:
"""Create a menu providing access to the different interaction modes"""
availableModes = self.availableInteractionModes()
currentMode = self.getInteractionMode()
submenu = qt.QMenu(parent)
modeGroup = qt.QActionGroup(parent)
modeGroup.setExclusive(True)
for mode in availableModes:
action = qt.QAction(parent)
action.setText(mode.label)
action.setToolTip(mode.description)
action.setCheckable(True)
if mode is currentMode:
action.setChecked(True)
else:
callback = functools.partial(self.setInteractionMode, mode)
action.triggered.connect(callback)
modeGroup.addAction(action)
submenu.addAction(action)
submenu.setTitle("Interaction mode")
return submenu
class RegionOfInterest(_RegionOfInterestBase, core.HighlightedMixIn):
"""Object describing a region of interest in a plot.
:param QObject parent:
The RegionOfInterestManager that created this object
"""
_DEFAULT_LINEWIDTH = 1.
"""Default line width of the curve"""
_DEFAULT_LINESTYLE = '-'
"""Default line style of the curve"""
_DEFAULT_HIGHLIGHT_STYLE = items.CurveStyle(linewidth=2)
"""Default highlight style of the item"""
ICON, NAME, SHORT_NAME = None, None, None
"""Metadata to describe the ROI in labels, tooltips and widgets
    Should be set by inheriting classes to customize the ROI manager widget.
"""
sigRegionChanged = qt.Signal()
"""Signal emitted everytime the shape or position of the ROI changes"""
sigEditingStarted = qt.Signal()
"""Signal emitted when the user start editing the roi"""
sigEditingFinished = qt.Signal()
"""Signal emitted when the region edition is finished. During edition
sigEditionChanged will be emitted several times and
sigRegionEditionFinished only at end"""
def __init__(self, parent=None):
# Avoid circular dependency
from ..tools import roi as roi_tools
assert parent is None or isinstance(parent, roi_tools.RegionOfInterestManager)
_RegionOfInterestBase.__init__(self, parent)
core.HighlightedMixIn.__init__(self)
self.__text = None
self._color = rgba('red')
self._editable = False
self._selectable = False
self._focusProxy = None
self._visible = True
self._child = WeakList()
def _connectToPlot(self, plot):
"""Called after connection to a plot"""
for item in self.getItems():
# This hack is needed to avoid reentrant call from _disconnectFromPlot
            # to the ROI manager. It also speeds up the item tests in _itemRemoved
item._roiGroup = True
plot.addItem(item)
def _disconnectFromPlot(self, plot):
"""Called before disconnection from a plot"""
for item in self.getItems():
            # The item could already have been removed by the plot
if item.getPlot() is not None:
del item._roiGroup
plot.removeItem(item)
def _setItemName(self, item):
"""Helper to generate a unique id to a plot item"""
legend = "__ROI-%d__%d" % (id(self), id(item))
item.setName(legend)
def setParent(self, parent):
"""Set the parent of the RegionOfInterest
:param Union[None,RegionOfInterestManager] parent: The new parent
"""
# Avoid circular dependency
from ..tools import roi as roi_tools
if (parent is not None and not isinstance(parent, roi_tools.RegionOfInterestManager)):
raise ValueError('Unsupported parent')
previousParent = self.parent()
if previousParent is not None:
previousPlot = previousParent.parent()
if previousPlot is not None:
self._disconnectFromPlot(previousPlot)
super(RegionOfInterest, self).setParent(parent)
if parent is not None:
plot = parent.parent()
if plot is not None:
self._connectToPlot(plot)
def addItem(self, item):
"""Add an item to the set of this ROI children.
        This item will be added to and removed from the plot used by the ROI.
If the ROI is already part of a plot, the item will also be added to
the plot.
        If the item does not have a name already, a unique one is generated to
avoid item collision in the plot.
:param silx.gui.plot.items.Item item: A plot item
"""
assert item is not None
self._child.append(item)
if item.getName() == '':
self._setItemName(item)
manager = self.parent()
if manager is not None:
plot = manager.parent()
if plot is not None:
item._roiGroup = True
plot.addItem(item)
def removeItem(self, item):
"""Remove an item from this ROI children.
If the item is part of a plot it will be removed too.
:param silx.gui.plot.items.Item item: A plot item
"""
assert item is not None
self._child.remove(item)
plot = item.getPlot()
if plot is not None:
del item._roiGroup
plot.removeItem(item)
def getItems(self):
"""Returns the list of PlotWidget items of this RegionOfInterest.
:rtype: List[~silx.gui.plot.items.Item]
"""
return tuple(self._child)
@classmethod
def _getShortName(cls):
"""Return an human readable kind of ROI
:rtype: str
"""
if hasattr(cls, "SHORT_NAME"):
name = cls.SHORT_NAME
if name is None:
name = cls.__name__
return name
def getColor(self):
"""Returns the color of this ROI
:rtype: QColor
"""
return qt.QColor.fromRgbF(*self._color)
def setColor(self, color):
"""Set the color used for this ROI.
:param color: The color to use for ROI shape as
either a color name, a QColor, a list of uint8 or float in [0, 1].
"""
color = rgba(color)
if color != self._color:
self._color = color
self._updated(items.ItemChangedType.COLOR)
def isEditable(self):
"""Returns whether the ROI is editable by the user or not.
:rtype: bool
"""
return self._editable
def setEditable(self, editable):
"""Set whether the ROI can be changed interactively.
:param bool editable: True to allow edition by the user,
False to disable.
"""
editable = bool(editable)
if self._editable != editable:
self._editable = editable
self._updated(items.ItemChangedType.EDITABLE)
def isSelectable(self):
"""Returns whether the ROI is selectable by the user or not.
:rtype: bool
"""
return self._selectable
def setSelectable(self, selectable):
"""Set whether the ROI can be selected interactively.
:param bool selectable: True to allow selection by the user,
False to disable.
"""
selectable = bool(selectable)
if self._selectable != selectable:
self._selectable = selectable
self._updated(items.ItemChangedType.SELECTABLE)
def getFocusProxy(self):
"""Returns the ROI which have to be selected when this ROI is selected,
else None if no proxy specified.
:rtype: RegionOfInterest
"""
proxy = self._focusProxy
if proxy is None:
return None
proxy = proxy()
if proxy is None:
self._focusProxy = None
return proxy
def setFocusProxy(self, roi):
"""Set the real ROI which will be selected when this ROI is selected,
else None to remove the proxy already specified.
:param RegionOfInterest roi: A ROI
"""
if roi is not None:
self._focusProxy = weakref.ref(roi)
else:
self._focusProxy = None
def isVisible(self):
"""Returns whether the ROI is visible in the plot.
.. note::
This does not take into account whether or not the plot
widget itself is visible (unlike :meth:`QWidget.isVisible` which
checks the visibility of all its parent widgets up to the window)
:rtype: bool
"""
return self._visible
def setVisible(self, visible):
"""Set whether the plot items associated with this ROI are
visible in the plot.
:param bool visible: True to show the ROI in the plot, False to
hide it.
"""
visible = bool(visible)
if self._visible != visible:
self._visible = visible
self._updated(items.ItemChangedType.VISIBLE)
def getText(self) -> str:
"""Returns the currently displayed text for this ROI"""
return self.getName() if self.__text is None else self.__text
def setText(self, text: Optional[str] = None) -> None:
"""Set the displayed text for this ROI.
If None (the default), the ROI name is used.
"""
if self.__text != text:
self.__text = text
self._updated(items.ItemChangedType.TEXT)
def _updateText(self, text: str) -> None:
"""Update the text displayed by this ROI
Override in subclass to custom text display
"""
pass
@classmethod
def showFirstInteractionShape(cls):
"""Returns True if the shape created by the first interaction and
        managed by the plot has to be visible.
:rtype: bool
"""
return False
@classmethod
def getFirstInteractionShape(cls):
"""Returns the shape kind which will be used by the very first
interaction with the plot.
        These interactions are hardcoded inside the plot
:rtype: str
"""
return cls._plotShape
def setFirstShapePoints(self, points):
"""Initialize the ROI using the points from the first interaction.
        This interaction is constrained by the plot API and only supports a few
shapes.
"""
raise NotImplementedError()
def creationStarted(self):
"""Called when the ROI creation interaction was started.
"""
pass
def creationFinalized(self):
"""Called when the ROI creation interaction was finalized.
"""
pass
def _updateItemProperty(self, event, source, destination):
"""Update the item property of a destination from an item source.
:param items.ItemChangedType event: Property type to update
:param silx.gui.plot.items.Item source: The reference for the data
        :param Union[Item,List[Item]] destination: The item(s) to update
"""
if not isinstance(destination, (list, tuple)):
destination = [destination]
if event == items.ItemChangedType.NAME:
value = source.getName()
for d in destination:
d.setName(value)
elif event == items.ItemChangedType.EDITABLE:
value = source.isEditable()
for d in destination:
d.setEditable(value)
elif event == items.ItemChangedType.SELECTABLE:
value = source.isSelectable()
for d in destination:
d._setSelectable(value)
elif event == items.ItemChangedType.COLOR:
value = rgba(source.getColor())
for d in destination:
d.setColor(value)
elif event == items.ItemChangedType.LINE_STYLE:
value = self.getLineStyle()
for d in destination:
d.setLineStyle(value)
elif event == items.ItemChangedType.LINE_WIDTH:
value = self.getLineWidth()
for d in destination:
d.setLineWidth(value)
elif event == items.ItemChangedType.SYMBOL:
value = self.getSymbol()
for d in destination:
d.setSymbol(value)
elif event == items.ItemChangedType.SYMBOL_SIZE:
value = self.getSymbolSize()
for d in destination:
d.setSymbolSize(value)
elif event == items.ItemChangedType.VISIBLE:
value = self.isVisible()
for d in destination:
d.setVisible(value)
else:
assert False
def _updated(self, event=None, checkVisibility=True):
if event == items.ItemChangedType.TEXT:
self._updateText(self.getText())
elif event == items.ItemChangedType.HIGHLIGHTED:
style = self.getCurrentStyle()
self._updatedStyle(event, style)
else:
styleEvents = [items.ItemChangedType.COLOR,
items.ItemChangedType.LINE_STYLE,
items.ItemChangedType.LINE_WIDTH,
items.ItemChangedType.SYMBOL,
items.ItemChangedType.SYMBOL_SIZE]
if self.isHighlighted():
styleEvents.append(items.ItemChangedType.HIGHLIGHTED_STYLE)
if event in styleEvents:
style = self.getCurrentStyle()
self._updatedStyle(event, style)
super(RegionOfInterest, self)._updated(event, checkVisibility)
# Displayed text has changed, send a text event
if event == items.ItemChangedType.NAME and self.__text is None:
self._updated(items.ItemChangedType.TEXT, checkVisibility)
def _updatedStyle(self, event, style):
"""Called when the current displayed style of the ROI was changed.
:param event: The event responsible of the change of the style
:param items.CurveStyle style: The current style
"""
pass
def getCurrentStyle(self):
"""Returns the current curve style.
Curve style depends on curve highlighting
:rtype: CurveStyle
"""
baseColor = rgba(self.getColor())
if isinstance(self, core.LineMixIn):
baseLinestyle = self.getLineStyle()
baseLinewidth = self.getLineWidth()
else:
baseLinestyle = self._DEFAULT_LINESTYLE
baseLinewidth = self._DEFAULT_LINEWIDTH
if isinstance(self, core.SymbolMixIn):
baseSymbol = self.getSymbol()
baseSymbolsize = self.getSymbolSize()
else:
baseSymbol = 'o'
baseSymbolsize = 1
if self.isHighlighted():
style = self.getHighlightedStyle()
color = style.getColor()
linestyle = style.getLineStyle()
linewidth = style.getLineWidth()
symbol = style.getSymbol()
symbolsize = style.getSymbolSize()
return items.CurveStyle(
color=baseColor if color is None else color,
linestyle=baseLinestyle if linestyle is None else linestyle,
linewidth=baseLinewidth if linewidth is None else linewidth,
symbol=baseSymbol if symbol is None else symbol,
symbolsize=baseSymbolsize if symbolsize is None else symbolsize)
else:
return items.CurveStyle(color=baseColor,
linestyle=baseLinestyle,
linewidth=baseLinewidth,
symbol=baseSymbol,
symbolsize=baseSymbolsize)
def _editingStarted(self):
assert self._editable is True
self.sigEditingStarted.emit()
def _editingFinished(self):
self.sigEditingFinished.emit()
def populateContextMenu(self, menu: qt.QMenu):
"""Populate a menu used as a context menu"""
pass
class HandleBasedROI(RegionOfInterest):
"""Manage a ROI based on a set of handles"""
def __init__(self, parent=None):
RegionOfInterest.__init__(self, parent=parent)
self._handles = []
self._posOrigin = None
self._posPrevious = None
def addUserHandle(self, item=None):
"""
Add a new free handle to the ROI.
        This handle does nothing. It has to be managed by the ROI
implementing this class.
:param Union[None,silx.gui.plot.items.Marker] item: The new marker to
add, else None to create a default marker.
:rtype: silx.gui.plot.items.Marker
"""
return self.addHandle(item, role="user")
def addLabelHandle(self, item=None):
"""
Add a new label handle to the ROI.
        This handle is neither draggable nor selectable.
        It is displayed without a symbol, but it is always visible whether or not
        the ROI is editable, in order to display text.
:param Union[None,silx.gui.plot.items.Marker] item: The new marker to
add, else None to create a default marker.
:rtype: silx.gui.plot.items.Marker
"""
return self.addHandle(item, role="label")
def addTranslateHandle(self, item=None):
"""
Add a new translate handle to the ROI.
        Dragging translate handles affects the position of the ROI
but not the shape itself.
:param Union[None,silx.gui.plot.items.Marker] item: The new marker to
add, else None to create a default marker.
:rtype: silx.gui.plot.items.Marker
"""
return self.addHandle(item, role="translate")
def addHandle(self, item=None, role="default"):
"""
Add a new handle to the ROI.
        Dragging handles will affect the position or the shape of the
ROI.
:param Union[None,silx.gui.plot.items.Marker] item: The new marker to
add, else None to create a default marker.
:rtype: silx.gui.plot.items.Marker
"""
if item is None:
item = items.Marker()
color = rgba(self.getColor())
color = self._computeHandleColor(color)
item.setColor(color)
if role == "default":
item.setSymbol("s")
elif role == "user":
pass
elif role == "translate":
item.setSymbol("+")
elif role == "label":
item.setSymbol("")
if role == "user":
pass
elif role == "label":
item._setSelectable(False)
item._setDraggable(False)
item.setVisible(True)
else:
self.__updateEditable(item, self.isEditable(), remove=False)
item._setSelectable(False)
self._handles.append((item, role))
self.addItem(item)
return item
def removeHandle(self, handle):
data = [d for d in self._handles if d[0] is handle][0]
self._handles.remove(data)
role = data[1]
if role not in ["user", "label"]:
if self.isEditable():
self.__updateEditable(handle, False)
self.removeItem(handle)
def getHandles(self):
"""Returns the list of handles of this HandleBasedROI.
:rtype: List[~silx.gui.plot.items.Marker]
"""
return tuple(data[0] for data in self._handles)
def _updated(self, event=None, checkVisibility=True):
"""Implement Item mix-in update method by updating the plot items
See :class:`~silx.gui.plot.items.Item._updated`
"""
if event == items.ItemChangedType.VISIBLE:
for item, role in self._handles:
visible = self.isVisible()
editionVisible = visible and self.isEditable()
if role not in ["user", "label"]:
item.setVisible(editionVisible)
else:
item.setVisible(visible)
elif event == items.ItemChangedType.EDITABLE:
for item, role in self._handles:
editable = self.isEditable()
if role not in ["user", "label"]:
self.__updateEditable(item, editable)
super(HandleBasedROI, self)._updated(event, checkVisibility)
def _updatedStyle(self, event, style):
super(HandleBasedROI, self)._updatedStyle(event, style)
# Update color of shape items in the plot
color = rgba(self.getColor())
handleColor = self._computeHandleColor(color)
for item, role in self._handles:
if role == 'user':
pass
elif role == 'label':
item.setColor(color)
else:
item.setColor(handleColor)
def __updateEditable(self, handle, editable, remove=True):
        # NOTE: a visibility change emits a position update event
handle.setVisible(editable and self.isVisible())
handle._setDraggable(editable)
if editable:
handle.sigDragStarted.connect(self._handleEditingStarted)
handle.sigItemChanged.connect(self._handleEditingUpdated)
handle.sigDragFinished.connect(self._handleEditingFinished)
else:
if remove:
handle.sigDragStarted.disconnect(self._handleEditingStarted)
handle.sigItemChanged.disconnect(self._handleEditingUpdated)
handle.sigDragFinished.disconnect(self._handleEditingFinished)
def _handleEditingStarted(self):
super(HandleBasedROI, self)._editingStarted()
handle = self.sender()
self._posOrigin = numpy.array(handle.getPosition())
self._posPrevious = numpy.array(self._posOrigin)
self.handleDragStarted(handle, self._posOrigin)
def _handleEditingUpdated(self):
if self._posOrigin is None:
            # Avoid handling events when the visibility changes
return
handle = self.sender()
current = numpy.array(handle.getPosition())
self.handleDragUpdated(handle, self._posOrigin, self._posPrevious, current)
self._posPrevious = current
def _handleEditingFinished(self):
handle = self.sender()
current = numpy.array(handle.getPosition())
self.handleDragFinished(handle, self._posOrigin, current)
self._posPrevious = None
self._posOrigin = None
super(HandleBasedROI, self)._editingFinished()
def isHandleBeingDragged(self):
"""Returns True if one of the handles is currently being dragged.
:rtype: bool
"""
return self._posOrigin is not None
def handleDragStarted(self, handle, origin):
"""Called when an handler drag started"""
pass
def handleDragUpdated(self, handle, origin, previous, current):
"""Called when an handle drag position changed"""
pass
def handleDragFinished(self, handle, origin, current):
"""Called when an handle drag finished"""
pass
def _computeHandleColor(self, color):
"""Returns the anchor color from the base ROI color
        :param Union[numpy.array,Tuple,List] color: The base ROI color
:rtype: Union[numpy.array,Tuple,List]
"""
return color[:3] + (0.5,)
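# --- Illustrative sketch (not part of silx): a minimal ROI subclass combining
# RegionOfInterest with InteractionModeMixIn, following the contract described
# in the docstrings above. The class name and the two modes are hypothetical.
class _ExampleModalROI(RegionOfInterest, InteractionModeMixIn):
    """Minimal ROI exposing two interaction modes (example only)."""

    EditMode = RoiInteractionMode("Edition", "Drag handles to edit the shape")
    MoveMode = RoiInteractionMode("Translation", "Drag to move the whole shape")

    def __init__(self, parent=None):
        RegionOfInterest.__init__(self, parent=parent)
        InteractionModeMixIn.__init__(self)
        # Set the initial mode without emitting sigInteractionModeChanged
        self._initInteractionMode(self.EditMode)

    def availableInteractionModes(self):
        # Required override: list every supported mode
        return [self.EditMode, self.MoveMode]

    def _interactiveModeUpdated(self, modeId):
        # Required override: adapt handles/shape to the new mode
        pass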
|
silx-kit/silx
|
src/silx/gui/plot/items/_roi_base.py
|
_roi_base.py
|
py
| 27,769 |
python
|
en
|
code
| 106 |
github-code
|
6
|
28765664515
|
from async_scrape import Scrape
import requests
import json
from selenium.webdriver import Edge
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
url = "https://order.marstons.co.uk/"
base_dir = "C:/Users/robert.franklin/Desktop/local_projects/random/marstons"
# GET ALL RESTAURANT DATA - selenium
browser = Edge()
browser.get(url)
wait = WebDriverWait(browser, 100).until(
EC.presence_of_all_elements_located((By.CLASS_NAME, "venues-list"))
)
elements = browser.find_elements(By.CLASS_NAME, "venue-card")
hrefs = [e.get_dom_attribute("href") for e in elements]
browser.close()
print(f"Fetched {len(hrefs)} hrefs from {url}")
def post_process_func(html, resp, *args, **kwargs):
# Save to file
fn = resp.url.split("/")[-1]
content = json.loads(resp.content)
with open(f"{base_dir}/data/raw/{fn}.json", "w") as f:
json.dump(content, f, indent=4)
base_url = "https://api-cdn.orderbee.co.uk/venues"
urls = [base_url + href for href in hrefs]
scrape = Scrape(post_process_func=post_process_func)
print(f"Begin scrape of {len(urls)} - Example: {urls[0]}")
scrape.scrape_all(urls)
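# Illustrative note (assumption, not taken from the async_scrape docs): the
# post_process_func above receives the parsed html and the raw response, and
# names each output file after the last URL segment, e.g.
#   https://api-cdn.orderbee.co.uk/venues/<venue-slug>  ->  data/raw/<venue-slug>.json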
|
cia05rf/marstons
|
webscrape/scrape.py
|
scrape.py
|
py
| 1,223 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7090155614
|
import random
def limiter(count, num):
    secret = random.randint(1, num)  # pick the winning number once per game
    while count != 0:
        try:
            guess = int(input('Guess a number: '))
            count -= 1
            if guess == secret:
                print('YOU GOT IT RIGHT!')
                break
else:
print("\n That was Wrong!")
if count > 1:
print(f'You have {count} guesses left\n')
else:
print(f'You have {count} guess left\n')
except UnboundLocalError:
print("Variable 'guess' not defined")
except ValueError:
print("Invalid value, please input a number")
else:
        print('You ran out of guessing lives')
        print('GAME OVER!')
def easy():
print(''' __EASY LEVEL__ You have 6 GUESSES....''')
limiter(6, 10)
def medium():
print(''' __MEDIUM LEVEL__ You have 4 guesses...''')
limiter(4, 20)
def hard():
print(''' __HARD LEVEL__ You have 3 guesses...''')
limiter(3, 50)
def choice():
user_choice = input('Enter your desired level: ').upper()
if user_choice == 'EASY':
easy()
elif user_choice == 'MEDIUM':
medium()
elif user_choice == 'HARD':
hard()
else:
print("Not a valid LEVEL, try again")
choice()
print('''There are 3 levels;
EASY,
MEDIUM,
HARD \n''')
choice()
|
jan-far/Guessing_game
|
task3.py
|
task3.py
|
py
| 1,395 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70501561789
|
from django.conf.urls import patterns, include, url
urlpatterns = patterns('django_sprinkler',
url(r"^get_context/?", "views.get_context", name="get_context"),
url(r"^logs/?", "views.watering_logs", name="watering_logs"),
url(r"^toggle_valve/(\d+)?/?", "views.toggle_valve", name="toggle_valve"),
url(r"^activate_program/(\d+)?/?", "views.activate_program", name="activate_program"),
url(r"^set_state/(\w+)?", "views.set_state", name="set_state"),
url(r'^$', "views.home", name="home"),
)
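# Note (illustrative, not part of this repo): django.conf.urls.patterns() was
# removed in Django 1.10; on a modern Django the same routes would look roughly like
#   from django_sprinkler import views
#   urlpatterns = [
#       url(r"^get_context/?", views.get_context, name="get_context"),
#       url(r"^logs/?", views.watering_logs, name="watering_logs"),
#       ...
#   ]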
|
jpardobl/django_sprinkler
|
django_sprinkler/urls.py
|
urls.py
|
py
| 517 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44364885746
|
'''
3. (factors) Program that reads a positive integer n and determines its
decomposition into prime factors, also computing the multiplicity of each factor.
'''
def main():
    n = int(input("Digite um numero (>1): "))
    fator = 2  # first factor
    while n != 1:
        # count the multiplicity of fator in n
        mult = 0
        while n % fator == 0:
            n = n // fator  # integer division keeps n an int
            mult = mult + 1
        # print the multiplicity of the factor
        if mult != 0:
            print("fator %d multiplicidade %d" % (fator, mult))
        fator = fator + 1
#-------------------------------------------------------
main()  # call the main function
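# Worked example (illustrative): for n = 360 = 2**3 * 3**2 * 5 the loop prints
#   fator 2 multiplicidade 3
#   fator 3 multiplicidade 2
#   fator 5 multiplicidade 1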
|
danilosheen/topicos-especiais
|
q3.py
|
q3.py
|
py
| 681 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
72777565307
|
# Plotting solution of x''(t) + x(t) = 0 equation
import numpy as np
import matplotlib.pyplot as plt
import os
from io import StringIO
import pandas as pd
from find_solution import find_solution
from plot_utils import create_dir
def plot_solution(plot_dir, t_end, delta_t):
data = find_solution(t_end=t_end, delta_t=delta_t, print_last=False)
create_dir(plot_dir)
if data is None:
return
df = pd.read_csv(StringIO(data), skipinitialspace=True)
plt.plot(df['t'], df['x'], label='Approximation')
exact_t = np.arange(0.0, t_end, 0.01)
exact_x = np.cos(exact_t)
    plt.plot(exact_t, exact_x, label=r'Exact $x=\cos(t)$', linestyle='--')
plt.title(r'Solution of $\ddot{x} + x = 0, x(0)=1, \dot{x}(0)=0$ for dt=' + f'{delta_t}')
plt.xlabel('t')
plt.ylabel(r'x')
plt.legend()
plt.grid()
plt.tight_layout()
plotfile = os.path.join(plot_dir, f"approx_vs_exact_dt_{delta_t}.pdf")
plt.savefig(plotfile)
plt.show()
if __name__ == '__main__':
plot_solution(plot_dir="plots", t_end=6.28, delta_t=1)
plot_solution(plot_dir="plots", t_end=6.28, delta_t=0.1)
|
evgenyneu/ASP3162
|
03_second_order_ode/plotting/plot_solution.py
|
plot_solution.py
|
py
| 1,130 |
python
|
en
|
code
| 1 |
github-code
|
6
|
28382656931
|
import cgi
import sys
import io
import genshin.database.operation as gdo
form = cgi.FieldStorage()
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
template = """
<html>
<head>
<meta charset="utf-8">
<script type="text/javascript">
location.replace('/cgi-bin/characters.py?dname={name}');
</script>
</head>
<body>
<p>Deleting...</p>
</body>
</html>
"""
def delete_character_data(name):
gdo.delete_character(name)
def main():
name = form.getvalue("del")
delete_character_data(name)
print("Content-type: text/html\n")
print(template.format(name=name))
main()
|
waigoma/genshin-charatraining-supporter
|
src/cgi-bin/character_delete.py
|
character_delete.py
|
py
| 628 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17717247977
|
import image
def photo_to_bw(filename):
img=image.Image(filename)
win=image.ImageWin(img.getWidth(),img.getHeight())
img.draw(win)
img.setDelay(0)
for row in range(img.getHeight()):
for col in range(img.getWidth()):
p=img.getPixel(col,row)
newvalue=(p.getRed()+p.getGreen()+p.getBlue())//3
if newvalue>127:
pixelvalue=255
else:
pixelvalue=0
newpixel=image.Pixel(pixelvalue,pixelvalue,pixelvalue)
img.setPixel(col,row,newpixel)
img.draw(win)
win.exitonclick()
photo_to_bw("luther.jpg")
|
tim24jones/thinkcspy
|
Chap_8/09_photo_to_bw.py
|
09_photo_to_bw.py
|
py
| 627 |
python
|
en
|
code
| 5 |
github-code
|
6
|
8331488278
|
#!/usr/bin/env python
# Two useful functions, dir() and help(), for exploring modules in Python.
import urllib
# List which members a module implements by using the dir() function.
dir(urllib)
# Read more about a module member using the help() function.
help(urllib.urlopen)
import re
find_members = []
for member in dir(re):
if "find" in member:
find_members.append(member)
print(sorted(find_members))
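# Expected output (illustrative): the standard re module typically exposes
# 'findall' and 'finditer' as its only members containing "find", so this
# prints ['findall', 'finditer'].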
|
igei-yh/learning-python
|
basic_modules.py
|
basic_modules.py
|
py
| 397 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21944300528
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 7 23:31:33 2018
@author: liguo
Outlier data analysis
"""
from __future__ import print_function
from nets import nets_factory
from preprocessing import vgg_preprocessing
import sys
sys.path.append('../../tensorflow/models/slim/') # add slim to PYTHONPATH
import tensorflow as tf
import os
import time
import shutil
import pandas as pd
slim = tf.contrib.slim
tf.app.flags.DEFINE_integer('num_classes', 2, 'The number of classes.')
tf.app.flags.DEFINE_string('infile', '../test', 'Image file, one image per line.')
tf.app.flags.DEFINE_string('model_name', 'resnet_v1_50', 'The name of the architecture to testuate.')
tf.app.flags.DEFINE_string('preprocessing_name', None, 'The name of the preprocessing to use. If left as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_string('checkpoint_path', 'checkpoint/','The directory where the model was written to or an absolute path to a checkpoint file.')
tf.app.flags.DEFINE_integer('test_image_size', None, 'test image size.')
tf.app.flags.DEFINE_string('outliers_path', 'outliers', 'The path to save outliers images.')
FLAGS = tf.app.flags.FLAGS
model_name_to_variables = {'resnet_v1_50':'resnet_v1_50', 'vgg_16':'vgg_16'}
def main(_):
model_variables = model_name_to_variables.get(FLAGS.model_name)
if model_variables is None:
tf.logging.error("Unknown model_name provided `%s`." % FLAGS.model_name)
sys.exit(-1)
if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
else:
checkpoint_path = FLAGS.checkpoint_path
    # Read the input image, the preprocessing model and the network model
image_string = tf.placeholder(tf.string)
image = tf.image.decode_jpeg(image_string, channels=3, try_recover_truncated=True, acceptable_fraction=0.3)
network_fn = nets_factory.get_network_fn(FLAGS.model_name, FLAGS.num_classes, is_training=False)
    # Data preprocessing
    if FLAGS.test_image_size is None:
        test_image_size = network_fn.default_image_size
    else:
        test_image_size = FLAGS.test_image_size
processed_image = vgg_preprocessing.preprocess_image(image, test_image_size, test_image_size, is_training=False)
processed_images = tf.expand_dims(processed_image, 0)
    # Get the network outputs
logits, _ = network_fn(processed_images)
probabilities = tf.nn.softmax(logits)
    # Initialization
init_fn = slim.assign_from_checkpoint_fn(checkpoint_path, slim.get_model_variables(model_variables))
sess = tf.Session()
init_fn(sess)
start_time = time.time()
    # Run inference
result = []
test_images = os.listdir(FLAGS.infile)
for test_image in test_images:
path = os.path.join(FLAGS.infile, test_image)
content = tf.gfile.FastGFile(path, 'rb').read()
_logits, _prob = sess.run([logits, probabilities], feed_dict={image_string:content})
sum_squares = _logits[0, 0] * _logits[0, 0] + _logits[0, 1] * _logits[0, 1]
_prob = _prob[0, 0:]
_prob = _prob[1]
classes = 'cat' if 'cat' in test_image else 'dog'
result.append([path, test_image, classes, sum_squares, _prob, _logits[0, 0], _logits[0, 1]])
sess.close()
    # Write the results to a csv file
path_list = []
name_list = []
class_list = []
sum_squares_list = []
prob_list = []
logits1_list = []
logits2_list = []
for item in result:
path_list.append(item[0])
name_list.append(item[1])
class_list.append(item[2])
sum_squares_list.append(item[3])
prob_list.append(item[4])
logits1_list.append(item[5])
logits2_list.append(item[6])
dataframe = pd.DataFrame({'path':path_list, 'name':name_list, 'class':class_list,
'sum_squares':sum_squares_list, 'prob':prob_list,
'logits1':logits1_list, 'logits2':logits2_list})
dataframe.to_csv("outliers.csv", index=False, sep=',')
if not os.path.exists(FLAGS.outliers_path):
os.makedirs(FLAGS.outliers_path)
    # Output the images with the smallest sum_squares
all_path = os.path.join(FLAGS.outliers_path, 'min_sum_squares')
if not os.path.exists(all_path):
os.makedirs(all_path)
for i in range(min(500, len(result))):
for j in range(i+1, len(result)):
if result[i][3] > result[j][3]:
temp = result[i]
result[i] = result[j]
result[j] = temp
shutil.copyfile(result[i][0], os.path.join(all_path, format(i, "3d")+"_"+result[i][1]))
    # Output the hardest-to-recognize cat images
cat_path = os.path.join(FLAGS.outliers_path, 'cat_max_logits')
if not os.path.exists(cat_path):
os.makedirs(cat_path)
for i in range(min(250, len(result))):
for j in range(i+1, len(result)):
if (result[j][2] == 'cat') and (result[i][2] == 'dog' or result[i][4] < result[j][4]):
temp = result[i]
result[i] = result[j]
result[j] = temp
shutil.copyfile(result[i][0], os.path.join(cat_path, format(result[i][4], ".3f")+"_"+result[i][1]))
    # Output the hardest-to-recognize dog images
dog_path = os.path.join(FLAGS.outliers_path, 'dog_min_logits')
if not os.path.exists(dog_path):
os.makedirs(dog_path)
for i in range(min(250, len(result))):
for j in range(i+1, len(result)):
if (result[j][2] == 'dog') and (result[i][2] == 'cat' or result[i][4] > result[j][4]):
temp = result[i]
result[i] = result[j]
result[j] = temp
shutil.copyfile(result[i][0], os.path.join(dog_path, format(result[i][4], ".3f")+"_"+result[i][1]))
print('total time cost = %.2f' %(time.time() - start_time))
if __name__ == '__main__':
tf.app.run()
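# Illustrative invocation (paths and values are assumptions, adjust to your setup):
#   python analysis_outliers.py --model_name resnet_v1_50 --num_classes 2 \
#       --checkpoint_path checkpoint/ --infile ../test --outliers_path outliers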
|
wlkdb/dogs_vs_cats
|
transfer_learning/analysis_outliers.py
|
analysis_outliers.py
|
py
| 5,865 |
python
|
en
|
code
| 10 |
github-code
|
6
|
30804272942
|
#Exercise 1: Cats
#Instantiate three Cat objects using the code provided above.
#Outside of the class, create a function that finds the oldest cat and returns the cat.
#Print the following string: “The oldest cat is <cat_name>, and is <cat_age> years old.”. Use the function previously created.
class Cat:
def __init__(self, cat_name, cat_age):
self.name = cat_name
self.age = cat_age
cat1 = Cat("Malfoy", 3)
cat2 = Cat("Fluffy", 6)
cat3 = Cat("Germy", 8)
def find_oldest_cat(*cats):
oldest_cat = None
for cat in cats:
if oldest_cat is None or cat.age > oldest_cat.age:
oldest_cat = cat
return oldest_cat
oldest_cat = find_oldest_cat(cat1, cat2, cat3)
print(f"The oldest cat is {oldest_cat.name}, and he is {oldest_cat.age} years old.")
# Exercise 2 : Dogs
class Dog:
def __init__(self, name, height):
self.name = name
self.height = height
def bark(self):
print(f"{self.name} goes woof!")
def jump(self):
print(f"{self.name} jumps {self.height * 2} cm high!")
davids_dog = Dog("Rex", 50)
print(f"David's dog is named {davids_dog.name} and is {davids_dog.height}cm tall.")
davids_dog.bark()
davids_dog.jump()
sarahs_dog = Dog("Teacup", 20)
print(f"Sarah's dog is named {sarahs_dog.name} and is {sarahs_dog.height}cm tall.")
sarahs_dog.bark()
sarahs_dog.jump()
if davids_dog.height > sarahs_dog.height:
print(f"{davids_dog.name} is bigger.")
else:
print(f"{sarahs_dog.name} is bigger.")
#Exercise 3 : Who’s The Song Producer?
# a class called Song, it will show the lyrics of a song.
#Inside your class create a method called sing_me_a_song that prints each element of lyrics on its own line.
#Create an object, for example:
#stairway= Song(["There’s a lady who's sure","all that glitters is gold", "and she’s buying a stairway to heaven"])
class Song:
def __init__(self, lyrics):
self.lyrics = lyrics
def sing_me_a_song(self):
for line in self.lyrics:
print(line)
stairway = Song(["We all live in a yellow submarine", "Yellow submarine, yellow submarine", "We all live in a yellow submarine", "Yellow submarine, yellow submarine"])
stairway.sing_me_a_song()
#Exercise 4 : Afternoon At The Zoo
class Zoo:
def __init__(self, zoo_name):
self.name = zoo_name
self.animals = []
def add_animal(self, new_animal):
if new_animal not in self.animals:
self.animals.append(new_animal)
def get_animals(self):
print("Animals in the zoo:")
for animal in self.animals:
print(animal)
def sell_animal(self, animal_sold):
if animal_sold in self.animals:
self.animals.remove(animal_sold)
def sort_animals(self):
animal_dict = {}
for animal in self.animals:
if animal[0] not in animal_dict:
animal_dict[animal[0]] = [animal]
else:
animal_dict[animal[0]].append(animal)
sorted_animals = sorted(animal_dict.items())
for key, value in sorted_animals:
print(key + ": ", end="")
print(", ".join(value))
def get_groups(self):
animal_dict = {}
for animal in self.animals:
if animal[0] not in animal_dict:
animal_dict[animal[0]] = [animal]
else:
animal_dict[animal[0]].append(animal)
for key, value in animal_dict.items():
print(key + ": ", end="")
print(", ".join(value))
# Create an object called ramat_gan_safari and call all the methods
ramat_gan_safari = Zoo("Ramat Gan Safari")
ramat_gan_safari.add_animal("Giraffe")
ramat_gan_safari.add_animal("Baboon")
ramat_gan_safari.add_animal("Bear")
ramat_gan_safari.add_animal("Cat")
ramat_gan_safari.add_animal("Cougar")
ramat_gan_safari.add_animal("Eel")
ramat_gan_safari.add_animal("Emu")
ramat_gan_safari.get_animals()
ramat_gan_safari.sell_animal("Eel")
ramat_gan_safari.sort_animals()
ramat_gan_safari.get_groups()
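# Expected grouping (illustrative) after selling the Eel: the remaining animals
# are keyed by their first letter, so sort_animals() prints the groups in sorted
# key order (get_groups() prints the same groups in insertion order):
#   B: Baboon, Bear
#   C: Cat, Cougar
#   E: Emu
#   G: Giraffe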
|
nadinebabenko/python1
|
week20/day2/XP.py
|
XP.py
|
py
| 4,157 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25004993355
|
from typing import cast, Any
from aea.skills.behaviours import TickerBehaviour
from aea.helpers.search.models import Constraint, ConstraintType, Query
from packages.fetchai.protocols.oef_search.message import OefSearchMessage
from packages.fetchai.skills.tac_control.dialogues import (
OefSearchDialogues,
)
from packages.gdp8.skills.agent_action_each_turn.strategy import BasicStrategy
DEFAULT_REGISTER_AND_SEARCH_INTERVAL = 5.0
environmentFound = False
DEFAULT_SEARCH_QUERY = {
"search_key": "env",## is that the key of the environment ?
"search_value": "v1",
"constraint_type": "==",
}
class EnvSearchBehaviour(TickerBehaviour):
"""This class scaffolds a behaviour."""
def setup(self) -> None:
"""
Implement the setup.
:return: None
"""
def act(self) -> None:
"""
Implement the act.
:return: None
"""
if not environmentFound:
self._search_for_environment()
def teardown(self) -> None:
"""
Implement the task teardown.
:return: None
"""
def _search_for_environment(self) -> None:
"""
Search for active environment (simulation controller).
We assume that the environment is registered as a service
(and with an attribute version = expected_version_id ## ??? do we really need to have that attribute ?)
:return: None
"""
## can add a filter: close to my service if there are too many results
service_key_filter = Constraint(
DEFAULT_SEARCH_QUERY["search_key"],
ConstraintType(
DEFAULT_SEARCH_QUERY["constraint_type"],
DEFAULT_SEARCH_QUERY["search_value"],
),
)
query = Query([service_key_filter],)
oef_search_dialogues = cast(
OefSearchDialogues, self.context.oef_search_dialogues
)
oef_search_msg, _ = oef_search_dialogues.create(
counterparty=self.context.search_service_address,
performative=OefSearchMessage.Performative.SEARCH_SERVICES,
query=query,
)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info(
"searching for environment, search_id={}".format(oef_search_msg.dialogue_reference)
)
class AgentLogicBehaviour(TickerBehaviour):
"""Behaviour looks at if actions required in each tick:
is there agent asking for water info? if so, tell them
is the round done (on my end)? if so, stop
is there enough info for making a decision? if so, do so,
if not, might have to send message to ask for info"""
def setup(self) -> None:
"""
Implement the setup.
:return: None
"""
pass
def act(self) -> None:
strategy = cast(BasicStrategy, self.context.strategy)
there_is_agent_asking_for_water_info = True
while there_is_agent_asking_for_water_info:
there_is_agent_asking_for_water_info = strategy.deal_with_an_agent_asking_for_water_info
if not strategy.is_round_done:
info_is_enough = strategy.enough_info_to_make_decision
if info_is_enough:
strategy.make_decision_send_to_env()
else:
asking_for_info = True
while asking_for_info:
asking_for_info = strategy.potentially_ask_for_info
def teardown(self) -> None:
"""
Implement the task teardown.
:return: None
"""
pass
|
DENE-dev/dene-dev
|
RQ1-data/exp2/1010-OCzarnecki@gdp8-e6988c211a76ac3a2736d49d00f0a6de8b44c3b0/agent_aea/skills/agent_action_each_turn/behaviours.py
|
behaviours.py
|
py
| 3,601 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31111183004
|
def getMoneySpent(keyboards, drives, b):
budget_arr = []
for keyboard in keyboards:
        if keyboard == b:
continue
for drive in drives:
if drive == b:
continue
if (keyboard + drive) <= b:
budget_arr.append(keyboard + drive)
if not budget_arr:
return -1
else:
return max(budget_arr)
if __name__ == '__main__':
maxbudget = getMoneySpent([4], [5], 5)
print(maxbudget)
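# Worked example (illustrative, HackerRank sample): keyboards=[3, 1], drives=[5, 2, 8]
# and b=10 give candidate sums 8, 5, 6, 3 and 9 (3 + 8 = 11 exceeds the budget),
# so the function returns 9.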
|
spl99615/hackerrank
|
electronic_shop.py
|
electronic_shop.py
|
py
| 488 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26470849611
|
""" Problem 34: Digit Factorials
https://projecteuler.net/problem=34
Goal: Find the sum of all numbers less than N that divide the sum of the factorial
of their digits (& therefore have minimum 2 digits).
Constraints: 10 <= N <= 1e5
Factorion: A natural number that equals the sum of the factorials of its digits.
The only non-single-digit factorions are: 145 and 40585.
e.g.: N = 20
qualifying numbers = {19}
as 1! + 9! = 362_881, which % 19 = 0
e.g. 18 does not work as 1! + 8! = 40321, which % 18 > 0
sum = 19
"""
from math import factorial
# pre-calculation of all digit factorials to increase performance
factorials = [factorial(x) for x in range(10)]
def sum_of_digit_factorials_HR(n: int) -> int:
"""
HackerRank specific implementation that finds the sum of all numbers < n that
are divisors of the sum of the factorials of their digits.
"""
overall_total = 0
for num in range(10, n):
num_total = sum([factorials[int(ch)] for ch in str(num)])
if num_total % num == 0:
overall_total += num
return overall_total
def sum_of_digit_factorials_PE() -> int:
"""
Project Euler specific implementation that finds the sum of all numbers that
are factorions.
The numbers cannot have more than 7 digits, as 9! * 8 returns only a 7-digit
number. 9! * 7 returns 2_540_160, so the 1st digit of the 7-digit number cannot
be greater than 2.
"""
overall_total = 0
for n in range(10, 2_000_000):
digits = [int(ch) for ch in str(n)]
n_total = 0
for digit in digits:
n_total += factorials[digit]
# prevents further unnecessary calculation
if n_total > n:
break
if n_total == n:
overall_total += n_total
return overall_total
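# Worked example (illustrative): 145 is a factorion since 1! + 4! + 5! = 1 + 24 + 120 = 145;
# in the HackerRank variant, 19 qualifies because 1! + 9! = 362_881 is divisible by 19.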
|
bog-walk/project-euler-python
|
solution/batch3/problem34.py
|
problem34.py
|
py
| 1,839 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2282915747
|
import torch
from torch import Tensor
from kornia.utils import one_hot
import torch.nn.functional as F
import numpy as np
from matplotlib import pyplot as plt
def reg_loss(prediction, ED, ES, device):
# print(prediction)
prediction_toSyn = prediction.squeeze().detach().cpu().numpy()
y_k = synthetic_label(prediction_toSyn, ED.numpy(), ES.numpy())
# print(prediction.squeeze())
# print(y_k)
# print(ED)
# print(ES)
# print('-----')
mse_loss = F.mse_loss(prediction.squeeze(), y_k.to(device))
temp_loss = ltemp(y_k, prediction_toSyn)
loss = mse_loss + temp_loss
return loss
def synthetic_label(prediction, ED, ES):
y_k = []
for k in range(len(prediction)):
if (int(ED) < k) and (k <= int(ES)):
y_k.append((abs((k-ES)/(ES-ED)))**3)
# print(1)
else:
y_k.append((abs((k-ES)/(ES-ED)))**(1/3))
# print(y_k)
# plt.plot(y_k)
# plt.savefig('y_k.png')
return torch.from_numpy(np.array(y_k, dtype= "float32"))
def ltemp(y_k, prediction):
Linc = linc(y_k, prediction)
Ldec = ldec(y_k, prediction)
ltemp = (Linc+Ldec)/2
# print(ltemp)
return torch.from_numpy(np.array(ltemp, dtype= "float32"))
def linc(y_k, prediction):
Linc = 0
for k in range(len(prediction)-1):
if y_k[k+1] > y_k[k]:
Linc = Linc + max(0,prediction[k]-prediction[k+1])
# print('linc')
return Linc/len(prediction)
def ldec(y_k, prediction):
Ldec = 0
for k in range(len(prediction)-1):
if y_k[k+1] < y_k[k]:
Ldec = Ldec + max(0,prediction[k+1]-prediction[k])
# print('ldec')
return Ldec/len(prediction)
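# Worked example (illustrative): with ED=0 and ES=4, frame k=2 lies inside (ED, ES],
# so its synthetic label is |(2 - 4) / (4 - 0)| ** 3 = 0.5 ** 3 = 0.125, while a frame
# outside that interval uses the cube root instead, e.g. k=6 -> |(6 - 4) / 4| ** (1/3) ~ 0.794.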
|
carlesgarciac/regression
|
regression-cmr/utils/reg_loss.py
|
reg_loss.py
|
py
| 1,707 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26023685530
|
import numpy as np
import matplotlib.pyplot as plt
from fealpy.mesh.uniform_mesh_2d import UniformMesh2d
from scipy.sparse.linalg import spsolve
#from ..decorator import cartesian
class MembraneOscillationPDEData:  # the corresponding implementation is also available in FEALPy
def __init__(self, D=[0, 1, 0, 1], T=[0, 5]):
"""
        @brief Model initialization
        @param[in] D spatial domain of the model
        @param[in] T time domain of the model
"""
self._domain = D
self._duration = T
def domain(self):
"""
        @brief Spatial domain
"""
return self._domain
def duration(self):
"""
        @brief Time interval
"""
return self._duration
def source(self, p, t):
"""
        @brief Right-hand side term of the equation
        @param[in] p numpy.ndarray, spatial points
        @param[in] t float, time point
@return 0
"""
return np.zeros_like(p[..., 0])
def init_solution(self, p):
"""
        @brief Initial condition
        @param[in] p numpy.ndarray, spatial points
        @return val
"""
x, y = p[..., 0], p[..., 1]
val = x**2*(x+y)
return val
def init_solution_diff_t(self, p):
"""
        @brief Time derivative of the initial condition
        @param[in] p numpy.ndarray, spatial points
"""
return np.zeros_like(p[..., 0])
#@cartesian
def dirichlet(self, p, t):
"""
        @brief Dirichlet boundary condition
        @param[in] p numpy.ndarray, spatial points
        @param[in] t float, time point
        @return boundary condition values
"""
return np.zeros_like(p[..., 0])
pde = MembraneOscillationPDEData()
# Spatial discretization
domain = pde.domain()
nx = 100
ny = 100
hx = (domain[1] - domain[0])/nx
hy = (domain[3] - domain[2])/ny
mesh = UniformMesh2d([0, nx, 0, ny], h=(hx, hy), origin=(domain[0], domain[2]))
# Temporal discretization
duration = pde.duration()
nt = 1000
tau = (duration[1] - duration[0])/nt
# Prepare initial values
uh0 = mesh.interpolate(pde.init_solution, 'node') # (nx+1, ny+1)
vh0 = mesh.interpolate(pde.init_solution_diff_t, 'node') # (nx+1, ny+1)
uh1 = mesh.function('node') # (nx+1, ny+1)
def advance_explicit(n, *frags):
"""
    @brief Explicit time stepping
    @param[in] n int, the n-th time step
"""
t = duration[0] + n*tau
if n == 0:
return uh0, t
elif n == 1:
rx = tau/hx
ry = tau/hy
uh1[1:-1, 1:-1] = 0.5*rx**2*(uh0[0:-2, 1:-1] + uh0[2:, 1:-1]) + \
0.5*ry**2*(uh0[1:-1, 0:-2] + uh0[1:-1, 2:]) + \
(1 - rx**2 - ry**2)*uh0[1:-1, 1:-1] + tau*vh0[1:-1, 1:-1]
gD = lambda p: pde.dirichlet(p, t)
mesh.update_dirichlet_bc(gD, uh1)
return uh1, t
else:
A = mesh.wave_operator_explicit(tau)
source = lambda p: pde.source(p, t + tau)
f = mesh.interpolate(source, intertype='node')
f *= tau**2
uh2 = [email protected] - uh0.flat
uh0[:] = uh1[:]
uh1.flat = uh2
gD = lambda p: pde.dirichlet(p, t + tau)
mesh.update_dirichlet_bc(gD, uh1)
#solution = lambda p: pde.solution(p, t + tau)
#e = mesh.error(solution, uh1, errortype='max')
#print(f"the max error is {e}")
return uh1, t
def advance_implicit(n, *frags):
"""
    @brief Implicit time stepping
    @param[in] n int, the n-th time step
"""
t = duration[0] + n*tau
if n == 0:
return uh0, t
elif n == 1:
rx = tau/hx
ry = tau/hy
uh1[1:-1, 1:-1] = 0.5*rx**2*(uh0[0:-2, 1:-1] + uh0[2:, 1:-1]) + \
0.5*ry**2*(uh0[1:-1, 0:-2] + uh0[1:-1, 2:]) + \
(1 - rx**2 - ry**2)*uh0[1:-1, 1:-1] + tau*vh0[1:-1, 1:-1]
gD = lambda p: pde.dirichlet(p, t)
mesh.update_dirichlet_bc(gD, uh1)
return uh1, t
else:
A0, A1, A2 = mesh.wave_operator_implicit(tau)
source = lambda p: pde.source(p, t + tau)
f = mesh.interpolate(source, intertype='node')
f *= tau**2
f.flat += [email protected] + [email protected]
uh0[:] = uh1[:]
gD = lambda p: pde.dirichlet(p, t + tau)
A0, f = mesh.apply_dirichlet_bc(gD, A0, f)
uh1.flat = spsolve(A0, f)
#solution = lambda p: pde.solution(p, t + tau)
#e = mesh.error(solution, uh1, errortype='max')
#print(f"the max error is {e}")
return uh1, t
"""
box = [0, 1, 0, 1, 0, 5]
fig, axes = plt.subplots()
mesh.show_animation(fig, axes, box, advance_explicit,
fname='explicit.mp4', plot_type='imshow', frames=nt+1)
plt.show()
"""
box = [0, 1, 0, 1, -2, 2]
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
axes = fig.add_subplot(111, projection='3d')
mesh.show_animation(fig, axes, box, advance_explicit,
fname='explicit.mp4', plot_type='surface', frames=nt+1)
plt.show()
"""
box = [0, 1, 0, 1, -1, 1]
fig, axes = plt.subplots()
mesh.show_animation(fig, axes, box, advance_implicit,fname='implicit.mp4', plot_type='imshow', frames=nt+1)
plt.show()
box = [0, 1, 0, 1, -2.0, 2.0]
fig = plt.figure()
axes = fig.add_subplot(111, projection='3d')
mesh.show_animation(fig, axes, box, advance_implicit,fname='implicit.mp4', plot_type='surface', frames=nt+1)
plt.show()
"""
|
suanhaitech/pythonstudy2023
|
Mia_wave/wace_2.py
|
wace_2.py
|
py
| 5,381 |
python
|
en
|
code
| 2 |
github-code
|
6
|
7298829560
|
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.visualization import make_lupton_rgb
from matplotlib.colors import LogNorm
from astropy.wcs import WCS
import numpy as np
db_open = [fits.open('frame-g-006793-1-0130.fits'),
fits.open('frame-i-006793-1-0130.fits'),
fits.open('frame-r-006793-1-0130.fits'),
fits.open('frame-u-006793-1-0130.fits'),
fits.open('frame-z-006793-1-0130.fits')]
class Glx(object):
def __init__(self, d):
self.g = d[0]
self.i = d[1]
self.r = d[2]
self.u = d[3]
self.z = d[4]
def img_rgb(self, nome='Galáxia'):
## rgb = make_lupton_rgb(self.i[0].data[8:1396,::], self.r[0].data[0:1388,::], self.g[0].data[12:1400,::], stretch=1, Q=10)
rgb = make_lupton_rgb(self.i[0].data, self.g[0].data, self.u[0].data, stretch=1, Q=10)
plt.imshow(rgb, origin='lower')
plt.title(nome)
plt.show()
def Log_Norm(self):
plt.imshow(self.r[0].data, cmap='gray', origin='lower', norm=LogNorm())
plt.show()
def Img_1_cor(self):
fig, ((ax0, ax1, ax2), (ax3, ax4, ax5)) = plt.subplots(nrows=2, ncols=3, sharex=True, figsize=(18, 8))
ax0.imshow(self.i[0].data, origin='lower', vmin=0.0001, vmax=0.6, cmap='RdBu')
ax0.set_title('Filtro I')
ax1.imshow(self.g[0].data, origin='lower', vmin=0.0001, vmax=0.6, cmap='RdBu')
ax1.set_title('Filtro G')
ax3.imshow(self.r[0].data, origin='lower', vmin=0.0001, vmax=0.6, cmap='RdBu')
ax3.set_title('Filtro R')
ax4.imshow(self.z[0].data, origin='lower', vmin=0.0001, vmax=0.6, cmap='RdBu')
ax4.set_title('Filtro Z')
ax5.imshow(self.u[0].data, origin='lower', vmin=0.0001, vmax=0.6, cmap='RdBu')
ax5.set_title('Filtro U')
fig.delaxes(ax=ax2)
plt.show()
def pl(self):
g = self.g[0].data
print(g.shape)
print(g.min())
print(g.max())
print(g.mean())
print(np.percentile(g.flatten(),3))
print(np.percentile(g.flatten(), 97))
fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2, sharex=True, figsize=(18, 8))
ax0.imshow(g, vmin=0.1, vmax=6, origin='lower', cmap='viridis')
ax1.imshow(g, vmin=np.percentile(g.flatten(),5), vmax=np.percentile(g.flatten(), 95), origin='lower', cmap='viridis')
plt.show()
def main(db):
galaxia = Glx(db)
galaxia.pl()
if __name__ == '__main__':
main(db=db_open)
|
ViniBilck/Astro-Vinicius
|
Cubos/Codes/Galaxy - 1/Galaxy1.py
|
Galaxy1.py
|
py
| 2,530 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26606119063
|
#!/usr/bin/env python
# coding: utf-8
# # Import Library
# In[387]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from glob import glob
from sklearn.metrics import mean_absolute_error
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import accuracy_score , classification_report
import pandas_profiling
from category_encoders import OneHotEncoder
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_validate
# In[388]:
t=pd.read_csv("test.csv")
t.info()
# # Import Data
# In[389]:
def wrangel(path):
# read data
df=pd.read_csv(path)
#extract the social name
df["title"]=df["Name"].str.extract("([A-Za-z]+)\.",expand=False)
    #convert title to categorical data
df.loc[df["title"]=="Mr" , "title"] = 0
df.loc[df["title"]=="Miss" , "title"] = 1
df.loc[df["title"]=="Mrs" , "title"] = 2
df.loc[df["title"]=="Master" , "title"] = 3
conditions = (df["title"] == 'Ms') | (df["title"] == 'Col') | (df["title"] == 'Rev') | (df["title"] == 'Dr') | (df["title"] == 'Dona')
df.loc[conditions, "title"] = 4
    #fill NaN values of Fare according to the Pclass median
df["Fare"].fillna(df.groupby("Pclass")["Fare"].transform("median"),inplace=True)
    #fill NaN values of Age according to the title median
df["Age"].fillna(df.groupby("title")["Age"].transform("median"),inplace=True)
    #fill NaN values of Embarked with the most common port (S)
df["Embarked"]=df["Embarked"].fillna("S")
#remove nan columns
drop=[]
drop.append("Cabin")
drop.append("Name")
drop.append("Ticket")
drop.append("title")
df.drop(columns=drop,inplace=True)
    #convert Sex to categorical data
df.loc[df["Sex"]=="male" , "Sex"] = 0 # Male ---> 0
df.loc[df["Sex"]=="female" , "Sex"] = 1 # Female ---> 1
    #convert Embarked to categorical data
df.loc[df["Embarked"]=="S" , "Embarked"] = 0 # S ---> 1
df.loc[df["Embarked"]=="C" , "Embarked"] = 1 # C ---> 2
df.loc[df["Embarked"]=="Q" , "Embarked"] = 2 # Q ---> 3
return df
# In[390]:
test = wrangel("test.csv")
df = wrangel("train.csv")
# In[340]:
df.head()
# In[341]:
df.info()
# In[391]:
pandas_profiling.ProfileReport(df)
# In[343]:
df["Embarked"].value_counts()
# In[344]:
test.info()
# In[352]:
test.isnull().sum()
# # Explore Data
# In[353]:
print("Survive :",(df["Survived"]==1).sum())
print("Deceased :",(df["Survived"]==0).sum())
# In[354]:
df.describe()
# In[355]:
# Create the pie chart
values=df["Survived"].value_counts()
label=["Deceased ","Survive "]
plt.pie(values, labels=label,autopct='%1.1f%%')
# Add a title
plt.title('Distribution of Survived')
# Display the chart
plt.show()
# In[356]:
plt.hist(df["Parch"],bins=5, edgecolor='black');
plt.xlabel('Values')
plt.ylabel('Frequancy')
plt.title("Values of Parch")
plt.show();
# In[357]:
survive=df[df["Survived"]==1]["SibSp"].value_counts()
death=df[df["Survived"]==0]["SibSp"].value_counts()
dx=pd.DataFrame([survive,death],index=["survive","death"])
dx.plot(kind="bar");
plt.title("Survive of SibSp ");
# In[358]:
survive=df[df["Survived"]==1]["Pclass"].value_counts()
death=df[df["Survived"]==0]["Pclass"].value_counts()
dx=pd.DataFrame([survive,death],index=["survive","death"])
dx.plot(kind="bar");
plt.title("Survive of Pclass ");
# In[359]:
class1=df[df["Pclass"]==1]["Embarked"].value_counts()
class2=df[df["Pclass"]==2]["Embarked"].value_counts()
class3=df[df["Pclass"]==3]["Embarked"].value_counts()
dx=pd.DataFrame([class1,class2,class3],index=["class 1","class 2","class 3"])
dx.plot(kind="bar",stacked=True);
plt.title("Survive of Pclass ");
# We Found that Embarked from S in 1st & 2nd & 3rd Class
# In[360]:
# Create the pie chart
values=df["Sex"].value_counts()
label=["male","female"]
plt.pie(values, labels=label,autopct='%1.1f%%')
# Add a title
plt.title('Distribution of Survived')
# Display the chart
plt.show()
# In[361]:
survive = df[df["Survived"]==1]["Sex"].value_counts()
death = df[df["Survived"]==0]["Sex"].value_counts()
dx = pd.DataFrame([survive,death],index=["survive","death"])
dx=dx.rename(columns={0:"male",1:"female"})
dx.plot(kind="bar")
plt.legend()
plt.title("Survive of Sex");
# In[ ]:
# In[362]:
corrleation = df.drop(columns="Survived").corr()
sns.heatmap(corrleation)
# # Split Data
# In[ ]:
# In[363]:
df
# In[364]:
target="Survived"
y = df[target]
X = df.drop(columns=target)
x_train , x_test , y_train , y_test = train_test_split(X,y,test_size=0.2,random_state=42)
print("X_train shape:", x_train.shape)
print("y_train shape:", y_train.shape)
print("X_test shape:", x_test.shape)
print("y_test shape:", y_test.shape)
# # Baseline
# In[365]:
y_train_mean = y_train.mean()
print ("Baseline :",round(y_train_mean,2))
# # Logestic Regression
# # Iterate
# In[366]:
log_model = LogisticRegression(max_iter=10000)
# In[367]:
log_model.fit(x_train,y_train)
# #
# # Evaluate
# In[368]:
accuracy=classification_report(y_test,log_model.predict(x_test))
print(accuracy)
# In[369]:
acc_test = accuracy_score(y_test,log_model.predict(x_test))
acc_train = accuracy_score(y_train,log_model.predict(x_train))
print("Accuracy test:",round(acc_test,2))
print("Accuracy train:",round(acc_train,2))
# # KNN Classfier
# In[370]:
knn= KNeighborsClassifier(n_neighbors=13)
knn.fit(x_train,y_train)
# In[371]:
accuracy=classification_report(y_test,knn.predict(x_test))
print(accuracy)
# In[372]:
scoring="accuracy"
score = cross_validate(knn , x_train.drop(columns=["PassengerId"],axis=1),y_train,cv=k_fold, n_jobs=1,scoring=scoring)
print(score['test_score'])
# In[373]:
print("Accuracy :",round(np.mean(score['test_score']),2))
# # Decision Tree
# In[374]:
# Create a decision tree classifier
dec_tree= DecisionTreeClassifier()
# Train the classifier
dec_tree.fit(x_train, y_train)
# In[375]:
accuracy=classification_report(y_test,dec_tree.predict(x_test))
print(accuracy)
# In[376]:
acc_test = accuracy_score(y_test,dec_tree.predict(x_test))
print("Accuracy test:",round(acc_test,2))
# In[377]:
scoring="accuracy"
score = cross_validate(dec_tree , x_train.drop(columns=["PassengerId"],axis=1),y_train,cv=k_fold, n_jobs=1,scoring=scoring)
print("Accuracy :",round(np.mean(score['test_score']),2))
# # Random Forest
# In[378]:
# Create a Random Forest classifier
rf_classifier = RandomForestClassifier()
# Train the classifier
rf_classifier.fit(x_train, y_train)
# In[379]:
# Calculate the accuracy
accuracy = accuracy_score(y_test, rf_classifier.predict(x_test))
print("Accuracy:", round(accuracy,2))
# In[380]:
scoring="accuracy"
score = cross_validate(rf_classifier , x_train.drop(columns=["PassengerId"],axis=1),y_train, n_jobs=1,scoring=scoring)
print("Accuracy :",round(np.mean(score['test_score']),1))
# # Naive Bayes
# In[381]:
nav= GaussianNB()
# Train the classifier
nav.fit(x_train, y_train)
# In[382]:
# Calculate the accuracy
accuracy = accuracy_score(y_test, nav.predict(x_test))
print("Accuracy:", round(accuracy,2))
# In[383]:
scoring="accuracy"
score = cross_validate(nav , x_train.drop(columns=["PassengerId"],axis=1),y_train, n_jobs=1,scoring=scoring)
print("Accuracy :",round(np.mean(score['test_score']),2))
# # Communicate
# The best model is Random Forest with Accuracy : 82
# In[384]:
pred_test=rf_classifier.predict(test)
data = pd.DataFrame({'PassengerId': test["PassengerId"], 'Survived': pred_test})
# In[385]:
data.head()
# In[386]:
data.to_csv(r'D:\projects\gender_submission.csv', index=False)
# In[ ]:
|
tamerelateeq/Titanc
|
titank.py
|
titank.py
|
py
| 8,173 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14469263973
|
'''
You are given an integer n. There is an undirected graph with n nodes, numbered from 0 to n - 1. You are given a 2D integer array edges where edges[i] = [ai, bi] denotes that there exists an undirected edge connecting nodes ai and bi.
Return the number of pairs of different nodes that are unreachable from each other.
Example 1:
Input: n = 3, edges = [[0,1],[0,2],[1,2]]
Output: 0
Explanation: There are no pairs of nodes that are unreachable from each other. Therefore, we return 0.
Example 2:
Input: n = 7, edges = [[0,2],[0,5],[2,4],[1,6],[5,4]]
Output: 14
Explanation: There are 14 pairs of nodes that are unreachable from each other:
[[0,1],[0,3],[0,6],[1,2],[1,3],[1,4],[1,5],[2,3],[2,6],[3,4],[3,5],[3,6],[4,6],[5,6]].
Therefore, we return 14.
Constraints:
1 <= n <= 10^5
0 <= edges.length <= 2 * 10^5
edges[i].length == 2
0 <= ai, bi < n
ai != bi
There are no repeated edges.
'''
from typing import List
from collections import defaultdict


class Solution:
def countPairs(self, n: int, edges: List[List[int]]) -> int:
adj = defaultdict(list)
for edge in edges:
adj[edge[0]].append(edge[1])
adj[edge[1]].append(edge[0])
num_pairs = 0
size = 0
remaining = n
visit = [False] * n
for i in range(n):
if not visit[i]:
size = self.dfs(i, adj, visit)
num_pairs += size * (remaining - size)
remaining -= size
return num_pairs
def dfs(self, node, adj, visit):
count = 1
visit[node] = True
if node not in adj:
return count
for nei in adj[node]:
if not visit[nei]:
count += self.dfs(nei, adj, visit)
return count
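# A small self-check sketch (not part of the original solution): it runs the two
# examples from the problem statement above and should print 0 and 14.
if __name__ == "__main__":
    s = Solution()
    print(s.countPairs(3, [[0, 1], [0, 2], [1, 2]]))
    print(s.countPairs(7, [[0, 2], [0, 5], [2, 4], [1, 6], [5, 4]]))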
|
loganyu/leetcode
|
problems/2316_count_unreachable_pairs_of_nodes_in_an_undirected_graph.py
|
2316_count_unreachable_pairs_of_nodes_in_an_undirected_graph.py
|
py
| 1,707 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11467832022
|
#Vertex class
class Vertex:
def __init__(self, key):
self.id = key
self.connected_to = {}
#Add neighbors
def add_neighbor(self, nbr, weight=0):
self.connected_to[nbr] = weight
#return all keys in connected to dict
def get_connections(self):
        return self.connected_to.keys()
#Return id
def get_id(self):
return self.id
#Return weight
def get_weight(self, nbr):
return self.connected_to[nbr]
def __str__(self):
return str(self.id) + ' connected to: ' + str([x.id for x in self.connected_to])
#Graph class, represented as an adjacency list
class Graph:
def __init__(self):
self.vert_list = {}
self.num_vert = 0
#Add vertex at index key
def add_vertex(self, key):
self.num_vert += 1
new_vertex = Vertex(key)
self.vert_list[key] = new_vertex
return new_vertex
#Return vertex at index n
def get_vertex(self, n):
#Looks through keys in vert_list
if n in self.vert_list:
return self.vert_list[n]
else:
return None
#Add an edge between two vertices
def add_edge(self, f, t, cost=0):
#f = from vertex
#t = to
#cost = weight
if f not in self.vert_list:
nv = self.add_vertex(f)
if t not in self.vert_list:
nv = self.add_vertex(t)
self.vert_list[f].add_neighbor(self.vert_list[t], cost)
    #Returns all vertices
def get_vertices(self):
return self.vert_list.keys()
    #Make an iterable object
def __iter__(self):
return iter(self.vert_list.values())
def __contains__(self, n):
return n in self.vert_list
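# A tiny illustration (not from the original assignment) of how Graph/Vertex are meant
# to be used; left commented out so it does not change the script's output.
# g_demo = Graph()
# g_demo.add_edge('A', 'B')
# g_demo.add_edge('B', 'C')
# for v in g_demo:
#     print(v)          # e.g. "A connected to: ['B']"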
#Rivalry class
class Rivalry:
def __init__(self, boy1, boy2):
self.boy1 = boy1
self.boy2 = boy2
@staticmethod
def fromList(le):
if len(le) !=2:
raise Exception('Invalid boy line entry')
return Rivalry(le[0], le[1])
def __str__(self):
return "Rivalry(boy1: {}, boy2: {})".format(self.boy1, self.boy2)
def __repr__(self):
return self.__str__()
#Boy class
class Boy:
def __init__(self, index, boy):
self.index = index
self.boy = boy
@staticmethod
def fromList(le):
if len(le) !=2:
raise Exception('Invalid boy')
return Boy(le[0], le[1])
def __str__(self):
return "Rivalry(index: {}, boy: {})".format(self.index, self.boy)
def __repr__(self):
return self.__str__()
def make_graph(boys, rivalries):
#Add each boy as a vertex
g = Graph()
for i in range(len(boys)):
g.add_vertex(boys[i])
#Add each rivalry as an edge, using boy1 as a 'from' vertex and boy2 as a 'to' vertex
for i in range(len(rivalries)):
boy1 = rivalries[i].boy1
boy2 = rivalries[i].boy2
g.add_edge(boy1, boy2)
return g
def bfs(graph_to_search, start, end):
queue = [[start]]
visited = set()
while queue:
# Gets the first path in the queue
path = queue.pop(0)
# Gets the last node in the path
vertex = path[-1]
# Checks if we got to the end
if vertex == end:
return path
# We check if the current node is already in the visited nodes set in order not to recheck it
elif vertex not in visited:
# enumerate all adjacent nodes, construct a new path and push it into the queue
neighbors = vertex.connected_to
#for current_neighbour in graph_to_search.get_vertex(vertex).get_connections():
for current_neighbour in neighbors:
new_path = list(path)
new_path.append(current_neighbour)
queue.append(new_path)
# Mark the vertex as visited
visited.add(vertex)
#Checks each edge to see that it goes between a Babyface and Heel
def edge_check(boys, rivalries, babyfaces, heels):
    # a single rivalry inside the same group invalidates the whole assignment,
    # so bail out as soon as one is found instead of keeping only the last result
    for rivalry in rivalries:
        if rivalry.boy1 in babyfaces:
            if rivalry.boy2 not in heels:
                return False
        elif rivalry.boy1 in heels:
            if rivalry.boy2 not in babyfaces:
                return False
    return True
#*****************************
#Get info from text file
lines = []
with open('boys.txt', 'r') as file:
line = file.readline()
while line:
#Stick current line into new list
cur_line = line.split()
if len(cur_line) > 0:
lines.append(cur_line)
line = file.readline()
#Now that we have all the data, we can parse and format it
if len(lines[0]) != 1:
raise Exception('invalid formatting for number of boys on line 1')
#first line
num_boys = int(lines[0][0])
# print("Number of boys:")
# print(num_boys)
boys = [boy[0] for boy in lines[1:(num_boys+1)]]
#From the number of rivalries to the end
#print("Rivalries:")
rivalries = lines[(2+num_boys):]
# print(rivalries)
# print("Boys")
# print(boys)
#Make a list of Rivalry objects
#rivalries = [Rivalry.fromList(riv) for riv in rivalries]
#Test data
#boys = ['Ace', 'Duke', 'Jax', 'Biggs', 'Stone']
rivalries = [['Ace', 'Duke'], ['Ace', 'Biggs'], ['Jax', 'Duke'], ['Stone', 'Biggs'], ['Stone', 'Duke'], ['Biggs', 'Jax']]
#print(rivalries)
rivalries = [Rivalry.fromList(riv) for riv in rivalries]
#Make the graph
g = make_graph(boys, rivalries)
#Get set of distances
babyfaces=[]
heels=[]
start = g.get_vertex(boys[0])
babyfaces.append(start.id)
#d = 0
for vertex in g:
target = g.get_vertex(vertex.id)
#Call bfs on this vertex to get its distance from Start
if start!=target:
#d +=1
path = bfs(g, start, target)
if path is not None:
            d = len(path) - 1  # path includes both endpoints, so the distance is one less
else:
d = -1
#print"Dist from %s to %s", (start.id, target.id)
if d %2 == 0:
babyfaces.append(vertex.id)
else:
heels.append(vertex.id)
#Check that edges go between babyfaces and heels and not two of the same group
valid_edges = edge_check(boys, rivalries, babyfaces, heels)
if valid_edges == True:
print("This is valid!")
print("babyfaces:")
print(babyfaces)
print("Heels")
print(heels)
else:
print("No, it is not possible to designate this list of boys as one or the other with the given rivalries")
|
sarahovey/AnalysisOfAlgos
|
hw5/hw5.py
|
hw5.py
|
py
| 6,759 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73551642109
|
import random
class Rsa:
def __init__(self, q=19, p=23, size_of_key=0):
self.q = q
self.p = p
if size_of_key:
self.q = self.gen_prime(size_of_key)
self.p = self.gen_prime(size_of_key)
while self.p == self.q :
self.p = self.gen_prime(size_of_key)
self.public, self.private = self.generate_key_pair(self.q, self.p)
def gen_prime(self, size_of_key):
first_primes_list = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
31, 37, 41, 43, 47, 53, 59, 61, 67,
71, 73, 79, 83, 89, 97, 101, 103,
107, 109, 113, 127, 131, 137, 139,
149, 151, 157, 163, 167, 173, 179,
181, 191, 193, 197, 199, 211, 223,
227, 229, 233, 239, 241, 251, 257,
263, 269, 271, 277, 281, 283, 293,
307, 311, 313, 317, 331, 337, 347, 349]
def nBitRandom(n):
return random.randrange(2**(n-1)+1, 2**n - 1)
def getLowLevelPrime(n):
            '''Generate a prime candidate that is not divisible
            by any of the first primes'''
while True:
# Obtain a random number
pc = nBitRandom(n)
# Test divisibility by pre-generated
# primes
for divisor in first_primes_list:
if pc % divisor == 0 and divisor**2 <= pc:
break
else: return pc
def isMillerRabinPassed(mrc):
'''Run 20 iterations of Rabin Miller Primality test'''
maxDivisionsByTwo = 0
ec = mrc-1
while ec % 2 == 0:
ec >>= 1
maxDivisionsByTwo += 1
assert(2**maxDivisionsByTwo * ec == mrc-1)
def trialComposite(round_tester):
if pow(round_tester, ec, mrc) == 1:
return False
for i in range(maxDivisionsByTwo):
if pow(round_tester, 2**i * ec, mrc) == mrc-1:
return False
return True
# Set number of trials here
numberOfRabinTrials = 20
for i in range(numberOfRabinTrials):
round_tester = random.randrange(2, mrc)
if trialComposite(round_tester):
return False
return True
while True:
prime_candidate = getLowLevelPrime(size_of_key)
if not isMillerRabinPassed(prime_candidate):
continue
else:
print(size_of_key, "bit prime is: \n", prime_candidate)
return prime_candidate
def modInverse(self, e, phi):
m0 = phi
y = 0
x = 1
if (phi == 1):
return 0
while (e > 1):
# q is quotient
q = e // phi
t = phi
# m is remainder now, process
# same as Euclid's algo
phi = e % phi
e = t
t = y
# Update x and y
y = x - q * y
x = t
# Make x positive
if (x < 0):
x = x + m0
return x
'''
Tests to see if a number is prime.
'''
def is_prime(self, num):
if num == 2:
return True
if num < 2 or num % 2 == 0:
return False
for n in range(3, int(num**0.5)+2, 2):
if num % n == 0:
return False
return True
def gcd(self, a, b):
while b != 0:
a, b = b, a % b
return a
def generate_key_pair(self, p, q):
# if not (self.is_prime(p) and self.is_prime(q)):
# raise ValueError('Both numbers must be prime.')
# elif p == q:
# raise ValueError('p and q cannot be equal')
# n = pq
n = p * q
# Phi is the totient of n
phi = (p-1) * (q-1)
# Choose an integer e such that e and phi(n) are coprime
e = random.randrange(1, phi)
# Use Euclid's Algorithm to verify that e and phi(n) are coprime
g = self.gcd(e, phi)
while g != 1:
e = random.randrange(1, phi)
g = self.gcd(e, phi)
# Use Extended Euclid's Algorithm to generate the private key
d = self.modInverse(e, phi)
# Return public and private key_pair
# Public key is (e, n) and private key is (d, n)
return ((e, n), (d, n))
#def encrypt(self, plaintext, public_key):
def encrypt(self, plaintext , pub_key):
        # Unpack the key into its components
#key , n = public_key
key , n = pub_key
# Convert each letter in the plaintext to numbers based on the character using a^b mod m
cipher = [pow(ord(char), key , n) for char in plaintext]
# Return the array of bytes
return cipher
def decrypt(self, ciphertext):
# Unpack the key into its components
key, n = self.private
ciphertext = ciphertext.split('\/')
ciphertext.pop()
ciphertext = ciphertext
# Generate the plaintext based on the ciphertext and key using a^b mod m
aux = [str(pow(int(char), key, n)) for char in ciphertext]
# Return the array of bytes as a string
plain = [chr(int(char2)) for char2 in aux]
return ''.join(plain)
# if __name__ == '__main__':
# while True:
# print("------------------")
# x = Rsa(size_of_key=512)
# print("Public Key :",x.public)
# cipher = x.encrypt(input("Plain Text < "),x.public)
# plain = x.decrypt(cipher)
# print("Plain Text >",plain)
|
Ibrahim-AbuShara/End-to-End-Encryption
|
RSA.py
|
RSA.py
|
py
| 5,861 |
python
|
en
|
code
| 1 |
github-code
|
6
|
10775113939
|
import re
import os
from collections import Counter, defaultdict, namedtuple
from itertools import combinations, product
from pprint import pprint
from parse import parse, findall
from math import prod, sqrt
dirname = os.path.dirname(__file__)
data = open(f'{dirname}/21-input.txt').read().splitlines()
data = [parse('{} (contains {})', d).fixed for d in data]
data = [(set(i.split(' ')), a.split(', ')) for i, a in data]
set_of_all_ingredients = set.union(*[i for i, _ in data])
all_ingredients = sum((list(i) for i, _ in data), list())
result = dict()
for ingredients, allergens in data:
for allergen in allergens:
if allergen in result:
result[allergen] &= ingredients
else:
result[allergen] = ingredients.copy()
set_of_ingredients_with_no_allergens = set_of_all_ingredients.copy()
for allergen, ingredients in result.items():
set_of_ingredients_with_no_allergens -= ingredients
print(sum([all_ingredients.count(s) for s in set_of_ingredients_with_no_allergens]))
result2 = {}
allergens_left = list(result.keys())
ingredients_identified = set()
while allergens_left:
for allergen in allergens_left.copy():
possibles = result[allergen] - ingredients_identified
if len(possibles) == 1:
ingredient = possibles.pop()
result2[allergen] = ingredient
ingredients_identified.add(ingredient)
allergens_left.remove(allergen)
print(','.join(result2[allergen] for allergen in sorted(result2.keys())))
|
knjmooney/Advent-Of-Code
|
2020/21-allergens.py
|
21-allergens.py
|
py
| 1,513 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37111986575
|
# This has to keep running, so it cannot be run inside a Jupyter notebook.
# Purpose: receive data from Dialogflow, process it here, and send the result back to Dialogflow.
# To do that, the response must be returned as JSON.
import requests
import urllib
import IPython.display as ipd
import json
from bs4 import BeautifulSoup
from flask import Flask, request, jsonify
def getWeather(city) :
url = "https://search.naver.com/search.naver?query="
url = url + urllib.parse.quote_plus(city + "날씨")
print(url)
bs = BeautifulSoup(urllib.request.urlopen(url).read(), "html.parser")
temp = bs.select('span.todaytemp')
desc = bs.select('p.cast_txt')
    # A dictionary is a good way to return the result.
return {"temp":temp[0].text, "desc":desc[0].text} #temp가 온도, desc가 어제보다 4도 낮아요.
#return {"temp":temp[4+7].text, "desc":desc[0].text} #dctionery방식으로 하면, 이런식으로, 수정할때 용이하다.
#return temp[0].text + "/" + desc[0].text #리턴 값을 문자열로 준다.
#Flask 객체 생성
app = Flask(__name__)
@app.route('/') # A 'decorator': it wraps the function below so requests to this URL from the browser are routed to home().
# Even without knowing the internals, it makes building a web application easy.
def home():
name = request.args.get("name")
item = request.args.get("item")
return "hello"#호출할때, 반드시 name이라는 파라미터를 호출해야한다.
@app.route('/abc') # Same decorator pattern: requests to '/abc' are routed to abc().
# Even without knowing the internals, it makes building a web application easy.
def abc():
return "test"
@app.route('/weather') # Same decorator pattern: requests to '/weather' are routed to weather().
# Even without knowing the internals, it makes building a web application easy.
def weather():
city = request.args.get("city")
info = getWeather(city)
#return "<font color=red>" + info["temp"] + "도 " + info["desc"] + "</font>"
    #return info # doesn't work: a plain dict is not a web-standard response
#return json.dumps(info)
return jsonify(info)
# A server that returns nothing but 'Hello' no matter what request comes in.
# Callable via both GET and POST; when actually serving, drop the GET method.
# GET is only for debugging; since this is not a public server, Dialogflow cannot call us directly.
@app.route('/dialogflow', methods=['GET', 'POST'])
def dialogflow():
req = request.get_json(force=True)
print(json.dumps(req, indent=4))
res = {'fulfillmentText':'Hello~~~'}
return jsonify(res)
# The response must follow the protocol defined by Dialogflow and be returned as JSON.
if __name__ == '__main__':
    # For host, you can put the machine's real IP instead of 0.0.0.0; 0.0.0.0 accepts connections even when you do not know the IP. Normally you would use your own IP.
    # In that case, either 0.0.0.0 or 127.0.0.1 can be used.
app.run(host='0.0.0.0', port = 3000, debug=True)
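# A quick manual test sketch (not in the original file): with the server running,
# GET http://127.0.0.1:3000/weather?city=<city name> should return JSON like
# {"temp": "...", "desc": "..."}, and POSTing any JSON body to /dialogflow
# should return {"fulfillmentText": "Hello~~~"}.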
|
ssh6189/2020.02.05
|
server.py
|
server.py
|
py
| 3,365 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
38169404173
|
from . import parse as parser
from modules import YaraRules, GeoIP, ProjectHoneyPot, LangDetect
class Scanner:
def __init__(self):
self.yara_manager = YaraRules.YaraManager()
def parse_email(self, email_content: str):
return parser.parse_email(email_content)
def scan(self, email: str):
# parse it
parsed_email = self.parse_email(email)
# Use LangDetect on the body of the email, check if it's HTML or not
# If it's HTML, use BeautifulSoup to parse it
# If it's not HTML, just continue like usual
content = parsed_email.get_payload()
#lang = LangDetect.detect_language(content)
potentialLanguage = []
# Loop around the content if it has multiple parts
# Then use the LangDetect to detect the language of each part
# Append the result to the potentialLanguage list
if parsed_email.is_multipart():
for part in parsed_email.walk():
content_type = part.get_content_type()
content_disposition = str(part.get("Content-Disposition"))
# Extract text/plain content
if "attachment" not in content_disposition and "text/plain" in content_type:
content = part.get_payload(decode=True)
print("Content -> ", content)
# turn content into string but also fix some encoding issues, and make it prettier for it to read
content = content.decode('utf-8', 'ignore')
content = content.replace("\r\n", "")
content = content.replace("\n", "")
content = content.replace("\t", "")
lang = LangDetect.detect_language(content)
potentialLanguage.append(lang)
else:
continue
print("Language -> ", potentialLanguage)
# get ip and geoip
ip = parsed_email.get("Received-SPF").split("client-ip=")[1].split(";")[0]
print("IP Address -> " + str(ip))
#geoip = GeoIP.GeoIP(ip)
#print("GeoIP -> ", geoip)
# check if ip is in honeypot
honeypot = ProjectHoneyPot.ProjectHoneyPot(ip)
print("Honeypot -> " + str(honeypot))
# analyze it
analysis_result = self.yara_manager.analyze_email(email)
# return the result
return {
"analysis_result": analysis_result,
"parsed_email": parsed_email,
#"geoip": geoip,
"honeypot": honeypot,
}
|
lukasolsen/EmailAnalyser
|
server/base/service/scan.py
|
scan.py
|
py
| 2,296 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2247235592
|
testname = 'TestCase 4.1.1'
avoiderror(testname)
printTimer(testname, 'Start', '测试AC通过配置二层vlan发现列表发现AP')
################################################################################
# Step 1
#
# Actions
# On AC1 create vlan20 and move port s1p1 into vlan20.
# On AC1 add vlan20 to the automatic discovery vlan list:
# AC1(config-wireless)#discovery vlan-list 20
# On S3 move s3p1 into vlan20.
#
# Expected
# On AC1, 'show wireless discovery vlan-list' now shows '20' under the 'VLAN' column.
#
################################################################################
printStep(testname, 'Step 1',
'Config AC1 and S3 to enable discover AP1 automatically',
'Check the result')
res1 = 1
# operate
# AC1 configuration
EnterConfigMode(switch1)
SetCmd(switch1, 'vlan', Vlan20)
SetCmd(switch1, 'switchport interface', s1p1)
EnterInterfaceMode(switch1, 'vlan ' + Vlan20)
IdleAfter(3)
SetIpAddress(switch1, If_vlan20_s1_ipv4, '255.255.255.0')
# disable the L3 (IP) discovery of AP1 from the initial configuration
EnterWirelessMode(switch1)
SetCmd(switch1, 'no discovery ip-list', Ap1_ipv4)
SetCmd(switch1, 'no discovery ipv6-list', Ap1_ipv6)
# enable L2 (vlan) discovery
EnterWirelessMode(switch1)
SetCmd(switch1, 'discovery vlan-list', Vlan20)
# S3 configuration
EnterConfigMode(switch3)
SetCmd(switch3, 'vlan', Vlan20)
SetCmd(switch3, 'switchport interface', s3p1)
EnterEnableMode(switch1)
data1 = SetCmd(switch1, 'show wireless discovery vlan-list', timeout=5)
# check
res1 = CheckLine(data1, Vlan20, 'vlan', IC=True)
# result
printCheckStep(testname, 'Step 1', res1)
################################################################################
# Step 2
# Actions
# Reboot AP1:
# WLAN-AP# reboot
#
# Expected
# After the reboot, AP1 is managed by AC1. On AC1, 'show wi ap status' shows the AP's
# "Status" as "Managed" and "Configuration Status" as "Success".
################################################################################
printStep(testname, 'Step 2',
'Reboot AP1',
'Check if AC1 managed AP1')
res1 = 1
# operate
# 'set managed-ap mode' is a hidden AP command that makes the AP re-register with the AC (used here instead of rebooting the AP)
ChangeAPMode(ap1, ap1mac, switch1, Ap1cmdtype)
IdleAfter(20)
EnterEnableMode(switch1)
res1 = CheckSutCmd(switch1, 'show wireless ap status',
check=[(ap1mac, 'Managed', 'Success')],
waittime=5, retry=20, interval=5, IC=True)
# result
printCheckStep(testname, 'Step 2', res1)
################################################################################
# Step 3
#
# Actions
# On AC1 run 'show wireless ap <AP1MAC> status' and check the Discovery Reason.
#
# Expected
# AC1 shows "Discovery Reason" as "L2 Poll Received".
################################################################################
printStep(testname, 'Step 3',
'AC1 show wireless ap <AP1MAC> status',
'Check the result')
res1 = 1
# operate&check
EnterEnableMode(switch1)
res1 = CheckSutCmd(switch1, 'show wireless ap ' + ap1mac + ' status',
check=[('Discovery Reason', 'L2 Poll Received')],
waittime=8, retry=10, interval=5, IC=True)
# result
printCheckStep(testname, 'Step 3', res1)
################################################################################
# Step 4
# Actions
# On AC1 remove vlan 20 from the vlan discovery list:
# no discovery vlan-list 20
#
# Expected
# On AC1, 'show wireless discovery vlan-list' no longer shows "20" under the "VLAN" column.
################################################################################
printStep(testname, 'Step 4',
'Delete discovery vlan-list 20 on AC1',
'Check the result')
res1 = 1
# operate
EnterWirelessMode(switch1)
SetCmd(switch1, 'no discovery vlan-list', Vlan20)
data1 = SetCmd(switch1, 'show wireless discovery vlan-list')
# check
res1 = CheckLine(data1, Vlan20, 'vlan', IC=True)
res1 = 1 if 0 == res1 else 0
# result
printCheckStep(testname, 'Step 4', res1)
################################################################################
# Step 5
# Actions
# Reboot AP1:
# WLAN-AP# reboot
#
# Expected
# After the reboot, AP1 can no longer be managed by AC1. On AC1, 'show wi ap status'
# shows the AP's "Status" as "Failed".
################################################################################
printStep(testname, 'Step 5',
'Reboot AP1',
'Check if AC1 managed AP1')
res1 = 1
# operate
ChangeAPMode(ap1, ap1mac, switch1, Ap1cmdtype)
IdleAfter(30)
EnterEnableMode(switch1)
data1 = SetCmd(switch1, 'show wireless ap status', timeout=5)
# check
res1 = CheckLine(data1, ap1mac, 'Failed', IC=True)
# result
printCheckStep(testname, 'Step 5', res1)
################################################################################
# Step 6
# Actions
# Restore the default configuration.
################################################################################
printStep(testname, 'Step 6',
'Recover initial config')
# operate
# restore S3
EnterConfigMode(switch3)
SetCmd(switch3, 'vlan', Vlan40, timeout=1)
SetCmd(switch3, 'switchport interface', s3p1, timeout=3)
# restore AC1
EnterConfigMode(switch1)
SetCmd(switch1, 'no interface vlan', Vlan20, timeout=5)
SetCmd(switch1, 'no vlan', Vlan20, timeout=3)
SetCmd(switch1, 'vlan', Vlan40, timeout=3)
SetCmd(switch1, 'switchport interface', s1p1, timeout=3)
# re-enable L3 discovery of AP1
EnterWirelessMode(switch1)
SetCmd(switch1, 'discovery ip-list', Ap1_ipv4)
SetCmd(switch1, 'discovery ipv6-list', Ap1_ipv6)
# IdleAfter(Ap_connect_after_reboot)
CheckSutCmd(switch1, 'show wireless ap status',
check=[(ap1mac, 'Managed', 'Success'), (ap2mac, 'Managed', 'Success')],
waittime=5, retry=20, interval=5, IC=True)
# end
printTimer(testname, 'End')
|
guotaosun/waffirm
|
autoTests/waffirm/waffirm_4.1.1_ONE.py
|
waffirm_4.1.1_ONE.py
|
py
| 5,708 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73789788989
|
maior = 0
from random import randint
import time
from operator import itemgetter
dados = {'j1': randint(1,6), 'j2': randint(1,6), 'j3': randint(1,6),
'j4': randint(1,6), 'j5': randint(1,6) }
for d,i in dados.items():
time.sleep(1)
print(f'joogador {d} tirou o numero {i}')
ranking = dict()
ranking = sorted(dados.items(), key=itemgetter(1), reverse=True)
for d,i in enumerate(ranking):
print(f'{d+1}o. {i}')
|
Kaue-Marin/Curso-Python
|
pacote dowlond/curso python/exercicio91.py
|
exercicio91.py
|
py
| 419 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35574468725
|
from collections import defaultdict
def createGraph():
g=defaultdict(list)
return g
def topoSort(g,indeg,q,cnt,n,res):
    for i in range(n):
        if indeg[i] == 0:
            q.append(i)
    while(q):
        cur=q.pop(0)
        for i in g[cur]:
            indeg[i]-=1
            if indeg[i] == 0:
                q.append(i)
        res.append(cur)
        cnt+=1
    if cnt == n:
        return True
    return False
def kahnsAlgo(g,indeg,n):
    q,res=[],[]
    if topoSort(g,indeg,q,0,n,res):
        return res
    return []
if __name__ == "__main__":
# prequisites=[[1,0],[2,0],[3,1],[3,2]]
prequisites=[[1,0],[2,1],[3,2],[1,3]]
g=createGraph()
n=4
indeg=[0]*n
for i,j in prequisites:
g[j].append(i)
indeg[i]+=1
ans=kahnsAlgo(g,indeg,n)
print(ans)
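    # Illustration (not in the original file): the cyclic prerequisites above
    # ([1,0],[2,1],[3,2],[1,3]) should print [], since only node 0 ever reaches
    # indegree 0; the commented-out acyclic list would give a valid topological
    # order such as [0, 1, 2, 3].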
|
goyalgaurav64/Graph
|
topological-sort-kahns-algo-bfs.py
|
topological-sort-kahns-algo-bfs.py
|
py
| 868 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27643482594
|
from rest_framework.decorators import api_view
from rest_framework.response import Response
from base.serializers import ProductSerializer, UserSerializer, UserSerializerWithToken
from base.models import Product
@api_view(['GET'])
def getProducts(request):
    query = request.query_params.get('keyword')
    if query is None:
        query = ""
    products = Product.objects.filter(name__icontains=query)
    serializer = ProductSerializer(products, many=True)
    return Response(serializer.data)
@api_view(['GET'])
def getTopProducts(request):
products = Product.objects.filter(rating__gte=4).order_by('-rating')[0:5]
serializer = ProductSerializer(products, many=True)
return Response(serializer.data)
@api_view(['GET'])
def getProduct(request, pk):
product = Product.objects.get(_id=pk)
serializer = ProductSerializer(product, many=False)
return Response(serializer.data)
|
hitrocs-polito/smart-bozor
|
base/views/product_views.py
|
product_views.py
|
py
| 917 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15551870726
|
'''
Given an array of n integers nums, a 132 pattern is a subsequence of three integers nums[i], nums[j] and nums[k] such that i < j < k and nums[i] < nums[k] < nums[j].
Return true if there is a 132 pattern in nums, otherwise, return false.
Example 1:
Input: nums = [1,2,3,4]
Output: false
Explanation: There is no 132 pattern in the sequence.
Example 2:
Input: nums = [3,1,4,2]
Output: true
Explanation: There is a 132 pattern in the sequence: [1, 4, 2].
Example 3:
Input: nums = [-1,3,2,0]
Output: true
Explanation: There are three 132 patterns in the sequence: [-1, 3, 2], [-1, 3, 0] and [-1, 2, 0].
'''
# Stack Solution O(N) TC and O(N) Space
#we try to find 2-3-1 pattern in reversed nums.
class Solution(object):
def find132pattern(self, nums):
if len(nums) < 3:
return False
stack = [] # mono stack (decreasing)
min_val = float('-inf')
# reversed 2-3-1 pattern
for elem in reversed(nums):
if elem < min_val:
return True
while stack and stack[-1] < elem:
min_val = stack.pop()
stack.append(elem)
return False
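# A small self-check sketch (not part of the original file): the three examples from
# the problem statement above should print False, True and True.
if __name__ == "__main__":
    sol = Solution()
    print(sol.find132pattern([1, 2, 3, 4]))
    print(sol.find132pattern([3, 1, 4, 2]))
    print(sol.find132pattern([-1, 3, 2, 0]))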
|
ojhaanshu87/LeetCode
|
456_132_pattern.py
|
456_132_pattern.py
|
py
| 1,155 |
python
|
en
|
code
| 1 |
github-code
|
6
|
1149669859
|
from lib.contents_reader import ContentsReader
import asyncio
CLEAR_SCREEN = "\u001b[2J"
NEW_LINE = "\r\n"
class ZineFunctions:
def __init__(self, reader, writer, index_file_path):
self.reader = reader
self.writer = writer
self.contents_reader = ContentsReader(index_file_path)
async def run_index(self):
for welcome_line in self.contents_reader.read_hello_file():
self.writer.write(welcome_line)
await self.writer.drain()
# Read one byte (any key)
await self.reader.read(1)
running = True
while (running):
for index_line in self.contents_reader.read_index_lines():
self.writer.write(index_line)
item_choice = await self.reader.read(1)
item_choice_int = -1
if item_choice.upper() == 'X':
running = False
continue
item_choice_int = self.contents_reader.map_input_to_numerical_index(item_choice)
if item_choice_int == -1:
self.writer.write(f"{NEW_LINE}{NEW_LINE}Pick a story, or X to quit.{NEW_LINE}")
continue
self.writer.write(f"{NEW_LINE}{NEW_LINE}...you picked: %s" % (item_choice))
self.writer.write(f"{NEW_LINE}{NEW_LINE}...press RETURN to start reading, and to continue after each page")
await self.reader.read(1)
self.writer.write(NEW_LINE + CLEAR_SCREEN)
await asyncio.sleep(1)
await self.run_story(item_choice_int)
self.disconnect()
async def run_story(self, story_number):
page_number = 1
story_lines = self.contents_reader.read_story(story_number, page_number)
while len(story_lines) > 0:
self.writer.write(CLEAR_SCREEN)
for story_line in story_lines:
self.writer.write(story_line)
await self.writer.drain()
char_read = await self.reader.readline()
page_number += 1
story_lines = self.contents_reader.read_story(story_number, page_number)
def disconnect(self):
self.writer.close()
|
caraesten/dial_a_zine
|
dialazine/lib/zine_functions.py
|
zine_functions.py
|
py
| 2,161 |
python
|
en
|
code
| 58 |
github-code
|
6
|
39253810380
|
from mangaki.models import Artist, Manga, Genre
from django.db.utils import IntegrityError, DataError
import re
from collections import Counter
def run():
with open('../data/manga-news/manga.csv') as f:
next(f)
artists = {}
hipsters = Counter()
for i, line in enumerate(f):
# print(len(line.split(';;')))
title, vo_title, writer, mangaka, editor, origin, genre1, genre2, manga_type, synopsis, poster = line.split(';;')
for artist in [writer, mangaka]:
if artist in artists:
continue
                m = re.match('^([A-ZÔÛÏ\'-]+) (.*)$', artist)
if m:
last_name, first_name = m.groups()
last_name = last_name.lower().capitalize()
if not m:
first_name = ''
last_name = artist
if Artist.objects.filter(first_name=first_name, last_name=last_name).count() == 0:
a = Artist(first_name=first_name, last_name=last_name)
a.save()
else:
a = Artist.objects.get(first_name=first_name, last_name=last_name)
artists[artist] = a
with open('../data/manga-news/manga.csv') as f:
next(f)
for i, line in enumerate(f):
title, vo_title, writer, mangaka, editor, origin, genre1, genre2, manga_type, synopsis, poster = line.split(';;')
try:
if Manga.objects.filter(title=title, vo_title=vo_title).count() == 0:
manga = Manga(title=title, vo_title=vo_title, mangaka=artists[mangaka], writer=artists[writer], editor=editor, origin=origin.lower().replace('hong kong', 'hong-kong').replace('international', 'intl'), manga_type=manga_type.lower(), source='', poster=poster, synopsis=synopsis)
manga.save()
else:
manga = Manga.objects.get(title=title, vo_title=vo_title)
if genre1:
manga.genre.add(Genre.objects.get(title=genre1))
if genre2:
manga.genre.add(Genre.objects.get(title=genre2))
except IntegrityError as err:
print(line)
print(writer)
print(err)
break
except DataError as err:
print(line)
print(origin)
print(err)
break
except Genre.DoesNotExist as err:
print(line)
print('Genres: [%s] [%s]' % (genre1, genre2))
print(err)
break
run()
|
mangaki/mangaki
|
mangaki/tools/add_manga.py
|
add_manga.py
|
py
| 2,689 |
python
|
en
|
code
| 137 |
github-code
|
6
|
19107028474
|
"""Extract data on near-Earth objects and close approaches from CSV and JSON files.
The `load_neos` function extracts NEO data from a CSV file, formatted as
described in the project instructions, into a collection of `NearEarthObject`s.
The `load_approaches` function extracts close approach data from a JSON file,
formatted as described in the project instructions, into a collection of
`CloseApproach` objects.
The main module calls these functions with the arguments provided at the command
line, and uses the resulting collections to build an `NEODatabase`.
You'll edit this file in Task 2.
"""
import csv
import json
from models import NearEarthObject, CloseApproach
def load_neos(neo_csv_path):
"""Read near-Earth object information from a CSV file.
:param neo_csv_path: A path to a CSV file containing data about near-Earth objects.
:return: A collection of `NearEarthObject`s.
"""
neo_list = []
neo_collection = []
with open(neo_csv_path) as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
neo_list.append((dict([('designation',row['pdes']), ('name', row['name']),('diameter', row['diameter']),('hazardous', row['pha'])])))
for neo_dict in neo_list:
neo_collection.append(NearEarthObject(**neo_dict))
return neo_collection
def load_approaches(cad_json_path):
"""Read close approach data from a JSON file.
:param cad_json_path: A path to a JSON file containing data about close approaches.
:return: A collection of `CloseApproach`es.
"""
cap_list = []
cap_collection = []
with open(cad_json_path, 'r') as json_file:
json_reader = json.load(json_file)
for i in range(len(json_reader['data'])):
cap_list += [dict(zip(['_designation', 'time', 'distance', 'velocity'], [json_reader['data'][i][0], json_reader['data'][i][3], json_reader['data'][i][4], json_reader['data'][i][7]]))]
for cap_dict in cap_list:
cap_collection.append(CloseApproach(**cap_dict))
return cap_collection
|
rcmadden/Near-Earth-Objects
|
extract.py
|
extract.py
|
py
| 2,061 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12639173645
|
"""
Write a program that calculates the monthly earnings of a professional for 20 working days,
taking into account:
a. The total amount of each service provided must be entered.
b. The program must deduct 10.5% for taxes.
c. The program must show on screen the gross and net amounts, daily and monthly.
d. The program must show the amount paid in taxes, daily and monthly.
"""
income = []
incomeWDiscont = []
taxTotal = 0
for day in range(2):
income.append(0)
bene = 1
while True:
income[day] += int(input(f"Ingrese el monto de la prestación {bene} del día {day + 1}: "))
bene += 1
while True:
val = int(input(f"Desea seguir ingresando prestaciones para el día {day + 1}? \n 1. Si \n 2. No\n"))
if val == 1 or val == 2:
break
print("Ingrese una respuesta válidad.")
if val == 2:
break
total= sum(income)
for day in income:
print(f"Importe bruto diario {day} y neto diario: {day-day*0.105}")
print(f"El importe pagado en impuestos diarios es: {day*0.105}")
taxTotal += day*0.105
print(f"Total bruto: {total}, total neto: {total-total*0.105}. Total de impuestos: {taxTotal}")
|
sbelbey/pp-python
|
Ejercicios_21_al_30/ejercicio30.py
|
ejercicio30.py
|
py
| 1,220 |
python
|
es
|
code
| 0 |
github-code
|
6
|
36060029870
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class LocalizationNetwork(nn.Module):
def __init__(self, numOfControlPoints=10):
super().__init__()
self.numOfControlPoints = numOfControlPoints
self.pool = nn.MaxPool2d(2, 2)
self.aPool = nn.AdaptiveAvgPool2d(1)
self.conv1 = nn.Conv2d(3, 64, 3, padding=1)
self.conv2 = nn.Conv2d(64, 128, 3, padding=1)
self.conv3 = nn.Conv2d(128, 256, 3, padding=1)
self.conv4 = nn.Conv2d(256, 512, 3, padding=1)
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
self.bn4 = nn.BatchNorm2d(512)
self.relu = nn.ReLU(inplace=True)
self.fc1 = nn.Linear(512, 256)
self.fc2 = nn.Linear(256, numOfControlPoints * 2)
self.init_stn()
def forward(self, x):
x = self.bn1(F.relu(self.conv1(x)))
x = self.pool(x)
x = self.bn2(F.relu(self.conv2(x)))
x = self.pool(x)
x = self.bn3(F.relu(self.conv3(x)))
x = self.pool(x)
x = self.bn4(F.relu(self.conv4(x)))
x = self.aPool(x)
x = x.view(x.size()[0], -1)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = x.view(-1, 2, self.numOfControlPoints)
return x
def init_stn(self):
interval = np.linspace(0.05, 0.95, self.numOfControlPoints // 2)
controlPoints = [[],[]]
for y in [0.1,0.9]:
for i in range(self.numOfControlPoints // 2):
controlPoints[1].append(y)
for x in interval:
controlPoints[0].append(x)
self.fc2.weight.data.zero_()
self.fc2.bias.data = torch.Tensor(controlPoints).view(-1).float().to(device)
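# A quick shape-check sketch (not part of the original file): pass a dummy batch through
# the network to confirm it predicts 2 rows (x and y) of `numOfControlPoints` control
# points. The 64x64 input size is an arbitrary choice; the adaptive pooling makes the
# fully connected head independent of the input resolution.
if __name__ == '__main__':
    net = LocalizationNetwork(numOfControlPoints=10).to(device)
    dummy = torch.randn(2, 3, 64, 64).to(device)
    print(net(dummy).shape)  # expected: torch.Size([2, 2, 10])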
|
xpiste05/knn_projekt
|
models/localizationNetwork.py
|
localizationNetwork.py
|
py
| 1,925 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72493111869
|
import vk_api
from vk_api.keyboard import VkKeyboard, VkKeyboardColor
main = VkKeyboard(one_time=True)
main.add_button('Создать жалобу на бандита/лидера☢', color=VkKeyboardColor.PRIMARY)
main.add_button('Создать жалобу на лидера☣', color=VkKeyboardColor.POSITIVE)
main.add_line() # start a new row of buttons
main.add_button('Кнопка 3', color=VkKeyboardColor.NEGATIVE)
main.add_button('Кнопка 4', color=VkKeyboardColor.SECONDARY)
nick = VkKeyboard(one_time=True)
nick.add_button('Да✅', color=VkKeyboardColor.POSITIVE)
nick.add_button('Нет⛔', color=VkKeyboardColor.NEGATIVE)
success = VkKeyboard(one_time=True)
success.add_button('Готово✅', color=VkKeyboardColor.POSITIVE)
|
Qerkdb/forum-bot
|
keyboards.py
|
keyboards.py
|
py
| 786 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
13879817833
|
N, K = map(int, input().split())
x = 0
count = 0
if N % K == 0 : # when N divides evenly right away
while(N != 1) :
count += 1
N = N / K
elif N % K != 0 : # when N does not divide evenly right away
    x = N % K # the amount that has to be subtracted
count += x
N = N - x
while(N != 1) :
count += 1
N = N / K
print(count)
result = 0
# In my first solution I figured that, whenever N is not divisible, I could simply keep
# subtracting until it becomes a multiple, but I never considered the case where N is smaller than K.
# Watch out for that.
## Solution from the book
# 1.
# while N >= K:
#     while N % K != 0:
#         N -= 1
#         result += 1
#     N //= K
#     result += 1
# while N > 1:
#     N -= 1
#     result += 1
# print(result)
# 2.
# while True:
#     # subtract 1 until N becomes (the largest number divisible by K)
#     target = (N // K) * K
#     result += (N - target)
#     N = target
#     # exit the loop when N is smaller than K (no further division possible)
#     if N < K:
#         break
#     # divide by K
#     result += 1
#     N //= K
# # finally, subtract 1 at a time for the remaining number
# result += (N - 1)
# print(result)
|
codusl100/algorithm
|
백준/그리디/1이 될 때까지.py
|
1이 될 때까지.py
|
py
| 1,147 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
13461358632
|
"""
A binary watch has 4 LEDs on the top which represent the hours (0-11), and the 6 LEDs on the bottom represent the minutes (0-59).
Each LED represents a zero or one, with the least significant bit on the right.
Given a non-negative integer n which represents the number of LEDs that are currently on, return all possible times the watch could represent.
"""
def countBits(v):
"""
:type v: int
:rtype: int
"""
count = 0
while v != 0:
if v % 2 == 1:
count += 1
v >>= 1
return count
num_bits = [countBits(i) for i in range(60)]
class Solution(object):
def readBinaryWatch(self, num):
"""
:type num: int
:rtype: List[str]
"""
results = []
for h in range(12):
for m in range(60):
time = str(h) + ":"
if num_bits[h] + num_bits[m] == num:
if m < 10:
time += "0"
time += str(m)
if time[-1] != ":":
results.append(time)
return results
ans = Solution()
print(ans.readBinaryWatch(2))
|
szhongren/leetcode
|
401/main.py
|
main.py
|
py
| 1,182 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38726912007
|
from __future__ import unicode_literals
import shutil
import os
HOME = os.path.join('pyupdater', 'vendor')
junitxml = os.path.join(HOME, 'PyInstaller', 'lib', 'junitxml', 'tests')
unittest2 = os.path.join(HOME, 'PyInstaller', 'lib', 'unittest2')
items_to_remove = [junitxml, unittest2]
def remove(x):
if os.path.isfile(x):
os.remove(x)
if os.path.isdir(x):
shutil.rmtree(x, ignore_errors=True)
def main():
for i in items_to_remove:
remove(i)
if __name__ == '__main__':
main()
|
timeyyy/PyUpdater
|
dev/fix_vendor.py
|
fix_vendor.py
|
py
| 525 |
python
|
en
|
code
| 7 |
github-code
|
6
|
14988584675
|
from setuptools import setup
package_name = 'leg_controller'
setup(
name=package_name,
version='0.0.0',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='pi',
maintainer_email='[email protected]',
description='TODO: Package description',
license='TODO: License declaration',
tests_require=['pytest'],
entry_points={
'console_scripts': [
"servo_node = leg_controller.servoController:main",
"kin_node = leg_controller.pointToAngle:main",
"animation_node = leg_controller.simpleCommands:main"
],
},
)
|
PetriJF/Hexapod
|
src/leg_controller/setup.py
|
setup.py
|
py
| 813 |
python
|
en
|
code
| 2 |
github-code
|
6
|
29157516812
|
#!/usr/bin/env python3
import asyncio
from mavsdk import System
from mavsdk.gimbal import GimbalMode, ControlMode
async def run():
# Init the drone
drone = System()
await drone.connect(system_address="udp://:14540")
# Start printing gimbal position updates
print_gimbal_position_task = \
asyncio.ensure_future(print_gimbal_position(drone))
print("Taking control of gimbal")
await drone.gimbal.take_control(ControlMode.PRIMARY)
# Set the gimbal to YAW_LOCK (= 1) mode (see docs for the difference)
# Other valid values: YAW_FOLLOW (= 0)
# YAW_LOCK will fix the gimbal pointing to an absolute direction,
# whereas YAW_FOLLOW will point relative to vehicle heading.
print("Setting gimbal mode")
await drone.gimbal.set_mode(GimbalMode.YAW_FOLLOW)
print("Look forward first")
await drone.gimbal.set_pitch_and_yaw(0, 0)
await asyncio.sleep(1)
print("Look down")
await drone.gimbal.set_pitch_and_yaw(-90, 0)
await asyncio.sleep(2)
print("Back to horizontal")
await drone.gimbal.set_pitch_and_yaw(0, 0)
await asyncio.sleep(2)
print("Slowly look up")
await drone.gimbal.set_pitch_rate_and_yaw_rate(10, 0)
await asyncio.sleep(3)
print("Back to horizontal")
await drone.gimbal.set_pitch_and_yaw(0, 0)
await asyncio.sleep(2)
print("Look right")
await drone.gimbal.set_pitch_and_yaw(0, 90)
await asyncio.sleep(2)
print("Look forward again")
await drone.gimbal.set_pitch_and_yaw(0, 0)
await asyncio.sleep(2)
print("Slowly look to the left")
await drone.gimbal.set_pitch_rate_and_yaw_rate(0, -20)
await asyncio.sleep(3)
print("Look forward again")
await drone.gimbal.set_pitch_and_yaw(0, 0)
await asyncio.sleep(2)
# Set the gimbal to track a region of interest (lat, lon, altitude)
# Units are degrees and meters MSL respectively
print("Look at a ROI (region of interest)")
await drone.gimbal.set_roi_location(47.39743832, 8.5463316, 488)
await asyncio.sleep(3)
print("Look forward again")
await drone.gimbal.set_pitch_and_yaw(0, 0)
await asyncio.sleep(2)
print("Release control of gimbal again")
await drone.gimbal.release_control()
print_gimbal_position_task.cancel()
async def print_gimbal_position(drone):
# Report gimbal position updates asynchronously
# Note that we are getting gimbal position updates in
# euler angles; we can also get them as quaternions
async for angle in drone.telemetry.camera_attitude_euler():
print(f"Gimbal pitch: {angle.pitch_deg}, yaw: {angle.yaw_deg}")
if __name__ == "__main__":
# Run the asyncio loop
asyncio.run(run())
|
mavlink/MAVSDK-Python
|
examples/gimbal.py
|
gimbal.py
|
py
| 2,709 |
python
|
en
|
code
| 246 |
github-code
|
6
|
31973217705
|
"""filename and file size in file model
Revision ID: 6d23296b922b
Revises: 6ec29c8de008
Create Date: 2023-03-02 17:47:25.025321
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6d23296b922b'
down_revision = '6ec29c8de008'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('files', schema=None) as batch_op:
batch_op.add_column(sa.Column('filename', sa.String(length=255), nullable=True))
batch_op.add_column(sa.Column('file_size', sa.String(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('files', schema=None) as batch_op:
batch_op.drop_column('file_size')
batch_op.drop_column('filename')
# ### end Alembic commands ###
|
synzr/file-transfer-service
|
migrations/versions/6d23296b922b_filename_and_file_size_in_file_model.py
|
6d23296b922b_filename_and_file_size_in_file_model.py
|
py
| 952 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70943710908
|
from pytsbe.main import TimeSeriesLauncher
def multivariate_launch_example():
"""
Example how to launch benchmark with several libraries with different
parameters for multivariate time series forecasting
For more detailed info check documentation or docstring descriptions in classes below.
Important! The parameter 'predefined_model' for FEDOT framework does not launch the AutoML process.
It should be removed to use AutoML.
"""
experimenter = TimeSeriesLauncher(working_dir='./example_multivariate_launch',
datasets=['SSH'],
launches=2)
experimenter.perform_experiment(libraries_to_compare=['FEDOT'],
horizons=[20],
libraries_params={'FEDOT': {'predefined_model': 'auto'}},
validation_blocks=2,
clip_border=400)
if __name__ == '__main__':
multivariate_launch_example()
|
ITMO-NSS-team/pytsbe
|
examples/multivariate_module_launch.py
|
multivariate_module_launch.py
|
py
| 1,037 |
python
|
en
|
code
| 30 |
github-code
|
6
|
32841420589
|
import pandas as pd
import numpy as np
import pickle as pkl
import matplotlib.pyplot as plt
import re
import jieba
import subprocess
from gensim.test.utils import get_tmpfile, common_texts
from gensim.models import Word2Vec, KeyedVectors
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.manifold import TSNE
from matplotlib.font_manager import FontManager
from pylab import mpl
jieba.load_userdict('C:/Users/choose/venv/Lib/site-packages/jieba/dict.blue.txt')
def load_stopwords():
with open('util/stopwords.pkl', 'rb') as f:
stopwords = pkl.load(f)
return stopwords
def load_symbols():
ret = []
with open('util/symbols_20181216.txt', 'r', encoding='utf-8') as f:
rows = f.readlines()
f.close()
for row in rows:
if row[:-1] not in ret:
ret.append(row[:-1])
return ret
def load_pattern():
symbols = load_symbols()
symbols += ['\n', '\r\n', '\r']
symbols_str = ''
for symbol in symbols:
if symbol in '[]()-': symbol = '\\' + symbol
symbols_str += symbol
return re.compile(r'([0-9]+|\.+|[a-zA-Z])|[{}]+'.format(symbols_str))
def to_sentence(document):
ret = list()
rule = re.compile('[\W]+')
result = rule.split(document)
for sentence in result:
if len(sentence) > 0:
ret.append(sentence)
return ret
def tokenize(corpus, stopwords=load_stopwords(), pattern=re.compile(r'[\WA-Za-z0-9]+'), length_constraint=2):
tokenized_corpus = []
for doc in corpus:
tokenized_doc = jieba.lcut(doc)
words = []
for word in tokenized_doc:
if word in stopwords or pattern.match(word): continue
elif len(word) < length_constraint: continue
else: words.append(word)
tokenized_corpus.append(words)
return tokenized_corpus
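# A minimal usage sketch (not part of the original file). Note that importing this module
# already requires the jieba user dictionary and util/stopwords.pkl paths above to exist,
# since load_stopwords() is evaluated as a default argument:
# corpus = ['自然语言处理很有趣']
# print(tokenize(corpus)) # e.g. [['自然语言', '处理', '有趣']]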
|
kartd0094775/IdentifyKOL
|
util/preprocessing.py
|
preprocessing.py
|
py
| 1,846 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32149840487
|
# Upload BOJ silver-1 Brute-force #2615 Omok (Five in a Row)
# Reference blog: https://velog.io/@hygge/Python-%EB%B0%B1%EC%A4%80-2615-%EC%98%A4%EB%AA%A9-Brute-Force
import sys
board = [list(map(int,input().split())) for _ in range(19)]
visited = [[0 for _ in range(19)] for _ in range(19)]
win = 0
ways = [[0,1],[1,0],[1,1],[-1,1]]
answer = []
for x in range(19):
for y in range(19):
if board[x][y]:
target = board[x][y]
for i in range(4):
cnt = 1
nx = x + ways[i][0]
ny = y + ways[i][1]
while 0 <= nx < 19 and 0 <= ny < 19 and board[nx][ny] == target:
cnt += 1
if cnt == 5:
                        # check for six in a row (overline)
if 0 <= x-ways[i][0] < 19 and 0 <= y-ways[i][1] < 19 and board[x-ways[i][0]][y-ways[i][1]] == target:
break
if 0 <= nx + ways[i][0] < 19 and 0 <= ny+ways[i][1] < 19 and board[nx+ways[i][0]][ny+ways[i][1]] == target:
break
print(target)
print(x+1,y+1)
sys.exit(0)
nx += ways[i][0]
ny += ways[i][1]
print(0)
|
HS980924/Algorithm
|
src/2.BruteForce/B#2615_오목.py
|
B#2615_오목.py
|
py
| 1,378 |
python
|
en
|
code
| 2 |
github-code
|
6
|
21160883826
|
import os
import math
import torch
import pytorch_lightning as pl
import torch.nn.functional as F
import torch.nn as nn
from numpy import sqrt, argmax
from torch.optim import lr_scheduler
from .model import CNN
import numpy as np
import pandas as pd
from sklearn.metrics import roc_curve, confusion_matrix, roc_auc_score
from matplotlib import pyplot
from backbone.vit_pytorch import cait, vit, deepvit
from backbone.torchvision.models_orig import resnet, densenet, inception
from factory.config import *
class Model(pl.LightningModule):
def __init__(self, model_name):
super().__init__()
self.model_name = model_name
# efficientnet-b0 ~ efficientnet-b5
if model_name == 'efficientnet-b0':
self.net = CNN(backbone="efficientnet-b0", freeze=False)
if model_name == 'efficientnet-b1':
self.net = CNN(backbone="efficientnet-b1", freeze=False)
        if model_name == 'efficientnet-b2':
            self.net = CNN(backbone="efficientnet-b2", freeze=False)
        if model_name == 'efficientnet-b3':
            self.net = CNN(backbone="efficientnet-b3", freeze=False)
if model_name == 'efficientnet-b4':
self.net = CNN(backbone="efficientnet-b4", freeze=False)
if model_name == 'efficientnet-b5':
self.net = CNN(backbone="efficientnet-b5", freeze=False)
#naive vit
elif model_name == 'vit':
self.net = vit.ViT(image_size=IMG_SIZE , patch_size=32, num_classes=2, dim=1024, depth=6, heads=16, mlp_dim=2048, dropout=0.1, emb_dropout=0.1)
#Cait
elif model_name == 'cait':
self.net = cait.CaiT(image_size=IMG_SIZE, patch_size=32, num_classes=2, dim=1024, depth=12, cls_depth=2, heads=16, mlp_dim=2048, dropout=0.1, emb_dropout=0.1, layer_dropout=0.05)
#deep vit
elif model_name == 'deepvit':
self.net = deepvit.DeepViT(image_size=IMG_SIZE, patch_size=32, num_classes=2, dim=1024, depth=6, heads=16, mlp_dim=2048, dropout=0.1, emb_dropout=0.1)
#resnet50
elif model_name == 'resnet50':
self.net = resnet.resnet50(pretrained=True)
#resnet101
elif model_name == 'resnet101':
self.net = resnet.resnet101(pretrained=True)
#resnet152
elif model_name == 'resnet152':
self.net = resnet.resnet152(pretrained=True)
#densenet121
elif model_name == 'densenet121':
self.net = densenet.densenet121(pretrained=True)
#densenet161
elif model_name == 'densenet161':
self.net = densenet.densenet161(pretrained=True)
#densenet169
elif model_name == 'densenet169':
self.net = densenet.densenet169(pretrained=True)
#densenet201
elif model_name == 'densenet201':
self.net = densenet.densenet201(pretrained=True)
#inception_v3
elif model_name == 'inception_v3':
self.net = inception.inception_v3(pretrained=True)
hidden_dim1 = 256
hidden_dim2 = 64
num_classes = 2
dropout = 0.1
self.classifier = nn.Sequential(
nn.Linear(1000, hidden_dim1),
nn.GELU(), nn.Dropout(dropout),
nn.Linear(hidden_dim1, hidden_dim2),
nn.GELU(), nn.Dropout(dropout),
nn.Linear(hidden_dim2, num_classes)
)
self.train_preds = []
self.train_gts = []
self.valid_preds = []
self.valid_gts = []
self.test_preds = []
self.test_probs = []
self.test_gts = []
def forward(self, x):
if 'efficientnet' in self.model_name:
return self.net(x)
elif 'inception' in self.model_name:
x = self.net(x)
return self.classifier(x.logits)
else:
x = self.net(x)
return self.classifier(x)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-4)
scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
return [optimizer], [scheduler]
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self.forward(x)
loss = F.cross_entropy(y_hat, y)
for gy in y:
self.train_gts.append(gy.cpu().item())
for py in y_hat:
c = torch.argmax(py)
self.train_preds.append(c.cpu().item())
self.log("loss", loss, on_epoch=True, prog_bar=True)
return loss
def training_epoch_end(self, outputs):
acc, sen, spe, ppv, npv, tn, fp, fn, tp = self.calculate_metrics(
self.train_gts, self.train_preds
)
avg_loss = torch.stack([x['loss'] for x in outputs]).mean()
self.log("train_avg_loss", avg_loss, on_epoch=True, prog_bar=True)
self.log("train_acc", acc, on_epoch=True, prog_bar=True)
self.log("train_sensitivity(recall)", sen, on_epoch=True, prog_bar=True)
self.log("train_specificity", spe, on_epoch=True, prog_bar=True)
self.log("train_ppv(precision)", ppv, on_epoch=True, prog_bar=True)
self.log("train_npv", npv, on_epoch=True, prog_bar=True)
self.log("train_tn", tn , on_epoch=True, prog_bar=True)
self.log("train_fp", fp, on_epoch=True, prog_bar=True)
self.log("train_fn", fn, on_epoch=True, prog_bar=True)
self.log("train_tp", tp, on_epoch=True, prog_bar=True)
self.train_preds = []
self.train_gts = []
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat = self.forward(x)
loss = F.cross_entropy(y_hat, y)
for gy in y:
self.valid_gts.append(gy.cpu().item())
for py in y_hat:
c = torch.argmax(py)
self.valid_preds.append(c.cpu().item())
acc, sen, spe, ppv, npv, tn, fp, fn, tp = self.calculate_metrics(
self.valid_gts, self.valid_preds
)
self.log("val_bat_loss", loss, on_epoch=True, prog_bar=True)
self.log("val_acc", acc, on_epoch=True, prog_bar=True)
self.log("val_sensitivity(recall)", sen, on_epoch=True, prog_bar=True)
self.log("val_specificity", spe, on_epoch=True, prog_bar=True)
self.log("val_ppv(precision)", ppv, on_epoch=True, prog_bar=True)
self.log("val_npv", npv, on_epoch=True, prog_bar=True)
self.log("val_tn", tn , on_epoch=True, prog_bar=True)
self.log("val_fp", fp, on_epoch=True, prog_bar=True)
self.log("val_fn", fn, on_epoch=True, prog_bar=True)
self.log("val_tp", tp, on_epoch=True, prog_bar=True)
return {
"val_bat_loss": loss, "val_acc": acc,
"val_sensitivity(recall)": sen, "val_specificity": spe,
"val_ppv(precision)":ppv, "val_npv": npv,
"val_tn": tn, "val_fp": fp, "val_fn": fn, "val_tp": tp,
}
def validation_epoch_end(self, outputs):
acc, sen, spe, ppv, npv, tn, fp, fn, tp = self.calculate_metrics(
self.valid_gts, self.valid_preds
)
avg_loss = torch.stack([x['val_bat_loss'] for x in outputs]).mean()
self.log("val_avg_loss", avg_loss, on_epoch=True, prog_bar=True)
self.log("val_acc", acc, on_epoch=True, prog_bar=True)
self.log("val_sensitivity(recall)", sen, on_epoch=True, prog_bar=True)
self.log("val_specificity", spe, on_epoch=True, prog_bar=True)
self.log("val_ppv(precision)", ppv, on_epoch=True, prog_bar=True)
self.log("val_npv", npv, on_epoch=True, prog_bar=True)
self.log("val_tn", tn , on_epoch=True, prog_bar=True)
self.log("val_fp", fp, on_epoch=True, prog_bar=True)
self.log("val_fn", fn, on_epoch=True, prog_bar=True)
self.log("val_tp", tp, on_epoch=True, prog_bar=True)
self.valid_preds = []
self.valid_gts = []
def test_step(self, batch, batch_idx):
x, y = batch
y_hat = self.forward(x)
loss = F.cross_entropy(y_hat, y)
for gy in y:
self.test_gts.append(gy.cpu().item())
for py in y_hat:
c = torch.argmax(py)
p = F.softmax(py, dim=0)[1]
self.test_probs.append(p.cpu().item())
self.test_preds.append(c.cpu().item())
self.log("test_loss", loss, on_epoch=True, prog_bar=True)
return {'test_loss': loss}
def test_epoch_end(self, outputs):
acc, sen, spe, ppv, npv, tn, fp, fn, tp = self.calculate_metrics(
self.test_gts, self.test_preds
)
auc = self.calculate_auc(self.test_gts, self.test_probs)
avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
self.log("test_avg_loss", avg_loss, on_epoch=True, prog_bar=True)
self.log("test_acc", acc, on_epoch=True, prog_bar=True)
self.log("test_sensitivity(recall)", sen, on_epoch=True, prog_bar=True)
self.log("test_specificity", spe, on_epoch=True, prog_bar=True)
self.log("test_ppv(precision)", ppv, on_epoch=True, prog_bar=True)
self.log("test_npv", npv, on_epoch=True, prog_bar=True)
self.log("test_auc", auc, on_epoch=True, prog_bar=True)
self.log("test_tn", tn , on_epoch=True, prog_bar=True)
self.log("test_fp", fp, on_epoch=True, prog_bar=True)
self.log("test_fn", fn, on_epoch=True, prog_bar=True)
self.log("test_tp", tp, on_epoch=True, prog_bar=True)
print('============' * 5)
print('Accuracy : {:.4f}, Recall(Sensitivity) : {:.4f}, Specificity :{:.4f}, PPV(Precision) : {:.4f}, NPV : {:.4f}, Auc : {:.4f}, Confusion : ( TP-{} | FP-{} | FN-{} | TN-{} )'.format(acc, sen, spe, ppv, npv, auc, tp, fp, fn, tn))
print('============' * 5)
dfGTs = pd.DataFrame(np.round_(np.array(self.test_gts)))
dfPreds = pd.DataFrame(np.round_(np.array(self.test_preds)))
dfProbs = pd.DataFrame(np.round_(np.array(self.test_probs) * 100, 3))
pd.concat([dfGTs, dfPreds, dfProbs], axis=1).to_csv('./test.csv', index=False)
def calculate_metrics(self, gts, preds):
tn, fp, fn, tp = confusion_matrix(gts, preds, labels=[0,1]).ravel()
if math.isnan(tn): tn = 0
if math.isnan(fp): fp = 0
if math.isnan(fn): fn = 0
if math.isnan(tp): tp = 0
acc = (tp + tn) / (tn + fp + fn + tp)
sen = tp / (tp + fn)
spe = tn / (tn + fp)
ppv = tp / (tp + fp)
npv = tn / (tn + fn)
if math.isnan(acc): acc = 0
if math.isnan(sen): sen = 0
if math.isnan(spe): spe = 0
if math.isnan(ppv): ppv = 0
if math.isnan(npv): npv = 0
return np.float32(acc), np.float32(sen), np.float32(spe), np.float32(ppv), np.float32(npv), tn, fp, fn, tp
def calculate_auc(self, gts, probs):
try:
auc = roc_auc_score(gts, probs)
ns_probs = [0 for _ in range(len(gts))]
lr_probs = probs
ns_auc = roc_auc_score(gts, ns_probs)
lr_auc = roc_auc_score(gts, lr_probs)
ns_fpr, ns_tpr, _ = roc_curve(gts, ns_probs)
lr_fpr, lr_tpr, _ = roc_curve(gts, lr_probs)
# calculate g-mean for each threshold
gmeans = sqrt(lr_tpr * (1-lr_fpr))
ix = argmax(gmeans)
# plot True, Predict, Best
pyplot.scatter(lr_fpr[ix], lr_tpr[ix], marker='*', color='black', label='Best')
pyplot.text(lr_fpr[ix] + 0.05, lr_tpr[ix] - 0.05, "FPR: {}\nTPR: {}".format(lr_fpr[ix], lr_tpr[ix]), fontsize=7)
pyplot.plot(ns_fpr, ns_tpr, linestyle='--', label='True')
pyplot.plot(lr_fpr, lr_tpr, marker=',', label='Predict (auc={})'.format(round(auc, 3)))
pyplot.xlabel('False Positive Rate (1 - Specificity)')
pyplot.ylabel('True Positive Rate (Sensitivity)')
pyplot.legend()
pyplot.savefig('test_roc.png', dpi=600)
except ValueError:
auc=0
return auc
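# Worked example for calculate_metrics (illustration only, not part of the original file):
# with tn=50, fp=5, fn=10, tp=35 the formulas above give (rounded)
# acc = (35 + 50) / 100 = 0.85
# sen = 35 / 45 = 0.778 (recall)
# spe = 50 / 55 = 0.909
# ppv = 35 / 40 = 0.875 (precision)
# npv = 50 / 60 = 0.833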
|
Junkkkk/ovarian_cancer_detection
|
models/lightning_model.py
|
lightning_model.py
|
py
| 12,263 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73019628988
|
# Bot information
SESSION = 'Media_search'
USER_SESSION = 'User_Bot'
API_ID = 12345
API_HASH = '0123456789abcdef0123456789abcdef'
BOT_TOKEN = '123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11'
USERBOT_STRING_SESSION = ''
# Bot settings
CACHE_TIME = 300
USE_CAPTION_FILTER = False
# Admins, Channels & Users
ADMINS = [12345789, 'admin123', 98765432]
CHANNELS = [-10012345678, -100987654321, 'channelusername']
AUTH_USERS = []
AUTH_CHANNEL = None
# MongoDB information
DATABASE_URI = "mongodb://[username:password@]host1[:port1][,...hostN[:portN]][/[defaultauthdb]?retryWrites=true&w=majority"
DATABASE_NAME = 'Telegram'
COLLECTION_NAME = 'channel_files' # If you are using the same database, then use different collection name for each bot
# Messages
START_MSG = """
**Hi, I'm Media Search bot**
Here you can search files in inline mode. Just press the following buttons and start searching.
"""
SHARE_BUTTON_TEXT = 'Checkout {username} for searching files'
INVITE_MSG = 'Please join @.... to use this bot'
|
Mahesh0253/Media-Search-bot
|
sample_info.py
|
sample_info.py
|
py
| 1,000 |
python
|
en
|
code
| 514 |
github-code
|
6
|
7705288630
|
import json
import torch
from transformers import GPT2Tokenizer
from transformers import GPT2DoubleHeadsModel
from MTDNN import MTDNN
from tqdm import trange, tqdm
from keras_preprocessing import sequence
import pandas as pd
import Utils
import pickle
import os
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
import datetime
SPECIAL_TOKENS = ['<pad>', '<eos>', '<rstokn>', '<bos>', '<question>', '<commonsensetask>', '<cose>', '<openbook>']
ATTR_TO_SPECIAL_TOKEN = {'bos_token': '<bos>', 'pad_token': '<pad>', 'eos_token': '<eos>',
'additional_special_tokens': ['<rstokn>', '<question>', '<reply>', '<commonsensetask>', '<cose>', '<openbook>']
}
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logs_dir_tensorboard = "runs2nomcs/" + (str(current_time) + "morecheckpoint-melco-update-ros")
writer = SummaryWriter(logs_dir_tensorboard)
device = 'cuda:5'
def data_preprocess():
final_data = []
questions = []
choices = []
label = []
facts = []
file_name = 'data/OpenBookFacts/train_complete.jsonl'
for line in open(file_name, 'r') :
data = (json.loads(line))
questions.append(data['question']['stem'])
choices.append([data['question']['choices'][0]['text'], data['question']['choices'][1]['text'],
data['question']['choices'][2]['text'], data['question']['choices'][3]['text']])
if data['answerKey'] == 'A' :
answer = 0
elif data['answerKey'] == 'B' :
answer = 1
elif data['answerKey'] == 'C' :
answer = 2
else:
answer = 3
label.append(answer)
facts.append(data['fact1'])
openBook_Data = [["openBook"], questions, choices, label, facts]
final_data.append(openBook_Data)
file_name = 'data/CoS-E/cose_train_data.csv'
data = pd.read_csv(file_name)
final_data.append([["CoS-E"], data])
file_name_1 = 'data/commonsense/subtaskB_data_all-2.csv'
file_name_2 = 'data/commonsense/subtaskC-alldata.csv'
data1 = pd.read_csv(file_name_1)
data2 = pd.read_csv(file_name_2)
data = data1.merge(data2, on='FalseSent').dropna()
final_data.append([["commonsense"], data]) # leave the last 500
return final_data
def convert_to_tokens(input, tokenizer):
if isinstance(input, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(input))
elif isinstance(input, list):
return [
tokenizer.convert_tokens_to_ids(tokenizer.tokenize(val))
if not isinstance(val, int) else val
for val in input
]
elif isinstance(input, pd.Series):
input = input.tolist()
return [
tokenizer.convert_tokens_to_ids(tokenizer.tokenize(val))
if not isinstance(val, int) else val
for val in input
]
else:
import sys
print("Conversion Error")
sys.exit()
def padding_falsesent_choices(datas, tokenizer):
pad = [tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[0])]
eos = [tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[1])]
rstokn = [tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[2])]
bos = [tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[3])]
questons = [tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[4])]
commonsensetask = [tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[5])]
COSE = [tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[6])]
openBook = [tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[7])]
choice_padding = -1
input_ids = []
lm_labels = []
token_type_ids = []
mc_token_ids = []
mc_labels = []
max_length = 128
for data in datas:
if data[0] == ["openBook"]:
for question, choices, labels, facts in zip( data[1], data[2], data[3], data[4]):
# /mc_labels = []
question, choices, facts = convert_to_tokens(question, tokenizer), convert_to_tokens(choices, tokenizer), convert_to_tokens(facts, tokenizer)
input1 = [bos + openBook + rstokn + question + rstokn + choices[0] + rstokn + choices[1] + rstokn + choices[2] + rstokn + choices[3] + eos]
input2 = [bos + openBook + rstokn + question + rstokn + facts + eos]
mc_token_ids.append(len(input1[0]))
mc_token_ids.append(len(input2[0]))
input1 = sequence.pad_sequences(input1, maxlen=max_length, padding='post', value=pad)
input_ids.append(input1[0])
fakechoice = sequence.pad_sequences([[-1]], maxlen=max_length, padding='post', value=choice_padding)
lm_labels.append(fakechoice[0])
tt_id1 = [
(len(openBook) + 1) * rstokn + (len(question) + 1) * questons + (len(choices[0]) + 1) * rstokn +
(len(choices[1]) + 1) * questons + (len(choices[2]) + 1) * rstokn + (len(choices[3]) + 2) * questons
]
tt_id1 = sequence.pad_sequences(tt_id1, maxlen=max_length, padding='post', value=pad)
token_type_ids.append(tt_id1[0])
input2 = sequence.pad_sequences(input2, maxlen=max_length, padding='post', value=pad)
input_ids.append(input2[0])
choice = [[-1] * (len(openBook) + 2) + [-1] * len(question) + [-1] + facts + eos]
choice = sequence.pad_sequences(choice, maxlen=max_length, padding='post', value=choice_padding)
lm_labels.append(choice[0])
tt_id2 = [(len(openBook) + 1) * rstokn + (len(question) + 1) * questons + (len(choices[labels]) + 2) * rstokn]
tt_id2 = sequence.pad_sequences(tt_id2, maxlen=max_length, padding='post', value=pad)
token_type_ids.append(tt_id2[0])
mc_labels.append(labels)
mc_labels.append(labels)
elif data[0] == ["CoS-E"]:
for idx, value in data[1].iterrows():
value = value[1:]
value = convert_to_tokens(value, tokenizer)
input1 = [bos + COSE + rstokn + value[1] + rstokn + value[2] + rstokn + value[3] + rstokn + value[4] +
rstokn + value[5] + rstokn + value[6] + eos]
input2 = [bos + COSE + rstokn + value[1] + rstokn + value[8] + eos]
mc_token_ids.append(len(input1[0]))
mc_token_ids.append(len(input2[0]))
input1 = sequence.pad_sequences(input1, maxlen= max_length, padding='post', value=pad)
input_ids.append(input1[0])
fakechoice = sequence.pad_sequences([[-1]], maxlen=max_length, padding='post', value=choice_padding)
lm_labels.append(fakechoice[0])
tt_id1 = [(len(COSE) + 1) * rstokn + (len(value[1]) + 1) * questons + (len(value[2]) + 1) * rstokn +
(len(value[3]) + 1) * questons + (len(value[4]) + 1) * rstokn + (len(value[5]) + 1) * questons +
(len(value[6]) + 2) * rstokn]
tt_id1 = sequence.pad_sequences(tt_id1, maxlen= max_length, padding='post', value=pad)
token_type_ids.append(tt_id1[0])
input2 = sequence.pad_sequences(input2, maxlen=max_length, padding='post', value=pad)
input_ids.append(input2[0])
choice = [[-1] * (len(COSE) + 2) + [-1] * len(value[1]) + [-1] + value[8] + eos]
choice = sequence.pad_sequences(choice, maxlen=max_length, padding='post', value=choice_padding)
lm_labels.append(choice[0])
tt_id2 = [(len(COSE) + 1) * rstokn + (len(value[1]) + 1) * questons + (len(value[8]) +2) * rstokn]
tt_id2 = sequence.pad_sequences(tt_id2, maxlen=max_length, padding='post', value=pad)
token_type_ids.append(tt_id2[0])
mc_labels.append(value[7])
mc_labels.append(value[7])
elif data[0] == ["commonsense"]:
for idx, value in data[1].iterrows():
# call tokenizer
value = convert_to_tokens(value, tokenizer)
input1 = [bos + commonsensetask + rstokn + value[1] + rstokn + value[2] + rstokn + value[3] + rstokn + value[4]+ eos]
ml = input1
input1 = sequence.pad_sequences(input1, maxlen=max_length, padding='post', value=pad)
fakechoice = sequence.pad_sequences([[-1]], maxlen=max_length, padding='post', value=choice_padding)
tt_id1 = [
(len(commonsensetask) + 1) * rstokn + (len(value[1]) + 1) * questons + (len(value[2]) + 1) * rstokn +
(len(value[3]) + 1) * questons + (len(value[4]) + 2) * rstokn
]
tt_id1 = sequence.pad_sequences(tt_id1, maxlen=max_length, padding='post', value=pad)
for i in range(3):
mc_token_ids.append(len(ml))
input_ids.append(input1[0])
lm_labels.append(fakechoice[0])
token_type_ids.append(tt_id1[0])
input2 = [bos + commonsensetask + rstokn + value[1] + rstokn + value[7 + i] + eos]
mc_token_ids.append(len(input2[0]))
input2 = sequence.pad_sequences(input2, maxlen=max_length, padding='post', value=pad)
input_ids.append(input2[0])
choice = [[-1] + [-1] * len(commonsensetask) + [-1] + [-1] * len(value[1]) + [-1] + value[7 + i] + eos]
choice = sequence.pad_sequences(choice, maxlen=max_length, padding='post', value=choice_padding)
lm_labels.append(choice[0])
tt_id2 = [(len(commonsensetask) + 1) * rstokn + (len(value[1]) + 1) * questons + (len(value[7 + i]) + 2) * rstokn]
tt_id2 = sequence.pad_sequences(tt_id2, maxlen=max_length, padding='post', value=pad)
token_type_ids.append(tt_id2[0])
mc_labels.append(value[5])
mc_labels.append(value[5])
# mc_labels.append(0)
return input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels
def converting_tokens(data, tokenizer):
print("Converting tokens to ids ...", flush=True)
input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels = padding_falsesent_choices(data, tokenizer)
input_ids = torch.tensor(input_ids)
input_ids = input_ids.view((-1, 2) + input_ids.shape[1:])
mc_token_ids = torch.tensor(mc_token_ids)
mc_token_ids = mc_token_ids.view((-1, 2) + mc_token_ids.shape[1:])
lm_labels = torch.tensor(lm_labels)
lm_labels = lm_labels.view((-1, 2) + lm_labels.shape[1:])
token_type_ids = torch.tensor(token_type_ids)
token_type_ids = token_type_ids.view((-1, 2) + token_type_ids.shape[1:])
mc_labels = torch.tensor(mc_labels)
mc_labels = mc_labels.view((-1, 2) + mc_labels.shape[1:])
pickle.dump(input_ids, open("data/pickle/input_ids.p", "wb"))
pickle.dump(mc_token_ids, open("data/pickle/mc_token_ids.p", "wb"))
pickle.dump(lm_labels, open("data/pickle/lm_labels.p", "wb"))
pickle.dump(token_type_ids, open("data/pickle/token_type_ids.p", "wb"))
pickle.dump(mc_labels, open("data/pickle/mc_labels.p", "wb"))
return input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels
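# Note (added for clarity): padding_falsesent_choices() appends rows in
# (context + candidates, context + selected answer/fact) pairs, so the
# .view((-1, 2) + ...) calls above regroup those flattened rows into the
# (example, pair) tensors the double-heads model expects.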
def train(model, optimizer, scheduler, train_data, output_dir, num_train_epochs, tokenizer, lm_coef, mc_coef,gradient_accumulation_steps, mgpu, temp=[], valid_data = []):
training_loss = {}
evaluation_loss = {}
global_steps = 0
for epochs in range(num_train_epochs):
model.train()
print("Training start for epoch {}".format(epochs), flush=True)
nb_tr_steps, tr_loss = 0, 0
optimizer.zero_grad()
lm_sub_batch_loss, mc_sub_batch_loss, sub_batch_loss = 0, 0, 0
print("sub_batch_loss \t lm_sub_batch_loss \t mc_sub_batch_loss")
for step, batch in (enumerate(train_data)):
model.train()
batch = tuple(t.to(device).type(torch.cuda.LongTensor) for t in batch)
input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels = batch
lm_loss, mc_loss, *_ = model(
input_ids, token_type_ids=token_type_ids, mc_token_ids=mc_token_ids,
mc_labels=mc_labels, lm_labels=lm_labels, task=input_ids[0][0][1]
)
mc_loss = mc_loss[0]
del input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels
loss = (lm_loss * lm_coef) + (mc_loss * mc_coef)
loss = loss.mean()
loss /= gradient_accumulation_steps
loss.backward()
tr_loss += loss.item()
lm_sub_batch_loss += lm_loss.item()
mc_sub_batch_loss += mc_loss.item()
sub_batch_loss += loss.item()
if (global_steps + 1) % gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step()
# global_steps +=1
optimizer.zero_grad()
print("{} \t {} \t {}".format(sub_batch_loss, lm_sub_batch_loss/gradient_accumulation_steps, mc_sub_batch_loss/gradient_accumulation_steps))
writer.add_scalar('Training batch loss', sub_batch_loss, global_steps+1)
writer.add_scalar('Training lm batch loss', lm_sub_batch_loss/gradient_accumulation_steps, global_steps+1)
writer.add_scalar('Training mc batch loss', mc_sub_batch_loss/gradient_accumulation_steps, global_steps+1)
training_loss[(global_steps+1)] = (sub_batch_loss, lm_sub_batch_loss/gradient_accumulation_steps, mc_sub_batch_loss/gradient_accumulation_steps)
lm_sub_batch_loss, mc_sub_batch_loss, sub_batch_loss = 0, 0, 0
if (global_steps + 1) % 800 == 0:
eval_loss, eval_lm_loss, eval_mc_loss = evaluate_gpt2(model, valid_data)
print("{} \t {} \t {}".format(eval_mc_loss, eval_lm_loss, eval_mc_loss))
writer.add_scalar('Eval total loss - 100', eval_loss, (global_steps + 1))
writer.add_scalar('Eval total LM loss - 100', eval_lm_loss, (global_steps + 1))
writer.add_scalar('Eval total MC loss - 100', eval_mc_loss, (global_steps + 1))
evaluation_loss[(global_steps + 1)] = (eval_loss, eval_lm_loss, eval_mc_loss)
if not os.path.exists(output_dir + '/' + str(global_steps + 1)):
os.makedirs(output_dir + '/' + str(global_steps + 1))
torch.save(model, output_dir + '/' + str(global_steps + 1) + '/' + str(global_steps + 1) + '.pt')
# model.save_state_dict(output_dir + '/' + str(global_steps + 1))
global_steps += 1
print("Epoch Completed at Step Size {}".format(global_steps))
if not os.path.exists(output_dir + '/' + '_epoch_' + str(epochs)):
os.makedirs(output_dir + '/' + '_epoch_' + str(epochs))
torch.save(model, output_dir + '/' + '_epoch_' + str(epochs) + '/' + str(epochs) + '.pt')
# model.save_state_dict(output_dir + '/' + '_epoch_' + str(epochs))
pickle.dump(training_loss, open("data/pickle/training_loss-melco-update.p", "wb"))
pickle.dump(evaluation_loss, open("data/pickle/evaluation_loss-melco-update.p", "wb"))
return model
def evaluate_gpt2(model, valid_data):
lm_sub_batch_loss, mc_sub_batch_loss = 0, 0
model.eval()
print("\n *************************Evaluation************************************ \n")
for step, batch in (enumerate(valid_data)):
batch = tuple(t.to(device).type(torch.cuda.LongTensor) for t in batch)
input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels = batch
lm_loss, mc_loss, *_ = model(
input_ids, token_type_ids=token_type_ids, mc_token_ids=mc_token_ids,
mc_labels=mc_labels, lm_labels=lm_labels, task=input_ids[0][0][1]
)
del input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels
lm_sub_batch_loss += lm_loss.item()
mc_sub_batch_loss += mc_loss[0].item()
return (lm_sub_batch_loss + mc_sub_batch_loss)/len(valid_data), (lm_sub_batch_loss)/len(valid_data), (mc_sub_batch_loss)/len(valid_data)
def main():
flag = True
mgpu = True
output_dir= 'checkpoints-More-melco-new'
epochs = 3
gradient_accumulation_steps = 8
lm_coef, mc_coef = 1, 0
token_class = GPT2Tokenizer
model_Class = MTDNN
gpt_model = model_Class.from_pretrained('omcs/-Final')
# gpt_model = model_Class.from_pretrained('gpt2-large')
gpt_tokenizer = token_class.from_pretrained('omcs/-Final', do_lower_case=True)
# gpt_tokenizer = token_class.from_pretrained('gpt2-large', do_lower_case=True)
gpt_model, gpt_tokenizer = Utils.add_special_tokens(gpt_model, gpt_tokenizer, ATTR_TO_SPECIAL_TOKEN)
gpt_model.to(device)
#gpt_model = torch.nn.DataParallel(gpt_model, output_device=1, device_ids=[0, 1])
cache_input_ids, cache_mc_token_ids, cache_lm_labels, cache_token_type_ids, cache_mc_labels = \
"data/pickle/input_ids.p", "data/pickle/mc_token_ids.p", "data/pickle/lm_labels.p", "data/pickle/token_type_ids.p", "data/pickle/mc_labels.p"
if flag and os.path.exists(cache_input_ids) and os.path.exists(cache_mc_token_ids) and os.path.exists(
cache_lm_labels) and os.path.exists(cache_token_type_ids) and os.path.exists(cache_mc_labels):
print("Token ids loaded from previous processed file ... ", flush=True)
input_ids, mc_token_ids, lm_labels, token_type_ids, mc_labels = pickle.load(open(cache_input_ids, "rb")), pickle.load(open(cache_mc_token_ids, "rb")), \
pickle.load(open(cache_lm_labels, "rb")), pickle.load(open(cache_token_type_ids, "rb")), \
pickle.load(open(cache_mc_labels, "rb"))
else:
data = data_preprocess()
input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels = converting_tokens(data, gpt_tokenizer)
temp = [input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels]
train_data, valid_data = Utils.build_dataloader((input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels))
train_data, valid_data = Utils.generate_batch(train_data, valid_data, 1)
t_total = len(train_data) / epochs
learning_rate, adam_epsilon, weight_decay, warmup_steps = 1e-5, 1e-8, 0, 0
optimizer, scheduler = Utils.optimizer_generater(gpt_model, learning_rate, adam_epsilon, weight_decay, warmup_steps, t_total)
model = train(gpt_model, optimizer, scheduler, train_data, output_dir, epochs, gpt_tokenizer, lm_coef, mc_coef, gradient_accumulation_steps,
mgpu, temp, valid_data)
print("End of execution", flush=True)
output_dir = output_dir + '/' + 'final'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
gpt_tokenizer.save_pretrained(output_dir)
if __name__ == '__main__':
main()
|
anandhperumal/ANA-at-SemEval-2020-Task-4-UNION
|
MTD-NCH.py
|
MTD-NCH.py
|
py
| 19,668 |
python
|
en
|
code
| 5 |
github-code
|
6
|
18164640711
|
import pickle
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
# Download NLTK data (you only need to do this once)
nltk.download('stopwords')
nltk.download('wordnet')
# Load the trained model and vectorizer
with open('check_spam_classifier.pkl', 'rb') as clf_file:
clf = pickle.load(clf_file)
with open('check_spam_vectorizer.pkl', 'rb') as vectorizer_file:
vectorizer = pickle.load(vectorizer_file)
# Load labels from the text file
with open('labels.txt', 'r') as labels_file:
labels = labels_file.read().splitlines()
# Define stopwords and lemmatizer
stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
def preprocess_input(text):
# Preprocess the input text in the same way as the training data
text = text.lower()
text = ' '.join([word for word in text.split() if word not in stop_words])
text = ' '.join([lemmatizer.lemmatize(word) for word in text.split()])
return text
def is_scam(input_text):
# Preprocess the input text
input_text = preprocess_input(input_text)
# Vectorize the preprocessed text
input_text_tfidf = vectorizer.transform([input_text])
# Make a prediction
prediction = clf.predict(input_text_tfidf)
# Get the label using the labels list
predicted_label = labels[prediction[0]]
return predicted_label
if __name__ == "__main__":
user_input = input("Enter text to check if it's a scam: ")
result = is_scam(user_input)
print(f"Predicted label: {result}")
|
GOVINDFROMINDIA/Twitter-Scam-Victims
|
dsg.py
|
dsg.py
|
py
| 1,596 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41794735960
|
import datetime
import unittest
from pyspark import SparkConf
from pyspark.sql import SparkSession
import pyspark.sql.functions as f
from pyspark.sql.types import StructType, StructField, IntegerType, StringType, MapType, ArrayType
import json
import csv
from src.transformations import add_columns, running_total, group_sales_by_type
# TODO: Include testing output map and array data
class TestTransformations(unittest.TestCase):
def setUp(self) -> None:
print("Setting up Spark")
conf = SparkConf().set("spark.driver.memory", "8g")
self.spark = SparkSession \
.builder \
.master("local[4]") \
.config(conf=conf) \
.appName("test simple transformation") \
.getOrCreate()
def test_add_columns(self):
# Create test data with each row as tuple
test_data = [(1, 2), (3, 4), (5, 6)]
# Create test DataFrame from the test data, pass the column names as required
test_df = self.spark.createDataFrame(data=test_data, schema=["first", "second"])
# Show data-frame
test_df.show(truncate=False)
# Execute transformation on the test data-frame and show the results
result_df = test_df.transform(add_columns)
result_df.show(truncate=False)
# Validate column
result_columns = result_df.columns
self.assertIn("sum", result_columns)
# Get rest column out of the data frame as list
result_data = result_df.select("sum").collect()
result_data = [item["sum"] for item in result_data]
# Validate result column values
self.assertListEqual(result_data, [3, 7, 11])
def test_map_data(self):
test_data = [
(1, "product_1", "2022-11-01", {"store 1": 12, "store 2": 3, "online": 5}),
(2, "product_1", "2022-11-02", {"store 1": 5, "online": 2}),
(3, "product_1", "2022-11-04", {"store 1": 8, "store 2": 12, "online": 11}),
(4, "product_1", "2022-11-05", {"store 1": 3, "store 2": 3})
]
test_df = self.spark.createDataFrame(test_data, schema=["order_id", "product", "date", "sales"])
test_df.show(truncate=False)
test_df.printSchema()
test_df_schema = StructType([
StructField(name="order_id", dataType=IntegerType(), nullable=False),
StructField(name="product", dataType=StringType(), nullable=False),
StructField(name="date", dataType=StringType(), nullable=False),
StructField(name="sales", dataType=MapType(StringType(), IntegerType(), valueContainsNull=False), nullable=False),
])
test_df = self.spark.createDataFrame(test_data, schema=test_df_schema)
test_df.show(truncate=False)
test_df.printSchema()
def test_list_data(self):
test_data = [
(1, "product_1", "2022-11-01", "2022-11-05", [3, 4, 6, 7, 12]),
(2, "product_1", "2022-11-06", "2022-11-12", [8, 4, 3, 1, 16, 13, 25]),
(3, "product_1", "2022-11-13", "2022-11-15", [3, 3, 6]),
(4, "product_2", "2022-11-01", "2022-11-07", [1, 12, 6, 9, 12, 2, 2]),
]
test_df_schema = StructType([
StructField(name="order_id", dataType=IntegerType(), nullable=False),
StructField(name="product", dataType=StringType(), nullable=False),
StructField(name="start_date", dataType=StringType(), nullable=False),
StructField(name="end_date", dataType=StringType(), nullable=False),
StructField(name="sales", dataType=ArrayType(IntegerType()), nullable=False),
])
test_df = self.spark.createDataFrame(test_data, schema=test_df_schema)\
.withColumn("start_date", f.to_date("start_date"))\
.withColumn("end_date", f.to_date("end_date"))
test_df.show(truncate=False)
test_df.printSchema()
sales_data_raw = test_df.select("sales").collect()
print(sales_data_raw)
sales_data = [item["sales"] for item in sales_data_raw]
print(sales_data)
print(type(sales_data))
print([[type(item) for item in data] for data in sales_data])
self.assertListEqual(
sales_data,
[[3, 4, 6, 7, 12], [8, 4, 3, 1, 16, 13, 25], [3, 3, 6], [1, 12, 6, 9, 12, 2, 2]]
)
def test_group_sales_by_type(self):
# Create test data
test_data = [
(1, "product_1", "online", "2022-11-01", 8),
(2, "product_1", "online", "2022-11-02", 6),
(3, "product_1", "online", "2022-11-04", 12),
(4, "product_1", "retail", "2022-11-01", 11),
(5, "product_1", "retail", "2022-11-02", 15),
(6, "product_1", "retail", "2022-11-03", 22),
(7, "product_1", "retail", "2022-11-04", 21),
(8, "product_2", "online", "2022-11-02", 1),
(9, "product_2", "online", "2022-11-03", 3),
(10, "product_2", "retail", "2022-11-01", 1),
(11, "product_2", "retail", "2022-11-02", 5),
(12, "product_2", "retail", "2022-11-04", 2)
]
# Define test data schema
test_df_schema = StructType([
StructField(name="id", dataType=IntegerType(), nullable=False),
StructField(name="product", dataType=StringType(), nullable=False),
StructField(name="sale_type", dataType=StringType(), nullable=False),
StructField(name="sale_date", dataType=StringType(), nullable=False),
StructField(name="num_sales", dataType=IntegerType(), nullable=False),
])
# Create test DataFrame
test_df = self.spark.createDataFrame(test_data, schema=test_df_schema)\
.withColumn("sale_date", f.to_date("sale_date"))
# Print the data frame and its schema
test_df.show(truncate=False)
test_df.printSchema()
# Run the transformation on test data
grouped_data = test_df.transform(group_sales_by_type)
grouped_data.show(truncate=False)
grouped_data.printSchema()
# Collect results to validate
validation_cols = grouped_data.select("sale_dates", "num_sales").collect()
sale_dates = [item['sale_dates'] for item in validation_cols]
num_sales = [item['num_sales'] for item in validation_cols]
# Print sale_dates column result
print(sale_dates)
# Create and validate expected `sale_dates` result
expected_sale_dates = [
[
datetime.datetime.strptime("2022-11-01", "%Y-%m-%d").date(),
datetime.datetime.strptime("2022-11-02", "%Y-%m-%d").date(),
datetime.datetime.strptime("2022-11-04", "%Y-%m-%d").date()
], [
datetime.datetime.strptime("2022-11-01", "%Y-%m-%d").date(),
datetime.datetime.strptime("2022-11-02", "%Y-%m-%d").date(),
datetime.datetime.strptime("2022-11-03", "%Y-%m-%d").date(),
datetime.datetime.strptime("2022-11-04", "%Y-%m-%d").date()
],
[
datetime.datetime.strptime("2022-11-02", "%Y-%m-%d").date(),
datetime.datetime.strptime("2022-11-03", "%Y-%m-%d").date()
],
[
datetime.datetime.strptime("2022-11-01", "%Y-%m-%d").date(),
datetime.datetime.strptime("2022-11-02", "%Y-%m-%d").date(),
datetime.datetime.strptime("2022-11-04", "%Y-%m-%d").date(),
]
]
self.assertListEqual(sale_dates, expected_sale_dates)
# Validate number of sales result
self.assertListEqual(num_sales, [[8, 6, 12], [11, 15, 22, 21], [1, 3], [1, 5, 2]])
def test_create_struct_data(self):
# Create test data
test_data = [
(1, "product_1", "2022-11-01", {"retail": 8, "online": 12}),
(2, "product_1", "2022-11-02", {"retail": 3}),
(3, "product_1", "2022-11-03", {"retail": 5, "online": 2}),
(4, "product_1", "2022-11-04", {"online": 8}),
(5, "product_2", "2022-11-02", {"retail": 2, "online": 1}),
(6, "product_2", "2022-11-03", {"retail": 3, "online": 2}),
]
# Define test data schema
test_df_schema = StructType([
StructField(name="id", dataType=IntegerType(), nullable=False),
StructField(name="product", dataType=StringType(), nullable=False),
StructField(name="sale_date", dataType=StringType(), nullable=False),
StructField(name="num_sales", dataType=StructType([
StructField("retail", IntegerType(), nullable=True),
StructField("online", IntegerType(), nullable=True),
]))
])
# Create test DataFrame
test_df = self.spark.createDataFrame(test_data, schema=test_df_schema) \
.withColumn("sale_date", f.to_date("sale_date"))
# Print the data frame and its schema
test_df.show(truncate=False)
test_df.printSchema()
# method 1 - process the nested Row instances:
num_sales = test_df.select("num_sales").collect()
print(num_sales)
online_sales = [item['num_sales']['online'] for item in num_sales]
retail_sales = [item['num_sales']['retail'] for item in num_sales]
self.assertListEqual(online_sales, [12, None, 2, 8, 1, 2])
self.assertListEqual(retail_sales, [8, 3, 5, None, 2, 3])
# method 2 - select to separate columns
num_sales_method_2 = test_df.select("num_sales").select("num_sales.*").collect()
print(num_sales_method_2)
online_sales_method_2 = [item['online'] for item in num_sales_method_2]
retail_sales_method_2 = [item['retail'] for item in num_sales_method_2]
self.assertListEqual(online_sales_method_2, [12, None, 2, 8, 1, 2])
self.assertListEqual(retail_sales_method_2, [8, 3, 5, None, 2, 3])
# method 3 - convert the struct column to json
num_sales_method_3 = test_df.withColumn("num_sales", f.to_json(f.col("num_sales"))).select("num_sales").collect()
print(num_sales_method_3)
online_sales_method_3 = [
json.loads(item['num_sales'])['online'] if 'online' in json.loads(item['num_sales']) else None
for item in num_sales_method_3
]
retail_sales_method_3 = [
json.loads(item['num_sales'])['retail'] if 'retail' in json.loads(item['num_sales']) else None
for item in num_sales_method_3
]
self.assertListEqual(online_sales_method_3, [12, None, 2, 8, 1, 2])
self.assertListEqual(retail_sales_method_3, [8, 3, 5, None, 2, 3])
def test_running_total(self):
# # Option 1 - provide a date column
# test_data = [
# (1, "product_1", datetime.strptime("2022-11-01", "%Y-%m-%d").date(), 1),
# (2, "product_1", datetime.strptime("2022-11-03", "%Y-%m-%d").date(), 1),
# (3, "product_1", datetime.strptime("2022-11-04", "%Y-%m-%d").date(), 3),
# (4, "product_1", datetime.strptime("2022-11-05", "%Y-%m-%d").date(), 2),
# (5, "product_2", datetime.strptime("2022-11-02", "%Y-%m-%d").date(), 4),
# (6, "product_2", datetime.strptime("2022-11-04", "%Y-%m-%d").date(), 3),
# ]
# Option 2 - input date as string and cast in Spark
test_data = [
(1, "product_1", "2022-11-01", 1),
(2, "product_1", "2022-11-03", 1),
(3, "product_1", "2022-11-04", 3),
(4, "product_1", "2022-11-05", 2),
(5, "product_2", "2022-11-02", 4),
(6, "product_2", "2022-11-04", 3),
]
test_df_columns = ["order_id", "product", "order_date", "qty"]
test_df = self.spark.createDataFrame(test_data, test_df_columns)\
.withColumn("order_date", f.to_date("order_date"))
test_df.show(truncate=False)
test_df.printSchema()
result_df = test_df.transform(running_total)
result_df.show(truncate=False)
result_data = result_df.select("running_sum_qty").collect()
result_data = [item['running_sum_qty'] for item in result_data]
self.assertListEqual(result_data, [1, 2, 5, 7, 4, 7])
def test_group_sales_by_type_from_file(self):
# Define test data schema
test_df_schema = StructType([
StructField(name="id", dataType=IntegerType(), nullable=False),
StructField(name="product", dataType=StringType(), nullable=False),
StructField(name="sale_type", dataType=StringType(), nullable=False),
StructField(name="sale_date", dataType=StringType(), nullable=False),
StructField(name="num_sales", dataType=IntegerType(), nullable=False),
])
# Read test data from .csv file
test_df = self.spark.read.option("header", True).schema(test_df_schema).csv("test_data/test_data.csv")
test_df.show(truncate=False)
test_df.printSchema()
# Perform the transformation
result_df = test_df.transform(group_sales_by_type)
result_df.show(truncate=False)
result_df.printSchema()
# Extract result data frame to list
result_data_raw = result_df.select("num_sales").collect()
result_data = [item["num_sales"] for item in result_data_raw]
# Load expected data
with open("test_data/test_result.csv", mode='r') as file_handle:
expected_data = [json.loads(line[0]) for line in csv.reader(file_handle)]
print(f"Expected data: {expected_data}")
self.assertListEqual(result_data, expected_data)
|
SA01/spark-unittest-tutorial
|
tests/test_transformations.py
|
test_transformations.py
|
py
| 13,745 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23210233427
|
import pandas as pd
from morpheus import SequentialComposition, ParallelComposition
from morpheus.algo.selection import base_selection_algorithm, random_selection_algorithm
from morpheus.utils.encoding import *
from morpheus.utils import debug_print
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
VERBOSITY = 0
def default_dataset(n_features=7, random_state=997):
"""
Generate a dataset to be used in tests.
Returns:
"""
X, y = make_classification(
n_samples=10 ** 3,
n_features=n_features,
n_informative=n_features,
n_repeated=0,
n_redundant=0,
n_clusters_per_class=2,
random_state=random_state,
)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=random_state
)
train = pd.DataFrame(X_train)
train = train.assign(y=y_train)
test = pd.DataFrame(X_test)
test = test.assign(y=y_test)
return train, test
def default_chain(random_state=997):
"""
Default classifier chain.
For use in further tests.
Returns:
"""
train, _ = default_dataset(random_state=random_state)
m_list = default_m_list_for_chain(train.values)
sc = SequentialComposition()
for m in m_list:
sc.add_estimator(m, location="back")
return sc
def default_ensemble(random_state=997):
"""
    Default classifier ensemble.
For use in further tests.
Returns:
"""
train, _ = default_dataset(random_state=random_state)
m_list = default_m_list_for_ensemble(train.values)
pc = ParallelComposition()
for m in m_list:
pc.add_estimator(m)
return pc
def default_m_list_for_chain(data):
targ_ids_1 = [4, 5]
desc_ids_1 = [0, 1, 2]
targ_ids_2 = [7]
desc_ids_2 = [1, 2, 5]
all_desc_ids = [desc_ids_1, desc_ids_2]
all_targ_ids = [targ_ids_1, targ_ids_2]
m_list = []
ids = zip(all_desc_ids, all_targ_ids)
for desc_ids, targ_ids in ids:
msg = """
Learning model with desc ids: {}
targ ids: {}
""".format(
desc_ids, targ_ids
)
print(msg)
if set(targ_ids).issubset({6, 7}):
learner = RandomForestClassifier
elif set(targ_ids).issubset({0, 1, 2, 3, 4, 5}):
learner = RandomForestRegressor
else:
msg = """
Cannot learn mixed (nominal/numeric) models
"""
raise ValueError(msg)
# Learn a model for desc_ids-targ_ids
m = learn_model(data, desc_ids, targ_ids, learner, max_depth=5, n_estimators=5)
m_list.append(m)
return m_list
def default_m_list_for_ensemble(data):
targ_ids_1 = [5]
desc_ids_1 = [0, 1, 2]
targ_ids_2 = [4, 5]
desc_ids_2 = [0, 1, 3]
all_desc_ids = [desc_ids_1, desc_ids_2]
all_targ_ids = [targ_ids_1, targ_ids_2]
m_list = []
ids = zip(all_desc_ids, all_targ_ids)
for desc_ids, targ_ids in ids:
msg = """
Learning model with desc ids: {}
targ ids: {}
""".format(
desc_ids, targ_ids
)
print(msg)
if set(targ_ids).issubset({6, 7}):
learner = RandomForestClassifier
elif set(targ_ids).issubset({0, 1, 2, 3, 4, 5}):
learner = RandomForestRegressor
else:
msg = """
Cannot learn mixed (nominal/numeric) models
"""
raise ValueError(msg)
# Learn a model for desc_ids-targ_ids
m = learn_model(data, desc_ids, targ_ids, learner, max_depth=5, n_estimators=5)
m_list.append(m)
return m_list
def default_m_list_for_mercs(data):
n, m = data.shape
attributes = list(range(m))
metadata = {"nb_atts": m}
settings = {"param": 1, "its": 1}
m_codes = base_selection_algorithm(metadata, settings)
all_desc_ids, all_targ_ids = [], []
for m_code in m_codes:
desc_ids, targ_ids, _ = code_to_query(m_code)
all_desc_ids.append(desc_ids)
all_targ_ids.append(targ_ids)
m_list = []
ids = zip(all_desc_ids, all_targ_ids)
for desc_ids, targ_ids in ids:
msg = """
Learning model with desc ids: {}
targ ids: {}
""".format(
desc_ids, targ_ids
)
print(msg)
if set(targ_ids).issubset(attributes[-1:]):
learner = RandomForestClassifier
elif set(targ_ids).issubset(attributes[:-1]):
learner = RandomForestRegressor
else:
msg = """
Cannot learn mixed (nominal/numeric) models
"""
raise ValueError(msg)
# Learn a model for desc_ids-targ_ids
m = learn_model(data, desc_ids, targ_ids, learner, max_depth=5, n_estimators=5)
m_list.append(m)
return m_list
def random_m_list_for_mercs(data, its=1, fraction=0.3, random_state=997):
n, m = data.shape
attributes = list(range(m))
metadata = {"nb_atts": m}
settings = {"param": 1, "its": its, "fraction": fraction}
m_codes = random_selection_algorithm(metadata, settings, random_state=random_state)
all_desc_ids, all_targ_ids = [], []
for m_code in m_codes:
desc_ids, targ_ids, _ = code_to_query(m_code)
all_desc_ids.append(desc_ids)
all_targ_ids.append(targ_ids)
m_list = []
ids = zip(all_desc_ids, all_targ_ids)
for desc_ids, targ_ids in ids:
msg = """
Learning model with desc ids: {}
targ ids: {}
""".format(
desc_ids, targ_ids
)
debug_print(msg, level=1, V=VERBOSITY)
if set(targ_ids).issubset(attributes[-1:]):
learner = RandomForestClassifier
elif set(targ_ids).issubset(attributes[:-1]):
learner = RandomForestRegressor
else:
msg = """
Cannot learn mixed (nominal/numeric) models
"""
raise ValueError(msg)
# Learn a model for desc_ids-targ_ids
m = learn_model(
data,
desc_ids,
targ_ids,
learner,
max_depth=5,
n_estimators=5,
random_state=random_state,
)
m_list.append(m)
return m_list
def learn_model(data, desc_ids, targ_ids, model, **kwargs):
"""
Learn a model from the data.
The desc ids and targ ids identify which algo task
you should try to learn from the data.
Model is a machine learning method that has a .fit() method.
Args:
data:
desc_ids:
targ_ids:
model:
**kwargs:
Returns:
"""
X, Y = data[:, desc_ids], data[:, targ_ids]
if X.shape[1] == 1:
X = X.ravel()
if Y.shape[1] == 1:
Y = Y.ravel()
try:
clf = model(**kwargs)
clf.fit(X, Y)
except ValueError as e:
print(e)
# Bookkeeping
clf.desc_ids = desc_ids
clf.targ_ids = targ_ids
return clf
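# Hedged usage sketch (added for illustration; not part of the original module).
# Builds the default dataset and fits a single model with learn_model(); the
# column indices below are illustrative assumptions (0-6 are features, 7 is the
# class label produced by default_dataset()).
if __name__ == "__main__":
    train, test = default_dataset()
    clf = learn_model(
        train.values,
        desc_ids=[0, 1, 2],
        targ_ids=[7],
        model=RandomForestClassifier,
        max_depth=5,
        n_estimators=5,
    )
    print(clf.predict(test.values[:, [0, 1, 2]])[:5])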
|
eliavw/morpheus
|
src/morpheus/tests/basics.py
|
basics.py
|
py
| 7,311 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39729133373
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('store', '0010_auto_20151113_1608'),
]
operations = [
migrations.AddField(
model_name='review',
name='author',
field=models.CharField(default=b'Anonymous', max_length=30),
),
]
|
midnitehighways/shop
|
store/migrations/0011_review_author.py
|
0011_review_author.py
|
py
| 420 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36867613594
|
from datetime import datetime
from sqlalchemy import Column, TIMESTAMP
class TimestampsMixin:
__abstract__ = True
__created_at_name__ = 'created_at'
__updated_at_name__ = 'updated_at'
    __datetime_func__ = datetime.now  # pass the callable so each INSERT/UPDATE gets a fresh timestamp
created_at = Column(
__created_at_name__,
TIMESTAMP(timezone=False),
default=__datetime_func__,
nullable=False
)
updated_at = Column(
__updated_at_name__,
TIMESTAMP(timezone=False),
default=__datetime_func__,
onupdate=__datetime_func__,
nullable=False
)
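# Hedged usage sketch (added for illustration; not part of the original module).
# A minimal declarative model that pulls in the mixin; the table/column names
# are assumptions, and the import path assumes SQLAlchemy 1.4+.
if __name__ == "__main__":
    from sqlalchemy import Integer, String, create_engine
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()

    class User(TimestampsMixin, Base):
        __tablename__ = "users"
        id = Column(Integer, primary_key=True)
        name = Column(String(50), nullable=False)
        # created_at / updated_at are inherited from TimestampsMixin

    engine = create_engine("sqlite://")  # throwaway in-memory database
    Base.metadata.create_all(engine)
    print(User.__table__.columns.keys())  # includes created_at and updated_at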
|
siarie/fastapi-start
|
app/db/mixins.py
|
mixins.py
|
py
| 581 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14555529648
|
from time import sleep
import btc
import click
from core import BitcoinTwitterProfile
import schedule
@click.group()
def bitc0in_twitter():
"""
Syncs your twitter profile with bitcoin's volatility.
"""
@bitc0in_twitter.command()
def run():
"""Start Program"""
bitcoin_percent_change = btc.get_percent_change()
profile = BitcoinTwitterProfile(bitcoin_percent_change=bitcoin_percent_change)
def job():
bitcoin_percent_change = btc.get_percent_change()
state = profile.get_market_state(bitcoin_percent_change)
if state == "bearish":
profile.dumping()
else:
profile.pumping()
schedule.every(10).minutes.do(job)
while True:
schedule.run_pending()
sleep(1)
# print(".", end="", flush=True)
@bitc0in_twitter.command()
def test():
"""Tests everything is setup correctly."""
click.echo("TESTING!!!")
bms = BitcoinTwitterProfile(bitcoin_percent_change=5)
bms.dumping()
click.echo("check the for bearish profile")
click.echo(f"state: {bms.state}")
click.echo("Sleeping for 15 seconds.")
sleep(15)
bms.pumping()
click.echo("check the for bullish profile")
click.echo(f"state: {bms.state}")
sleep(15)
bms.dumping()
if __name__ == "__main__":
bitc0in_twitter()
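# Hedged usage note (added for illustration): with click installed, the group's
# commands are invoked from the shell, e.g.
#   python cli.py run    # start the scheduler loop
#   python cli.py test   # flip the profile once each way as a smoke test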
|
dgnsrekt/bitc0in-twitter
|
bitc0in_twitter/cli.py
|
cli.py
|
py
| 1,335 |
python
|
en
|
code
| 1 |
github-code
|
6
|
29282262756
|
# -*- coding: utf-8 -*-
import ispformat.schema as _schema
from jsonschema import Draft4Validator, RefResolver, draft4_format_checker
from jsonschema.exceptions import RefResolutionError, ValidationError
from urlparse import urlsplit
class MyRefResolver(RefResolver):
def resolve_remote(self, uri):
# Prevent remote resolving
raise RefResolutionError("LOL NOPE")
geojson_allowed_types=('Polygon', 'MultiPolygon')
def validate_geojson_type(d):
"""
Make sure a geojson dict only contains allowed geometry types
"""
type_=d.get('type')
if type_ not in geojson_allowed_types:
return False
return True
def validate_geojson(geodict):
"""
Convenience function to validate a geojson dict
"""
_version = 0.1
schema = _schema.load_schema(_version, 'geojson/geojson')
v = Draft4Validator(
schema,
resolver=MyRefResolver.from_schema(schema, store=_schema.deps_for_version(_version)),
format_checker=draft4_format_checker,
)
for err in v.iter_errors(geodict):
return False
if not validate_geojson_type(geodict):
return False
return True
def validate_isp(jdict):
"""
Validate a json-object against the isp json-schema
"""
if not 'version' in jdict:
raise ValidationError(u'version is a required property')
try:
schema=_schema.versions[jdict['version']]
except (AttributeError, TypeError, KeyError):
raise ValidationError(u'version %r unsupported'%jdict['version'])
v=Draft4Validator(
schema,
resolver=MyRefResolver.from_schema(schema, store=_schema.deps_for_version(jdict['version'])),
format_checker=draft4_format_checker,
)
for err in v.iter_errors(jdict):
yield err
def is_valid_url(u):
try:
pu=urlsplit(u)
except:
return False
if pu.scheme not in ('', 'http', 'https'):
return False
if not pu.netloc:
return False
return True
if 'website' in jdict and not is_valid_url(jdict['website']):
yield ValidationError(u'%r must be an absolute HTTP URL'%u'website',
instance=jdict[u'website'], schema=schema[u'properties'][u'website'],
path=[u'website'], schema_path=[u'properties', u'website', u'description'],
validator=u'validate_url', validator_value=jdict['website'])
if 'logoURL' in jdict and not is_valid_url(jdict['logoURL']):
yield ValidationError(u'%r must be an absolute HTTP URL'%u'logoURL',
instance=jdict[u'logoURL'], schema=schema[u'properties'][u'logoURL'],
path=[u'logoURL'], schema_path=[u'properties', u'logoURL', u'description'],
validator=u'validate_url', validator_value=jdict['logoURL'])
sch=schema[u'properties'][u'otherWebsites'][u'patternProperties'][u'^.+$']
for name, url in jdict.get('otherWebsites', {}).iteritems():
if is_valid_url(url):
continue
yield ValidationError(u'%r must be an absolute HTTP URL'%name,
instance=url, schema=sch, path=[u'otherWebsite', name],
schema_path=[u'properties', u'otherWebsites', u'patternProperties', u'^.+$', 'description'],
validator=u'validate_url', validator_value=url)
for i, ca in enumerate(jdict.get('coveredAreas', [])):
area=ca.get('area')
if area and validate_geojson_type(area):
continue
elif not area:
continue
yield ValidationError(
u'GeoJSON can only contain the following types: %s'%repr(geojson_allowed_types),
instance=ca, schema=schema[u'definitions'][u'coveredArea'][u'properties'][u'area'],
path=['coveredAreas', i, 'area'],
schema_path=[u'properties', u'coveredAreas', u'items', u'properties', u'area'],
validator=u'validate_geojson_type', validator_value=ca
)
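# Hedged usage sketch (added for illustration; not part of the original module).
# Validate a parsed ISP JSON document and print any errors; the file name below
# is an assumption.
#
#   import json
#   with open('isp.json') as fh:
#       jdict = json.load(fh)
#   for err in validate_isp(jdict):
#       print(err)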
|
Psycojoker/isp-format
|
ispformat/validator/schemavalidator.py
|
schemavalidator.py
|
py
| 4,121 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12014916109
|
'''
Find the nearest smaller numbers on left side in an array
Given an array of integers, find the nearest smaller number for every element such that the smaller element is on left side.
Examples:
Input: arr[] = {1, 6, 4, 10, 2, 5}
Output: {_, 1, 1, 4, 1, 2}
First element ('1') has no element on left side. For 6,
there is only one smaller element on left side '1'.
For 10, there are three smaller elements on left side (1,
6 and 4), nearest among the three elements is 4.
Input: arr[] = {1, 3, 0, 2, 5}
Output: {_, 1, _, 0, 2}
Expected time complexity is O(n).
https://www.geeksforgeeks.org/find-the-nearest-smaller-numbers-on-left-side-in-an-array/
'''
array = [1, 6, 4, 10, 2, 5]
stack = []
for element in array:
    while stack and stack[-1] >= element:  # pop values >= element so only a strictly smaller one remains
stack.pop()
print(stack[-1] if stack else "_")
stack.append(element)
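# Hedged variant (added for illustration): the same stack scan, wrapped in a
# function that returns the answers instead of printing them.
def nearest_smaller_to_left(arr):
    stack, result = [], []
    for x in arr:
        # pop values >= x so only strictly smaller candidates remain
        while stack and stack[-1] >= x:
            stack.pop()
        result.append(stack[-1] if stack else None)
        stack.append(x)
    return result


print(nearest_smaller_to_left([1, 6, 4, 10, 2, 5]))  # [None, 1, 1, 4, 1, 2]
print(nearest_smaller_to_left([1, 3, 0, 2, 5]))      # [None, 1, None, 0, 2]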
|
umr55766/warmup
|
Find-the-nearest-smaller-numbers-on-left-side-in-an-array.py
|
Find-the-nearest-smaller-numbers-on-left-side-in-an-array.py
|
py
| 876 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6401924379
|
# version: python 3.7
# zID: z5052292
from socket import *
from datetime import datetime
import time
import sys
serverIP = sys.argv[1]
serverPort = int(sys.argv[2])
clientSocket = socket(AF_INET, SOCK_DGRAM)
list_rtts = []
packets_lost = 0
for i in range(10):
time_stamp = datetime.now().isoformat(sep=' ')[:-3]
ping_message = "PING" + str(i) + ' ' + time_stamp + '\r\n'
time_send = datetime.now()
clientSocket.sendto(ping_message.encode(), (serverIP, serverPort))
try:
clientSocket.settimeout(1)
response, severAddress = clientSocket.recvfrom(2048)
time_receive = datetime.now()
rtt = round((time_receive - time_send).total_seconds() * 1000)
list_rtts.append(rtt)
print(f'Ping to {serverIP}, seq = {i}, rtt = {rtt} ms')
clientSocket.settimeout(None)
except timeout:
packets_lost += 1
print(f'Ping to {serverIP}, seq = {i}, rtt = time out')
print("\n")
print(f'Minimum RTT = {min(list_rtts)} ms')
print(f'Maximum RTT = {max(list_rtts)} ms')
print(f'Average RTT = {round(float(sum(list_rtts)/len(list_rtts)))} ms')
print(f'10 packets transmitted, {10 - int(packets_lost)} packets received, {float(packets_lost) / 10 * 100}% packet loss.')
clientSocket.close()
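# Hedged usage note (added for illustration): the script reads the server address
# from the command line; the host and port below are placeholders.
#   python PingClient_zhou.py 127.0.0.1 12000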
|
YuanG1944/COMP9331-Computer-Networks-and-Applications
|
Lab2/PingClient_zhou.py
|
PingClient_zhou.py
|
py
| 1,235 |
python
|
en
|
code
| 4 |
github-code
|
6
|
10426011052
|
"""Conceptual model page."""
from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import RichTextField
from wagtail.admin.edit_handlers import FieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
class CMPage(Page):
template = "ecos_cm/cm_page.html"
ECOLOGICAL_PROCESSES = "ecological processes"
TARGET_SPECIES = "target species"
CONCELTUAL_MODEL_TYPE_CHOICES = [
(ECOLOGICAL_PROCESSES,'ecological processes'),
(TARGET_SPECIES, 'target species'),
]
conceptual_model_type = models.CharField(
choices=CONCELTUAL_MODEL_TYPE_CHOICES,
max_length=100,
default=ECOLOGICAL_PROCESSES,
)
cm_title = models.CharField(max_length=300, null=True, blank=True)
cm_image = models.ForeignKey(
"wagtailimages.Image",
null=True,
blank=False,
on_delete=models.SET_NULL,
related_name="+"
)
cm_human_interactions = RichTextField( null=True, blank=True)
cm_ecolagical_processes = RichTextField( null=True, blank=True)
cm_oceanographic_variables = RichTextField( null=True, blank=True)
cm_performance_indicators = RichTextField( null=True, blank=True)
content_panels = Page.content_panels + [
FieldPanel("conceptual_model_type"),
FieldPanel("cm_title"),
ImageChooserPanel("cm_image"),
FieldPanel("cm_human_interactions"),
FieldPanel("cm_ecolagical_processes"),
FieldPanel("cm_oceanographic_variables"),
FieldPanel("cm_performance_indicators"),
]
|
CNR-ISMAR/ecoads
|
ecos_cm/models.py
|
models.py
|
py
| 1,600 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4534058436
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import SPARQLWrapper
# REF [site] >> https://sparqlwrapper.readthedocs.io/en/latest/main.html
def select_example():
sparql = SPARQLWrapper.SPARQLWrapper("http://vocabs.ardc.edu.au/repository/api/sparql/csiro_international-chronostratigraphic-chart_geologic-time-scale-2020")
sparql.setReturnFormat(SPARQLWrapper.JSON)
# Gets the first 3 geological ages from a Geological Timescale database, via a SPARQL endpoint.
sparql.setQuery("""
PREFIX gts: <http://resource.geosciml.org/ontology/timescale/gts#>
SELECT *
WHERE {
?a a gts:Age .
}
ORDER BY ?a
LIMIT 3
"""
)
try:
ret = sparql.queryAndConvert()
for r in ret["results"]["bindings"]:
print(r)
except Exception as ex:
print(ex)
# REF [site] >> https://sparqlwrapper.readthedocs.io/en/latest/main.html
def ask_example():
sparql = SPARQLWrapper.SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setQuery("""
ASK WHERE {
<http://dbpedia.org/resource/Asturias> rdfs:label "Asturias"@es
}
"""
)
sparql.setReturnFormat(SPARQLWrapper.XML)
try:
results = sparql.query().convert()
print(results.toxml())
except Exception as ex:
print(ex)
# REF [site] >> https://sparqlwrapper.readthedocs.io/en/latest/main.html
def construct_example():
sparql = SPARQLWrapper.SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setQuery("""
PREFIX dbo: <http://dbpedia.org/ontology/>
PREFIX sdo: <https://schema.org/>
CONSTRUCT {
?lang a sdo:Language ;
sdo:alternateName ?iso6391Code .
}
WHERE {
?lang a dbo:Language ;
dbo:iso6391Code ?iso6391Code .
FILTER (STRLEN(?iso6391Code)=2) # To filter out non-valid values.
}
LIMIT 3
"""
)
try:
results = sparql.queryAndConvert()
print(results.serialize())
except Exception as ex:
print(ex)
# REF [site] >> https://sparqlwrapper.readthedocs.io/en/latest/main.html
def describe_example():
sparql = SPARQLWrapper.SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setQuery("DESCRIBE <http://dbpedia.org/resource/Asturias>")
try:
results = sparql.queryAndConvert()
print(results.serialize(format="json-ld"))
except Exception as ex:
print(ex)
# REF [site] >> https://sparqlwrapper.readthedocs.io/en/latest/main.html
def update_example():
sparql = SPARQLWrapper.SPARQLWrapper("https://example.org/sparql")
sparql.setHTTPAuth(SPARQLWrapper.DIGEST)
sparql.setCredentials("some-login", "some-password")
sparql.setMethod(SPARQLWrapper.POST)
sparql.setQuery("""
PREFIX dbp: <http://dbpedia.org/resource/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
WITH <http://example.graph>
DELETE {
        dbp:Asturias rdfs:label "Asturies"@ast
}
"""
)
try:
results = sparql.query()
print(results.response.read())
except Exception as ex:
print(ex)
# REF [site] >> https://sparqlwrapper.readthedocs.io/en/latest/main.html
def SPARQLWrapper2_example():
sparql = SPARQLWrapper.SPARQLWrapper2("http://dbpedia.org/sparql")
sparql.setQuery("""
PREFIX dbp: <http://dbpedia.org/resource/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT ?label
WHERE {
dbp:Asturias rdfs:label ?label
}
LIMIT 3
"""
)
try:
for result in sparql.query().bindings:
print(f"{result['label'].lang}, {result['label'].value}")
except Exception as ex:
print(ex)
# REF [site] >> https://sparqlwrapper.readthedocs.io/en/latest/main.html
def partial_interpretation_of_results():
sparql = SPARQLWrapper.SPARQLWrapper2("http://example.org/sparql")
sparql.setQuery("""
SELECT ?subj ?prop
WHERE {
?subj ?prop ?obj
}
"""
)
try:
ret = sparql.query()
print(ret.variables) # This is an array consisting of "subj" and "prop".
for binding in ret.bindings:
# Each binding is a dictionary. Let us just print the results.
print(f"{binding['subj'].value}, {binding['subj'].type}")
print(f"{binding['prop'].value}, {binding['prop'].type}")
except Exception as ex:
print(ex)
#-----
sparql.setQuery("""
SELECT ?subj ?obj ?opt
WHERE {
?subj <http://a.b.c> ?obj .
OPTIONAL {
?subj <http://d.e.f> ?opt
}
}
"""
)
try:
ret = sparql.query()
print(ret.variables) # This is an array consisting of "subj", "obj", "opt".
if ("subj", "prop", "opt") in ret:
# There is at least one binding covering the optional "opt", too.
bindings = ret["subj", "obj", "opt"]
# Bindings is an array of dictionaries with the full bindings.
for b in bindings:
subj = b["subj"].value
o = b["obj"].value
opt = b["opt"].value
# Do something nice with subj, o, and opt.
# Another way of accessing to values for a single variable: take all the bindings of the "subj", "obj", "opt".
subjbind = ret.getValues("subj") # An array of Value instances.
objbind = ret.getValues("obj") # An array of Value instances.
optbind = ret.getValues("opt") # An array of Value instances.
except Exception as ex:
print(ex)
def dbpedia_test():
sparql = SPARQLWrapper.SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setReturnFormat(SPARQLWrapper.JSON)
if True:
sparql.setQuery("""
SELECT ?uri ?name ?page ?nick
WHERE {
?uri a foaf:Person ;
foaf:name ?name;
foaf:page ?page;
foaf:nick ?nick.
}
LIMIT 100
"""
)
elif False:
sparql.setQuery("""
SELECT ?name ?birth ?role
WHERE{
?x a foaf:Person ;
dbpprop:fullname ?name;
dbpprop:countryofbirth ?birth;
dbpprop:role ?role.
FILTER regex(?birth, "land$").
FILTER regex(?birth, "^Eng").
FILTER regex(?birth, "England").
} LIMIT 100
"""
)
try:
ret = sparql.queryAndConvert()
print(ret["results"]["bindings"])
except Exception as ex:
print(ex)
def dbpedia_ko_test():
sparql = SPARQLWrapper.SPARQLWrapper("http://ko.dbpedia.org/sparql")
sparql.setReturnFormat(SPARQLWrapper.JSON)
if False:
sparql.setQuery("""
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX dbo: <http://dbpedia.org/ontology/>
PREFIX dbp: <http://ko.dbpedia.org/property/>
SELECT DISTINCT ?comment
WHERE {
?s foaf:name ?name;
rdfs:comment ?comment;
dbp:occupation ?occupation.
FILTER(REGEX(STR(?occupation), '정치'))
}
LIMIT 30
"""
)
elif False:
sparql.setQuery("""
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX dbo: <http://dbpedia.org/ontology/>
PREFIX dbp: <http://ko.dbpedia.org/property/>
SELECT ?comment, ?relative, ?parent
WHERE {
?s foaf:name ?name;
rdfs:comment ?comment.
FILTER(STR(?name) = '하정우')
OPTIONAL{?relative dbo:relative ?s.}
OPTIONAL{?parent dbo:child ?s.}
}
LIMIT 30
"""
)
elif True:
sparql.setQuery("""
select * where {
?s <http://ko.dbpedia.org/property/장소> ?o
} LIMIT 100
"""
)
elif False:
sparql.setQuery("""
PREFIX dbo: <http://dbpedia.org/ontology/>
PREFIX dbp: <http://ko.dbpedia.org/property/>
PREFIX res: <http://ko.dbpedia.org/resource/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
select * where {
?s rdf:type foaf:Person.
?s <http://ko.dbpedia.org/property/국가> '대한민국'@ko.
}
"""
)
elif False:
sparql.setQuery("""
PREFIX dbo: <http://dbpedia.org/ontology/>
PREFIX dbp: <http://ko.dbpedia.org/property/>
PREFIX res: <http://ko.dbpedia.org/resource/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
select count(*) where {
?s rdf:type foaf:Person.
{?s dbp:출생일 ?Bdate.} UNION {?s dbp:사망일 ?Ddate.}
?s dbo:abstract ?abstract.
?s dbp:국적 ?nation.
}
"""
)
try:
ret = sparql.queryAndConvert()
print(ret["results"]["bindings"])
except Exception as ex:
print(ex)
def main():
#select_example()
#ask_example()
#construct_example()
#describe_example()
#update_example()
#SPARQLWrapper2_example()
#partial_interpretation_of_results()
#-----
dbpedia_test()
dbpedia_ko_test()
#--------------------------------------------------------------------
if "__main__" == __name__:
main()
|
sangwook236/SWDT
|
sw_dev/python/ext/test/database/sparqlwrapper_test.py
|
sparqlwrapper_test.py
|
py
| 7,970 |
python
|
en
|
code
| 17 |
github-code
|
6
|
38030500642
|
import numpy as np
import matplotlib.pyplot as plt
from tqdm.auto import tqdm
"""
Reads Siemens rawdata file and returns the DICOs values
Author: Ali Aghaeifar <[email protected]>
"""
def read_dico(twixObj):
mdb_vop = [mdb for mdb in twixObj[-1]['mdb'] if mdb.is_flag_set('MDH_VOP')]
# concatenate segments of RFs longer than 1ms
DICO_comb = []
for mdb in tqdm(mdb_vop, desc='Reading DICO'):
if mdb.mdh.Counter.Ide == 0:
DICO_comb.append(mdb.data)
else:
DICO_comb[-1] = np.concatenate((DICO_comb[-1],mdb.data), axis=1)
DICO = []
shapes = [dico.shape for dico in DICO_comb] # all shapes
shapes = sorted(set(shapes), key=shapes.index) # unique shapes
for i, shape in enumerate(shapes):
temp = [dico for dico in tqdm(DICO_comb, desc=f'RF Pulse {i}') if dico.shape == shape]
DICO.append(np.stack(temp, axis=-1))
forward = [dico_frw[::2] for dico_frw in DICO]
reflect = [dico_rfl[1::2] for dico_rfl in DICO]
return forward, reflect
# memory optimized version, but slower. Only save integral of forward signal
def read_dico_memOpt(twixObj):
mdb_vop = [mdb for mdb in twixObj[-1]['mdb'] if mdb.is_flag_set('MDH_VOP')]
forward_integral = []
forward_length = []
for mdb in tqdm(mdb_vop, desc = 'Reading DICO'):
DICO_integral = np.sum(np.abs(mdb.data[::2]), axis=1)
DICO_length = mdb.data.shape[1]
if mdb.mdh.Counter.Ide == 0:
forward_integral.append(DICO_integral)
forward_length.append(DICO_length)
else:
forward_integral[-1] = forward_integral[-1] + DICO_integral
forward_length[-1] = forward_length[-1] + DICO_length
forward_integral = np.stack(forward_integral, axis=-1)
# split RFs with different lengths
forward_length_unq = sorted(set(forward_length), key=forward_length.index)
forward_integral = [forward_integral[:, np.where(np.array(forward_length) == l)[0]] for l in forward_length_unq]
return forward_integral, forward_length_unq
def plot_drift(twixObj):
forward_integral, _ = read_dico_memOpt(twixObj)
for dico in forward_integral:
_, ax = plt.subplots()
        ax.plot(dico.squeeze().T)  # plot this pulse's data rather than always the first one
plt.show()
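# Hedged usage sketch (added for illustration; not part of the original module).
# `twixObj` is assumed to come from the twixtools package, matching how it is
# indexed above; the file name is illustrative.
#
#   import twixtools
#   twixObj = twixtools.read_twix('meas_rf_drift.dat')
#   forward, reflect = read_dico(twixObj)
#   plot_drift(twixObj)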
|
aghaeifar-publications/RFPA_drift
|
dico_tools.py
|
dico_tools.py
|
py
| 2,283 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39451137948
|
from tkinter import Tk, StringVar, Label, Button, Entry, filedialog, W
from os.path import exists
import generator as gp
def cmdExec():
if checkFileExist(textIn.get()) and checkFileExist(textOut.get()):
result.set("Gerando planilha de presença ...")
isSuccess = gp.main(textIn.get(), textOut.get())
if isSuccess:
result.set("Planilha de presença gerada com successo")
else:
result.set("Ocorreu um erro ao tentar gerar a planilha de presença")
def checkFileExist(file):
if(file != ""):
name = file.split('/')
if exists(file):
return True
result.set(f"Erro: Arquivo de {name[-1]} não encontrado")
return False
result.set("Erro: Campo vazio")
return False
def cmdSearchFileIn():
filename = filedialog.askopenfilename()
result.set("")
textIn.set(filename)
def cmdSearchFileOut():
filename = filedialog.askopenfilename()
result.set("")
textOut.set(filename)
screen = Tk()
screen.title("Gerador de Lista de Presença")
textIn = StringVar()
textOut = StringVar()
result = StringVar()
# pos screen
width, height = 500, 200
widthScreen = screen.winfo_screenwidth()
heightScreen = screen.winfo_screenheight()
posX = int(widthScreen/2 - width/2)
posY = int(heightScreen/2 - height/2)
screen.geometry(f"{width}x{height}+{posX}+{posY}")
# Labels
labelFileIn = Label(screen, text="Escolha o arquivo de entrada:").grid(row=0, sticky=W)
labelFileOut = Label(screen, text="Escolha o arquivo de saída:").grid(row=2, sticky=W)
labelResult = Label(screen, textvariable=result).grid(row=5, pady=10)
# Text box
textBoxFileIn = Entry(screen, textvariable=textIn).grid(row=1, padx=5, pady=5 ,ipadx=120)
textBoxFileOut = Entry(screen, textvariable=textOut).grid(row=3, padx=5, pady=5, ipadx=120)
# Buttons
btnSearchFileIn = Button(screen, text= "Buscar", command=cmdSearchFileIn).grid(row=1, column=1)
btnSearchFileOut = Button(screen, text= "Buscar", command=cmdSearchFileOut).grid(row=3, column=1)
btnExec = Button(screen, text= "Executar", command=cmdExec).grid(row=4, pady=10)
screen.mainloop()
|
lucasgbezerra/python_projects
|
attendance_sheet/app.py
|
app.py
|
py
| 2,139 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70488508987
|
# accepted on codewars.com
import random
import math
import time
conflicts_threshold = 3
# main method
def solve_n_queens(size, mandatory_coords):
    # here we use simple backtracking
if size <= 10:
answer = queens_backtrack(size, mandatory_coords)
return get_string_of_queens(size, answer) if answer is not None else None
attempts_made = 1
while True:
partial_sol = generate_greed(size, mandatory_coords)
solution = conflicts_solver(partial_sol, size, mandatory_coords)
if not solution and (attempts_made <= 15):
# print(str(size) + ": no solution been found", sep='')
attempts_made += 1
else:
if attempts_made <= 15:
# print(str(size) + ": SOLUTION BEEN FOUND!!!", sep='')
# print("Time elapsed: ", time.time() - start, "seconds.")
return get_string_of_queens(size, solution)
else:
# print(str(size) + ": Tries ended up", sep='')
# print("Time elapsed", time.time() - start, "seconds.")
return None
# here we build a greedy partial solution for the task given
def generate_greed(n, mandatory: list[int]):
    # initializing
    positions = [-1] * n
    verticals = [-1] * n
    diagonals = [[0 for x in range((2 * n) - 1)] for y in range(2)]
    # mandatory queen
    positions[mandatory[0]] = mandatory[1]
    verticals[mandatory[1]] = 1
    diagonals[0][mandatory[0] + mandatory[1]] = 1
    diagonals[1][n - 1 + mandatory[0] - mandatory[1]] = 1
    marked_rows = []
    cols = set()
    for c in range(n):
        if c == mandatory[1]:
            continue
        cols.add(c)
    for row in range(n):
        if row == mandatory[0]:
            continue
        for col in cols:
            if (diagonals[0][row + col] == 0) and (diagonals[1][row + ((n - 1) - col)] == 0):
                positions[row] = col
                verticals[col] = 1
                diagonals[0][row + col] = 1
                diagonals[1][row + ((n - 1) - col)] = 1
                cols.remove(col)
                break
        if positions[row] == -1:
            marked_rows.append(row)
    for row in marked_rows:
        col = cols.pop()
        positions[row] = col
        verticals[col] = 1
        diagonals[0][row + col] += 1
        diagonals[1][row + ((n - 1) - col)] += 1
    return [positions, verticals, diagonals]
# now fixing the previous solution using the min-conflicts algorithm
def conflicts_solver(queens, size, mandatory):
    diagonals = queens.pop()
    verticals = queens.pop()
    positions = queens.pop()
    length = len(positions)
    problem_cols = get_conflicts([positions, verticals, diagonals], mandatory)
    swaps_counter = 0
    err_flag = False
    def place(r, c):
        verticals[c] -= 1
        diagonals[0][r + c] -= 1
        diagonals[1][r + ((length - 1) - c)] -= 1
    def displace(r, new_c):
        verticals[new_c] += 1
        diagonals[0][r + new_c] += 1
        diagonals[1][r + ((length - 1) - new_c)] += 1
    while problem_cols:
        random.shuffle(problem_cols)
        row = problem_cols.pop()
        conflicts = []
        min_conflicts = []
        the_min = math.inf
        for col in range(length):
            if col == mandatory[1]:
                conflicts.append(0)
                continue
            conflicts.append(0)
            conflicts[col] += verticals[col]
            conflicts[col] += diagonals[0][row + col]
            conflicts[col] += diagonals[1][row + ((length - 1) - col)]
            if col == positions[row]:
                conflicts[col] = math.inf
            if conflicts[col] < the_min:
                min_conflicts = []
                the_min = conflicts[col]
            if conflicts[col] == the_min:
                min_conflicts.append(col)
        # now let's swap
        random.shuffle(min_conflicts)
        swap = min_conflicts.pop()
        col = positions[row]
        place(row, col)
        displace(row, swap)
        positions[row] = swap
        # restriction against no solution or a bad variation
        if swaps_counter < size * conflicts_threshold:
            problem_cols = get_conflicts([positions, verticals, diagonals], mandatory)
        else:
            err_flag = True
            break
        swaps_counter += 1
    return [] if err_flag else positions
# getting all the queens in conflicts:
def get_conflicts(array, mandatory):
    diagonals = array.pop()
    verticals = array.pop()
    positions = array.pop()
    length = len(positions)
    conflicts = []
    for col in range(length):
        if col != mandatory[1]:
            row = positions[col]
            if verticals[row] > 1 or diagonals[0][row + col] > 1 or diagonals[1][col + ((length - 1) - row)] > 1:
                conflicts.append(col)  # col
    return conflicts
# translating the coords to string
def get_string_of_queens(size, sol):
    res = ''
    for coords in sol:
        res += '.' * coords + 'Q' + '.' * (size - 1 - coords) + '\n'
    return res
flag_of_rec_stop: bool  # 36 366 98 989
result: list
# backtracking auxiliary method
def queens_backtrack(n: int, mandatory):
    global flag_of_rec_stop, result
    result = []
    flag_of_rec_stop = False
    def recursive_seeker(row: int, vertical_set: list[int],
                         diag1_set: set[int], diag2_set: set[int],
                         board_size: int) -> None:
        global flag_of_rec_stop, result
        # jumping over the mandatory queen
        if row == mandatory[0]:
            recursive_seeker(row + 1, vertical_set + [mandatory[1]], diag1_set, diag2_set, board_size)
        # finishing all the branches of the recursion tree
        if flag_of_rec_stop:
            return
        # catching the result
        if row == board_size:
            for i in range(board_size):
                if i == mandatory[0]:
                    result.append(mandatory[1])
                else:
                    result.append(vertical_set[i])
            flag_of_rec_stop = True
            return
        # cycling over all the possible vertical coords:
        for i in range(board_size):
            if i not in (vertical_set + [mandatory[1]]) and row + i not in diag1_set and row - i not in diag2_set:
                new_vertical_set = list(vertical_set)
                new_diag1_set = set(diag1_set)
                new_diag2_set = set(diag2_set)
                new_vertical_set.append(i)
                new_diag1_set.add(row + i)
                new_diag2_set.add(row - i)
                recursive_seeker(row + 1, new_vertical_set, new_diag1_set, new_diag2_set, board_size)
        # no need to backtrack because new sets are created on each call
    recursive_seeker(0, [], {mandatory[0] + mandatory[1]}, {mandatory[0] - mandatory[1]}, n)
    return result if len(result) > 0 else None
start = time.time_ns()
print(solve_n_queens(100, [1, 2]))
finish = time.time_ns()
print(f'Time costs: {(finish - start) // 10 ** 6} milliseconds')
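# --- Hypothetical verification sketch; not part of the original submission. ---
# It parses the board string returned by solve_n_queens (one 'Q' per line, as
# built in get_string_of_queens) and checks that no two queens share a column
# or a diagonal and that the mandatory queen sits where it was requested.
def is_valid_board(board: str, mandatory) -> bool:
    cols = [line.index('Q') for line in board.splitlines()]
    if cols[mandatory[0]] != mandatory[1]:
        return False
    for r1 in range(len(cols)):
        for r2 in range(r1 + 1, len(cols)):
            if cols[r1] == cols[r2] or abs(cols[r1] - cols[r2]) == r2 - r1:
                return False
    return True
# Example (assumes the call above returned a board rather than None):
# print(is_valid_board(solve_n_queens(100, [1, 2]), [1, 2]))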
|
LocusLontrime/Python
|
CodeWars_Rush/_1kyu/N_queens_problem_1kyu.py
|
N_queens_problem_1kyu.py
|
py
| 7,078 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71476996348
|
# Fibonacci sequence
import sys
input = sys.stdin.readline
n = int(input())
# The 1st and 2nd terms sum to the 3rd, the 2nd and 3rd sum to the 4th, and so on,
# so A takes over B's value while B becomes the sum of A and B.
# The final answer is A.
A = 0
B = 1
for i in range(n):
    A, B = B, B + A
print(A)
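# Illustrative check of the update rule above (not part of the submission):
# starting from (A, B) = (0, 1) the pairs become (1, 1), (1, 2), (2, 3),
# (3, 5), (5, 8), so after n iterations A holds the n-th Fibonacci number.
def fib(k: int) -> int:
    a, b = 0, 1
    for _ in range(k):
        a, b = b, a + b
    return a
assert [fib(k) for k in range(7)] == [0, 1, 1, 2, 3, 5, 8]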
|
YOONJAHYUN/Python
|
BOJ/10826.py
|
10826.py
|
py
| 318 |
python
|
ko
|
code
| 2 |
github-code
|
6
|