hexsha (stringlengths 40–40) | size (int64 6–782k) | ext (stringclasses 7 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 4–237) | max_stars_repo_name (stringlengths 6–72) | max_stars_repo_head_hexsha (stringlengths 40–40) | max_stars_repo_licenses (list) | max_stars_count (int64 1–53k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24 ⌀) | max_issues_repo_path (stringlengths 4–184) | max_issues_repo_name (stringlengths 6–72) | max_issues_repo_head_hexsha (stringlengths 40–40) | max_issues_repo_licenses (list) | max_issues_count (int64 1–27.1k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24 ⌀) | max_forks_repo_path (stringlengths 4–184) | max_forks_repo_name (stringlengths 6–72) | max_forks_repo_head_hexsha (stringlengths 40–40) | max_forks_repo_licenses (list) | max_forks_count (int64 1–12.2k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24 ⌀) | content (stringlengths 6–782k) | avg_line_length (float64 2.75–664k) | max_line_length (int64 5–782k) | alphanum_fraction (float64 0–1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
512a494d92fbf1e23d34b24734f317d3aec4e3c3 | 1,458 | py | Python | python/coursera_python/MICHIGAN/WEB/test/re_1.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | ["MIT"] | 16 | 2018-11-26T08:39:42.000Z | 2019-05-08T10:09:52.000Z | python/coursera_python/MICHIGAN/WEB/test/re_1.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | ["MIT"] | 8 | 2020-05-04T06:29:26.000Z | 2022-02-12T05:33:16.000Z | python/coursera_python/MICHIGAN/WEB/test/re_1.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | ["MIT"] | 5 | 2020-02-11T16:02:21.000Z | 2021-02-05T07:48:30.000Z |
# To use the re function
import re
hand = open('test.txt')
for line in hand:
line=line.rstrip()
# To find if a line contains a 'from' and print it
if re.search("from",line):
print("\nTo find if a line contains a 'from' and print it\n")
print(line)
# To find if the line starts with 'I' then print the line
if re.search("^I",line):
print("\nTo find if the line starts with 'I' then print the line\n")
print(line)
# . matches any character
if re.search("X.*",line):
print("If the line matches with a word having X and any no. of characters after that ")
print(line)
# . matches any character starting with X and ending with s
if re.search("X.*s",line):
print("\nIf the line matches with a word having X and any no. of characters after that and ending with s\n ")
print(line)
# If a line starts with X- followed by only one word no spaces in between then this gonna result
# print("\nIf a line starts with X- followed by only one word no spaces in between then this gonna result \n")
if re.search("^X-\S+:",line):
print("\nIf a line starts with X- followed by only one word no spaces in between then this gonna result \n")
print(line)
# To extract digits from a line
y = re.findall('[0-9]+',line)
print("line = ",line)
print("\nAll the digits extracted are :\n")
print(y)
# To find the upper case vowels
y = re.findall('[AEIOU]',line)
print(y)
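# Illustrative note (not part of the original script): for a hypothetical mbox-style
# line such as "X-DSPAM-Confidence: 0.8475", the "^X-\S+:" search above matches and
# re.findall('[0-9]+', line) returns ['0', '8475'].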
| 29.16 | 125 | 0.654321 |
5138951bcd0c69061f2809b634041058061897c2 | 1,038 | py | Python | checklisten/tests/test_urls.py | mribrgr/StuRa-Mitgliederdatenbank | 87a261d66c279ff86056e315b05e6966b79df9fa | ["MIT"] | 8 | 2019-11-26T13:34:46.000Z | 2021-06-21T13:41:57.000Z | src/checklisten/tests/test_urls.py | Sumarbrander/Stura-Mitgliederdatenbank | 691dbd33683b2c2d408efe7a3eb28e083ebcd62a | ["MIT"] | 93 | 2019-12-16T09:29:10.000Z | 2021-04-24T12:03:33.000Z | src/checklisten/tests/test_urls.py | Sumarbrander/Stura-Mitgliederdatenbank | 691dbd33683b2c2d408efe7a3eb28e083ebcd62a | ["MIT"] | 2 | 2020-12-03T12:43:19.000Z | 2020-12-22T21:48:47.000Z |
from django.test import SimpleTestCase
from django.urls import reverse, resolve
from checklisten.views import *
class TestUrls(SimpleTestCase):
def test_main_screen_url_resolves(self):
url = reverse('checklisten:main_screen')
self.assertEqual(resolve(url).func, main_screen)
def test_abhaken_url_resolves(self):
url = reverse('checklisten:abhaken')
self.assertEqual(resolve(url).func, abhaken)
def test_loeschen_url_resolves(self):
url = reverse('checklisten:loeschen')
self.assertEqual(resolve(url).func, loeschen)
def test_erstellen_url_resolves(self):
url = reverse('checklisten:erstellen')
self.assertEqual(resolve(url).func, erstellen)
def test_get_funktionen_url_resolves(self):
url = reverse('checklisten:get_funktionen')
self.assertEqual(resolve(url).func, get_funktionen)
"""
Template
def test_xxxx_url_resolves(self):
url = reverse('mitglieder:')
self.assertEqual(resolve(url).func, )
"""
| 31.454545 | 59 | 0.702312 |
7a216ac140a8792d415caca521c5845aad159a3a | 471 | py | Python | Packs/MajorBreachesInvestigationandResponse/Scripts/RapidBreachResponseParseBlog/RapidBreachResponseParseBlog.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | 799 | 2016-08-02T06:43:14.000Z | 2022-03-31T11:10:11.000Z | Packs/MajorBreachesInvestigationandResponse/Scripts/RapidBreachResponseParseBlog/RapidBreachResponseParseBlog.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | 9,317 | 2016-08-07T19:00:51.000Z | 2022-03-31T21:56:04.000Z | Packs/MajorBreachesInvestigationandResponse/Scripts/RapidBreachResponseParseBlog/RapidBreachResponseParseBlog.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | 1,297 | 2016-08-04T13:59:00.000Z | 2022-03-31T23:43:06.000Z |
import demistomock as demisto  # noqa: F401
import requests  # assumed: requests is used below but not imported in this snippet; in an XSOAR runtime it may already come in via CommonServerPython
from bs4 import BeautifulSoup
from CommonServerPython import *  # noqa: F401
args = demisto.args()
response = requests.get(args.get("url"))
soup = BeautifulSoup(response.content, "html.parser")
article = soup.find("article").get_text()
_, article = article.split("Phishing Email Campaign", 1)
article = article.replace('[.]', '.')
return_results(CommandResults(readable_output=article, outputs={"http.parsedBlog": article}))
| 36.230769 | 93 | 0.749469 |
8fe9124ccbaa7c4af6dca4b2a0eeaa3e566dc3cb | 2,723 | py | Python | software/supervisor/receiver.py | ghsecuritylab/project-powerline | 6c0ec13bbfc11c3790c506f644db4fe45021440a | ["MIT"] | null | null | null | software/supervisor/receiver.py | ghsecuritylab/project-powerline | 6c0ec13bbfc11c3790c506f644db4fe45021440a | ["MIT"] | null | null | null | software/supervisor/receiver.py | ghsecuritylab/project-powerline | 6c0ec13bbfc11c3790c506f644db4fe45021440a | ["MIT"] | 1 | 2020-03-08T01:50:58.000Z | 2020-03-08T01:50:58.000Z |
# imports
import time
from database import Panels, Strings, session, panels, strings
# todo: while loop with a delay that polls the IC or, if possible, wait for a message from the IC that new data has arrived
# todo: possibly an interrupt so this code gets kicked off
# todo: once data has arrived:
# todo:   decode I^2C
# todo:   decode and check the CRC => discard the log if it fails (NG)
# definition of the datapackages
class ModulePackage(object):
def __init__(self, serialnumber, voltage, stringnumber):
self.serialnumber = serialnumber
self.voltage = voltage
self.stringnumber = stringnumber
self.timestamp = time.time()
class StringPackage(object):
def __init__(self, stringnumber, stringcurrent):
self.stringnumber = stringnumber
self.stringcurrent = stringcurrent
self.timestamp = time.time()
# Creates the Object with all the data todo (gets data from receiver)
modulepackage = ModulePackage(2973881934, 19.732, 2) # Creates the Datapackage (serialnumber, voltage, stringnumber)
stringpackage = StringPackage(1, 30.432) # Creates the Datapackage (stringnumber, stringcurrent)
# Defines the functions to creates the database items and save them into the database
def insert_panel():
modulelog = Panels()
modulelog.serialnumber = modulepackage.serialnumber
modulelog.voltage = modulepackage.voltage
modulelog.stringnumber = modulepackage.stringnumber
modulelog.timestamp = modulepackage.timestamp
session.add(modulelog)
session.flush()
def insert_string():
stringlog = Strings()
stringlog.stringnumber = stringpackage.stringnumber
stringlog.stringcurrent = stringpackage.stringcurrent
stringlog.timestamp = stringpackage.timestamp
session.add(stringlog)
session.flush()
# checks if module already exists in the string
# if yes: save the datalog
# if no: check if theres a reported module in the string
# if no: save the datalog
# if yes: delete the reported logs and save the new one
existinpanels = session.query(Panels).filter((panels.c.serialnumber == modulepackage.serialnumber) & (panels.c.stringnumber == modulepackage.stringnumber)).all()
if len(existinpanels) != 0:
insert_panel()
else:
reportedpanels = session.query(Panels).filter((panels.c.flag_reported == 1) & (panels.c.stringnumber == modulepackage.stringnumber)).all()
if len(reportedpanels) == 0:
insert_panel()
else:
for defectivpanels in reportedpanels:
session.delete(defectivpanels)
insert_panel()
# todo: query the strings and store them
# saves the string datalog into the database
# insert_string()
# todo: trigger an interrupt from statician
| 34.0375 | 161 | 0.739258 |
8f4f0e1d3e25244ee885910263b3f5f9b211e497 | 443 | py | Python | marsyas-vamp/marsyas/src/django/birdsong/application/birdsong/annotations/models.py | jaouahbi/VampPlugins | 27c2248d1c717417fe4d448cdfb4cb882a8a336a | ["Apache-2.0"] | null | null | null | marsyas-vamp/marsyas/src/django/birdsong/application/birdsong/annotations/models.py | jaouahbi/VampPlugins | 27c2248d1c717417fe4d448cdfb4cb882a8a336a | ["Apache-2.0"] | null | null | null | marsyas-vamp/marsyas/src/django/birdsong/application/birdsong/annotations/models.py | jaouahbi/VampPlugins | 27c2248d1c717417fe4d448cdfb4cb882a8a336a | ["Apache-2.0"] | null | null | null |
from django.db import models
from recordings.models import Recording
# Create your models here.
class Annotation(models.Model):
start_time_ms = models.IntegerField(default = 0)
end_time_ms = models.IntegerField(default = 0)
label = models.CharField(max_length = 200)
recording = models.ForeignKey(Recording)
def to_string(self):
return "%i,%i,%i,%s" % (self.id, self.start_time_ms, self.end_time_ms, self.label)
| 31.642857 | 90 | 0.724605 |
8efc21fb05db623003cc35301755f8de360a8752 | 1,631 | py | Python | examples/example_sma.py | NewLanded/swbt | 8b8e8609cea060d6f124dc3c4bd99cb6243501dc | ["Apache-2.0"] | null | null | null | examples/example_sma.py | NewLanded/swbt | 8b8e8609cea060d6f124dc3c4bd99cb6243501dc | ["Apache-2.0"] | null | null | null | examples/example_sma.py | NewLanded/swbt | 8b8e8609cea060d6f124dc3c4bd99cb6243501dc | ["Apache-2.0"] | 1 | 2019-11-28T16:29:49.000Z | 2019-11-28T16:29:49.000Z |
"""简单移动平均线示例, 五日线上穿十日线则买入, 五日线下穿十日线则卖出"""
import datetime
import talib as ta
import pandas as pd
from core.back_test import BackTest
class MyBackTest(BackTest):
def sizer(self):
if self.bs_flag == "B":
self.trans_amount = (self.cash // (self.price + self.commission)) // 2
elif self.bs_flag == "S":
self.trans_amount = self.amount
def strategy(self):
sma_data_5 = ta.MA(self.data["close"], timeperiod=self.parameter["sma_5"], matype=0)
sma_data_10 = ta.MA(self.data["close"], timeperiod=self.parameter["sma_10"], matype=0)
if sma_data_5.iloc[-2] <= sma_data_10.iloc[-2] and sma_data_5.iloc[-1] > sma_data_10.iloc[-1]:
self.bs_flag = "B"
elif sma_data_5.iloc[-2] >= sma_data_10.iloc[-2] and sma_data_5.iloc[-1] < sma_data_10.iloc[-1]:
self.bs_flag = "S"
else:
pass
        # self._add_manual_plot_data({"trade_date": self.data["trade_date"].iloc[-1], "sma_data_5": sma_data_5.iloc[-1], "sma_data_10": sma_data_10.iloc[-1]})  # plot sma_data_5 and sma_data_10
if __name__ == "__main__":
point_data = pd.read_csv("./point_data_000001.csv", index_col=[0], parse_dates=[2])
basic_data = pd.read_csv("./basic_data_000001.csv", index_col=[0], parse_dates=[2])
ins = MyBackTest(point_data,
datetime.datetime(2016, 5, 1), datetime.datetime(2019, 1, 31),
10000, max_period=11,
parameter_map={"sma_5": [5, 7], "sma_10": [10, 14]}, plot_flag=True,
commission=0.0022)
gain_loss = ins.start()
print(gain_loss)
| 39.780488 | 191 | 0.6168 |
f10f3b720844b90f3b1df10b6533c22bada595b9 | 45 | py | Python | src/cory/dao/reminder_dao.py | MBogert/ReminderCory | a687c70a12f49a807d9fb023d45f799292a37f26 | ["MIT"] | null | null | null | src/cory/dao/reminder_dao.py | MBogert/ReminderCory | a687c70a12f49a807d9fb023d45f799292a37f26 | ["MIT"] | null | null | null | src/cory/dao/reminder_dao.py | MBogert/ReminderCory | a687c70a12f49a807d9fb023d45f799292a37f26 | ["MIT"] | null | null | null |
class ReminderDao:
    def __init__(self):
        pass  # no constructor body in the snippet; 'pass' added so the class definition parses
| 9 | 20 | 0.688889 |
f14aef6c02208f9e4ddc9f53a91f3a6e7a0f08dd | 17,550 | py | Python | Co-Simulation/Sumo/sumo-1.7.0/tools/contributed/sumopy/agilepy/lib_wx/toolbox.py | uruzahe/carla | 940c2ab23cce1eda1ef66de35f66b42d40865fb1 | ["MIT"] | 4 | 2020-11-13T02:35:56.000Z | 2021-03-29T20:15:54.000Z | Co-Simulation/Sumo/sumo-1.7.0/tools/contributed/sumopy/agilepy/lib_wx/toolbox.py | uruzahe/carla | 940c2ab23cce1eda1ef66de35f66b42d40865fb1 | ["MIT"] | 9 | 2020-12-09T02:12:39.000Z | 2021-02-18T00:15:28.000Z | Co-Simulation/Sumo/sumo-1.7.0/tools/contributed/sumopy/agilepy/lib_wx/toolbox.py | uruzahe/carla | 940c2ab23cce1eda1ef66de35f66b42d40865fb1 | ["MIT"] | 1 | 2020-11-20T19:31:26.000Z | 2020-11-20T19:31:26.000Z |
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2016-2020 German Aerospace Center (DLR) and others.
# SUMOPy module
# Copyright (C) 2012-2017 University of Bologna - DICAM
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file toolbox.py
# @author Joerg Schweizer
# @date
import sys
import os
import string
import time
if __name__ == '__main__':
try:
FILEDIR = os.path.dirname(os.path.abspath(__file__))
except:
FILEDIR = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path.append(os.path.join(FILEDIR, "..", ".."))
IMAGEDIR = os.path.join(os.path.dirname(__file__), "images")
import wx
from wx.lib.buttons import GenBitmapTextButton, GenBitmapButton
from objpanel import ObjPanel, NaviPanel
import agilepy.lib_base.classman as cm
import agilepy.lib_base.arrayman as am
class BaseTool(am.ArrayObjman):
"""
This is a base tool class for Agilecanvas.
It must handle all mouse or keyboard events,
must create and draw helplines and finally
    modify the state of clients which are graphically
represented on the canvas.
"""
def __init__(self, parent):
"""
To be overridden by specific tool.
"""
self.init_common('select', parent, 'Selection tool',
info='Select objects in cancvas',
is_textbutton=True,
)
def set_button_info(self, bsize=(32, 32)):
# print 'set_button_info select tool'
self._bitmap = wx.Bitmap(os.path.join(IMAGEDIR, 'selectIcon.bmp'), wx.BITMAP_TYPE_BMP)
self._bitmap_sel = wx.Bitmap(os.path.join(IMAGEDIR, 'selectIconSel.bmp'), wx.BITMAP_TYPE_BMP)
def set_cursor(self):
# http://www.wxpython.org/docs/api/wx.Cursor-class.html
if self._canvas is not None:
# self._canvas.SetCursor(wx.StockCursor(wx.CURSOR_QUESTION_ARROW))
pass
def get_button(self, parent, bottonsize=(32, 32), bottonborder=10):
"""
Returns button widget.
Called when toolbar is created.
"""
# simple stockbuttons
#b=wx.Button(parent, wx.ID_DELETE)
id = wx.NewId()
bitmap = self._bitmap
if self._is_textbutton:
b = GenBitmapTextToggleButton(parent, id, bitmap, self.ident.title(), name=self.get_name())
else:
b = GenBitmapToggleButton(parent, id, bitmap,
(bitmap.GetWidth()+bottonborder, bitmap.GetHeight()+bottonborder),
name=self.get_name())
#b=GenBitmapToggleButton(self, wx.ID_DELETE)
#b = GenBitmapTextToggleButton(self, id, None, tool.get('name',''), size = (200, 45))
if bitmap is not None:
#mask = wx.Mask(bitmap, wx.BLUE)
# bitmap.SetMask(mask)
b.SetBitmapLabel(bitmap)
# bmp=wx.NullBitmap
bitmap_sel = self._bitmap_sel
if bitmap_sel is not None:
#mask = wx.Mask(bmp, wx.BLUE)
# bmp.SetMask(mask)
b.SetBitmapSelected(bitmap_sel)
b.SetUseFocusIndicator(False)
b.SetUseFocusIndicator(False)
# b.SetSize((36,140))
# b.SetBestSize()
tt = wx.ToolTip(self.get_info())
b.SetToolTip(tt) # .SetTip(tool.tooltip)
return b
def init_common(self, ident, parent, name, info=None, is_textbutton=False):
# print 'Agiletool.__init__',ident,name
#self.name = name
self._is_textbutton = is_textbutton
self._canvas = None
self._init_objman(ident, parent=parent, name=name.title(), info=info)
#attrsman = self.set_attrsman(cm.Attrsman(self))
self._is_active = False
# print ' call set_button',self.ident
self.set_button_info()
self._optionspanel = None
def get_optionspanel(self, parent, size=wx.DefaultSize):
"""
Return tool option widgets on given parent
"""
size = (200, -1)
self._optionspanel = ObjPanel(parent, obj=self,
attrconfigs=None,
#tables = None,
# table = None, id=None, ids=None,
groupnames=['options'],
func_change_obj=None,
show_groupnames=False, show_title=True, is_modal=False,
mainframe=self.parent.get_mainframe(),
pos=wx.DefaultPosition, size=size, style=wx.MAXIMIZE_BOX | wx.RESIZE_BORDER,
immediate_apply=False, panelstyle='default', # 'instrumental'
standartbuttons=['apply', 'restore'])
return self._optionspanel
def activate(self, canvas=None):
"""
This call by metacanvas??TooldsPallet signals that the tool has been
activated and can now interact with metacanvas.
"""
# print 'activate',self.ident
self._is_active = True
self._canvas = canvas
# self._canvas.del_handles()
canvas.activate_tool(self)
self.set_cursor()
def get_drawing(self):
return self.parent.get_drawing()
def get_drawobj_by_ident(self, ident):
return self.get_drawing().get_drawobj_by_ident(ident)
def deactivate(self):
"""
This call by metacanvas??? ToolePallet signals that the tool has been
deactivated and can now interact with metacanvas.
"""
self._canvas.deactivate_tool()
self._canvas = None
self._is_active = False
def is_active(self):
return self._is_active
def force_deactivation(self):
"""
Explicit call to deactivate this tool in the tools panel.
"""
self.parent.unselect_tool()
def on_left_down(self, event):
return False
def on_left_up(self, event):
return False
def on_left_dclick(self, event):
return False
def on_right_down(self, event):
return False
def on_right_up(self, event):
return self.aboard(event)
def aboard(self):
return False
def on_wheel(self, event):
return False
def on_motion(self, event):
return False # return True if something moved
class DelTool(BaseTool):
def __init__(self, parent):
"""
To be overridden by specific tool.
"""
self.init_common('delete', parent, 'Delete', info='Delete objects in cancvas')
def set_button_info(self, bsize=(32, 32)):
# print 'set_button_info select tool'
self._bitmap = None
self._bitmap_sel = None
def get_button(self, parent, bottonsize=(32, 32), bottonborder=10):
# simple stockbuttons
b = wx.Button(parent, wx.ID_DELETE, name=self.get_name())
b.SetSize(bottonsize)
# b.SetBestSize()
tt = wx.ToolTip(self.get_info())
b.SetToolTip(tt) # .SetTip(tool.tooltip)
# print 'DelTool.get_button',dir(b)
return b
class ToolPalett(wx.Panel):
"""
This is a panel where tools are represented by images and/or text.
The tools are selected in a radio-button-fashion.
Each tool has a string as key. Each time the status changes,
a callback function is called with new and old tool key as argument.
"""
def __init__(self, parent, tools=[], callback=None, n_buttoncolumns=3):
"""
callback is a function that is called when a tool has been selected.
The function is called as:
callback(tool)
"""
# the metacanvas object with which the pallet should apply th tools
# callback when a new tool gets selected (NOT in USE)
self._callback = callback
# wx.Window.__init__(self,parent,wx.ID_ANY,wx.DefaultPosition,wx.DefaultSize,wx.SUNKEN_BORDER|wx.WANTS_CHARS)
# wx.Panel.__init__(self,parent,wx.ID_ANY,wx.DefaultPosition,size,wx.RAISED_BORDER|wx.WANTS_CHARS)
wx.Panel.__init__(self, parent, -1, wx.DefaultPosition, wx.DefaultSize)
# wx.Panel.__init__(self,parent,wx.ID_ANY,wx.DefaultPosition,(300,600),wx.RAISED_BORDER|wx.WANTS_CHARS)
self.sizer = wx.GridSizer(0, n_buttoncolumns, 5, 5)
self.SetSizer(self.sizer)
self._id_to_tool = {}
self._id = -1
for tool in tools:
self.add_tool(tool)
# self.sizer.Fit(self)
# self.SetMaxSize((300,-1))
def has_tool(self, newtool):
for tool, b in self._id_to_tool.values():
if tool.get_ident() == newtool.get_ident():
return True
return False
def get_tool_by_ident(self, ident):
# print 'get_tool_by_ident',ident
for tool, b in self._id_to_tool.values():
# print ' tool',tool.get_ident()
if tool.get_ident() == ident:
return tool
return None
def add_tool(self, tool):
"""
Add a tool to the pallet.
"""
if not self.has_tool(tool):
# print 'add_tool',tool
bottonsize = (32, 32)
bottonborder = 10
toolbarborder = 1
b = tool.get_button(self, bottonsize=bottonsize, bottonborder=bottonborder)
self.Bind(wx.EVT_BUTTON, self.on_select, b)
_id = b.GetId()
self._id_to_tool[_id] = (tool, b)
#self.sizer.Add(b, 0, wx.GROW)
self.sizer.Add(b, 0, wx.EXPAND, border=toolbarborder)
# self.sizer.Add(b)
# print ' _id =',_id
return _id
else:
return -1
def get_tools(self):
"""
        Returns a list with all tool instances
"""
tools = []
for (tool, b) in self._id_to_tool.values():
tools.append(tool)
return tools
def refresh(self):
"""
Reorganizes toolpallet after adding/removing tools.
        Attention: this is not called automatically.
"""
self.sizer.Layout()
def on_select(self, event):
"""
Called from a pressed button
"""
_id = event.GetEventObject().GetId()
# print '\n on_select',_id,self._id#,self._id_to_tool[_id]
if _id != self._id:
if self._id_to_tool.has_key(_id):
(tool, button) = self._id_to_tool[_id]
# print ' new tool',tool.get_name()
self.unselect()
self._id = _id
# this will cause the main OGL editor to activate the
# tool with the current canvas
self.GetParent().set_tool(tool)
# if self._callback is not None:
# self._callback(tool)
event.Skip()
return tool
return None
def select(self, _id):
"""
        Explicitly select a tool with _id.
"""
# print '\nselect',_id,self._id,self._id_to_tool
if _id != self._id:
if self._id_to_tool.has_key(_id):
(tool, button) = self._id_to_tool[_id]
# print ' explicitly press button'
if hasattr(button, 'SetToggle'):
button.SetToggle(True)
else:
button.SetFocus()
# print 'button.SetFocus',button.SetFocus.__doc__
# pass
# print ' new tool',tool.get_name()
# self.unselect()
self._id = _id
self.GetParent().set_tool(tool)
# if self._callback is not None:
# self._callback(tool)
return tool
return None
def unselect(self):
"""
Unselect currently selected tool.
"""
if self._id_to_tool.has_key(self._id):
(tool, button) = self._id_to_tool[self._id]
if tool.is_active() == True:
# Disactivate current tool
tool.deactivate()
if hasattr(button, 'SetToggle'):
button.SetToggle(False)
else:
# button.SetFocus()
# print 'button.SetFocus',button.SetFocus.__doc__
pass
class __ToggleMixin:
def SetToggle(self, flag):
self.up = not flag
self.Refresh()
SetValue = SetToggle
def GetToggle(self):
return not self.up
GetValue = GetToggle
def OnLeftDown(self, event):
if not self.IsEnabled():
return
self.saveUp = self.up
self.up = False # not self.up
self.CaptureMouse()
self.SetFocus()
self.Refresh()
def OnLeftUp(self, event):
if not self.IsEnabled() or not self.HasCapture():
return
if self.HasCapture():
if self.up != self.saveUp:
self.Notify()
self.ReleaseMouse()
self.Refresh()
def OnKeyDown(self, event):
event.Skip()
class GenBitmapTextToggleButton(__ToggleMixin, GenBitmapTextButton):
"""A generic toggle bitmap button with text label"""
pass
class GenBitmapToggleButton(__ToggleMixin, GenBitmapButton):
"""A generic toggle bitmap button with text label"""
pass
class ToolsPanel(wx.Panel):
"""
Shows a toolpallet with different tools and an options panel.
"""
def __init__(self, parent, size=wx.DefaultSize, size_title=150, **kwargs):
#size = wx.DefaultSize
#size = (300,-1)
wx.Panel.__init__(self, parent, wx.NewId(), wx.DefaultPosition, size)
# wx.DefaultSize
# sizer=wx.BoxSizer(wx.VERTICAL)
sizer = wx.StaticBoxSizer(wx.StaticBox(parent, wx.NewId(), "test"), wx.VERTICAL)
self._toolspalett = ToolPalett(self, **kwargs)
# self._toolspalett.add_tool(BaseTool(self))
# create initial option panel
self._optionspanel = wx.Window(self)
self._optionspanel.SetBackgroundColour("pink")
wx.StaticText(self._optionspanel, -1, "Tool Options", (size_title, -1))
# OK, but toolspane changes size with optionpanel
#sizer.Add(self._toolspalett,0, wx.ALL | wx.ALIGN_LEFT | wx.GROW, 4)
# sizer.Add(self._optionspanel,1,wx.GROW)# wx.EXPAND
sizer.Add(self._toolspalett, 0, wx.EXPAND)
sizer.Add(self._optionspanel, 1, wx.EXPAND)
# finish panel setup
self.SetSizer(sizer)
sizer.Fit(self)
# self.SetSize(parent.GetSize())
# self.SetMaxSize((300,-1))
def get_canvas(self):
# ask the OGL editor for the currently active canvas in focus
return self.GetParent().get_canvas()
def get_drawing(self):
return self.get_canvas().get_drawing()
def get_mainframe(self):
return self.GetParent().get_mainframe()
def add_tool(self, tool):
return self._toolspalett.add_tool(tool)
def add_toolclass(self, ToolClass, **kwargs):
# init and add
return self._toolspalett.add_tool(ToolClass(self, **kwargs))
def add_initial_tool(self, tool):
self._id_initialtool = self.add_tool(tool)
def reset_initial_tool(self):
self.set_tool_with_id(self._id_initialtool)
def reset_initial_tool(self):
self.set_tool_with_id(self._id_initialtool)
def set_tool_with_id(self, _id):
"""
Explicitely set a tool from tool pallet using its id.
Used to set initial tool.
"""
# print 'set_tool_with_id',_id
return self._toolspalett.select(_id)
def set_tool(self, tool):
"""
Called by toolpallet after new tool has been selected.
"""
# Activate current tool
# then tool wil set itself to canvas
tool.activate(self.get_canvas())
# set options of current tool
self.refresh_optionspanel(tool)
def get_tool_by_ident(self, ident):
return self._toolspalett.get_tool_by_ident(ident)
def refresh_optionspanel(self, tool):
sizer = self.GetSizer()
sizer.Remove(1)
self._optionspanel.Destroy()
self._optionspanel = tool.get_optionspanel(self) # , size = self.GetSize())
# self._optionspanel.SetSize((100,0))
# if id is not None:
# self.objpanel=ObjPanel(self,obj,id=id,func_change_obj=self.change_obj)
# else:
# self.objpanel=ObjPanel(self,obj,func_change_obj=self.change_obj)
# ok, but chanes sice of whole palle
# sizer.Add(self._optionspanel,1,wx.GROW)
sizer.Add(self._optionspanel, 1, wx.EXPAND)
# self.Refresh()
# sizer.Fit(self)
sizer.Layout()
# self.GetParent().Layout()
def unselect_tool(self):
"""
Unselect currently selected tool.
"""
self._toolspalett.unselect()
| 32.025547 | 117 | 0.591909 |
f180725a67e0f18a5aa21168ec71ba46b0daf5cf | 331 | py | Python | Problems/Dynamic Programming/Easy/MinCostClimbingStair/climbing_stairs.py | dolong2110/Algorithm-By-Problems-Python | 31ecc7367aaabdd2b0ac0af7f63ca5796d70c730 | ["MIT"] | 1 | 2021-08-16T14:52:05.000Z | 2021-08-16T14:52:05.000Z | Problems/Dynamic Programming/Easy/MinCostClimbingStair/climbing_stairs.py | dolong2110/Algorithm-By-Problems-Python | 31ecc7367aaabdd2b0ac0af7f63ca5796d70c730 | ["MIT"] | null | null | null | Problems/Dynamic Programming/Easy/MinCostClimbingStair/climbing_stairs.py | dolong2110/Algorithm-By-Problems-Python | 31ecc7367aaabdd2b0ac0af7f63ca5796d70c730 | ["MIT"] | null | null | null |
from typing import List
def minCostClimbingStairs(cost: List[int]) -> int:
n = len(cost)
if not cost:
return 0
dt = [0 for _ in range(n)]
dt[0] = cost[0]
if n >= 2:
dt[1] = cost[1]
for i in range(2, n):
dt[i] = cost[i] + min(dt[i - 1], dt[i - 2])
return min(dt[-1], dt[-2])
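# Worked example (added for illustration, not part of the original solution):
# minCostClimbingStairs([10, 15, 20]) builds dt = [10, 15, 30] and returns
# min(30, 15) = 15, i.e. start on index 1, pay 15, then step past the top.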
| 18.388889 | 51 | 0.495468 |
24779c38d7ae66ffe9af7faafd5076fef8341abb | 7,520 | py | Python | x2paddle/optimizer/pytorch_code_optimizer/subgraphs_union.py | usertianqin/X2Paddle | b554a8094ca3e255ef4bd2e80337222a35625133 | ["Apache-2.0"] | 559 | 2019-01-14T06:01:55.000Z | 2022-03-31T02:52:43.000Z | x2paddle/optimizer/pytorch_code_optimizer/subgraphs_union.py | usertianqin/X2Paddle | b554a8094ca3e255ef4bd2e80337222a35625133 | ["Apache-2.0"] | 353 | 2019-05-07T13:20:03.000Z | 2022-03-31T05:30:12.000Z | x2paddle/optimizer/pytorch_code_optimizer/subgraphs_union.py | usertianqin/X2Paddle | b554a8094ca3e255ef4bd2e80337222a35625133 | ["Apache-2.0"] | 241 | 2018-12-25T02:13:51.000Z | 2022-03-27T23:21:43.000Z |
# -*- coding:UTF-8 -*-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import pandas as pd
from x2paddle.optimizer.pytorch_code_optimizer.layer_code_generator import rename_layers
def construct_attrs_table(sub_layers_list, node_name2sub_layers=None, module_name=None):
""" 构造不同属性的表格。
"""
def get_node_name(sub_layers):
for k, v in node_name2sub_layers.items():
if v == sub_layers:
node_name = k
break
return node_name
sub_layers = sub_layers_list[0]
_, _, new_names = rename_layers(sub_layers)
table = list()
node_names = list()
for i, sub_layers in enumerate(sub_layers_list):
attrs = dict()
if node_name2sub_layers is not None:
node_names.append(get_node_name(sub_layers))
else:
node_names.append("{}_{}".format(module_name, i))
for i, (layer_id, layer) in enumerate(sub_layers.items()):
for k, v in layer.attrs.items():
attrs[new_names[i] + "_{}".format(k)] = v
table.append(attrs)
pd_table = pd.DataFrame(table, index=node_names)
return pd_table
def get_inputs_outputs(pd_graph, layers):
inputs = list()
outputs = list()
cur_outputs = list()
layer_ids = list(layers.keys())
for layer_id, layer in layers.items():
        # collect the names of the output nodes
if layer_id not in pd_graph.edges_out:
for index, output_name in enumerate(layer.outputs):
if not output_name.startswith("x") or output_name in outputs \
or layer.kernel == "prim.assert":
continue
elif layer.kernel == "prim.if" or layer.kernel == "prim.loop":
if index != 0:
outputs.append(output_name)
elif output_name not in outputs:
outputs.append(output_name)
else:
for out_layer_id in pd_graph.edges_out[layer_id]:
if out_layer_id not in layer_ids:
for index, output_name in enumerate(layer.outputs):
if not output_name.startswith("x") or output_name in outputs \
or layer.kernel == "prim.assert":
continue
elif layer.kernel == "prim.if" or layer.kernel == "prim.loop":
if index != 0:
outputs.append(output_name)
else:
outputs.append(output_name)
        # collect the names of the input nodes
for k, v in layer.inputs.items():
if v not in cur_outputs and v not in inputs:
inputs.append(v)
if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel):
cur_outputs.extend(layer.outputs[1:])
else:
cur_outputs.extend(layer.outputs)
return inputs, outputs
def get_inputs_count(pd_graph, sub_layers):
input_ct2sub_layer_id = dict()
for i, sub_layer in enumerate(sub_layers):
inputs, outputs = get_inputs_outputs(pd_graph, sub_layer)
if len(inputs) not in input_ct2sub_layer_id:
input_ct2sub_layer_id[len(inputs)] = [i]
else:
input_ct2sub_layer_id[len(inputs)].append(i)
return input_ct2sub_layer_id
def distinguish_sequential(pd_graph, module_name, sub_layers, sub_identifiers, node_name2sub_layers):
""" 获取不同的layers组成的序列
"""
def distinguish_sequential_by_inputs(part_layers, part_identifiers, part_module_name):
new_sub_layers = dict()
new_sub_sequentials = dict()
sequentials2attrs_table = dict()
input_ct2sub_layer_id = get_inputs_count(pd_graph, part_layers)
if len(input_ct2sub_layer_id) == 1:
new_sub_layers["{}".format(part_module_name)] = part_layers
new_sub_sequentials["{}".format(part_module_name)] = part_identifiers
sequentials2attrs_table["{}".format(part_module_name)] = construct_attrs_table(part_layers, node_name2sub_layers)
else:
for i, (k, indexes) in enumerate(input_ct2sub_layer_id.items()):
new_sub_layers["{}__{}".format(part_module_name, i)] = list()
new_sub_sequentials["{}__{}".format(part_module_name, i)] = list()
for index in indexes:
new_sub_layers["{}__{}".format(part_module_name, i)].append(part_layers[index])
new_sub_sequentials["{}__{}".format(part_module_name, i)].append(part_identifiers[index])
sequentials2attrs_table["{}__{}".format(part_module_name, i)] = \
construct_attrs_table(new_sub_layers["{}__{}".format(part_module_name, i)], node_name2sub_layers)
return new_sub_layers, new_sub_sequentials, sequentials2attrs_table
new_sub_layers = dict()
new_sub_sequentials = dict()
sequentials2attrs_table = dict()
identifiers_str_list = list()
for identifiers in sub_identifiers:
identifiers_str_list.append(", ".join(list(identifiers.values())))
identifiers_str_set = list(set(identifiers_str_list))
if len(identifiers_str_set) == 1:
return distinguish_sequential_by_inputs(sub_layers, sub_identifiers, module_name)
else:
for i in range(len(identifiers_str_set)):
new_sub_layers["{}{}".format(module_name, i)] = list()
new_sub_sequentials["{}{}".format(module_name, i)] = list()
no_same_module_count = 0
for j, identifiers in enumerate(sub_identifiers):
identifiers_str = identifiers_str_list[j]
for i in range(len(identifiers_str_set)):
if identifiers_str_set[i] == identifiers_str:
is_diff = False
if identifiers_str_set[i].replace(", ", "").isdigit() or module_name == "ModuleList":
new_sub_layers["{}{}".format(module_name, len(identifiers_str_set) + no_same_module_count)] = [sub_layers[j]]
new_sub_sequentials["{}{}".format(module_name, len(identifiers_str_set) + no_same_module_count)] = [identifiers]
no_same_module_count += 1
else:
new_sub_layers["{}{}".format(module_name, i)].append(sub_layers[j])
new_sub_sequentials["{}{}".format(module_name, i)].append(identifiers)
break
new_new_sub_layers = dict()
new_new_sub_sequentials = dict()
for k, v in new_sub_layers.items():
part_sub_layers, part_sub_sequentials, part_sequentials2attrs_table = \
distinguish_sequential_by_inputs(v, new_sub_sequentials[k], k)
new_new_sub_layers.update(part_sub_layers)
new_new_sub_sequentials.update(part_sub_sequentials)
sequentials2attrs_table.update(part_sequentials2attrs_table)
return new_new_sub_layers, new_new_sub_sequentials, sequentials2attrs_table
| 48.205128 | 132 | 0.63484 |
3b29ff066ca13ce397f305f577e8cb4642dbca08 | 1,793 | py | Python | python/en/archive/topics/command_line_arguments/TODOs/04-cmd_line_args_parsing2.py | aimldl/coding | 70ddbfaa454ab92fd072ee8dc614ecc330b34a70 | ["MIT"] | null | null | null | python/en/archive/topics/command_line_arguments/TODOs/04-cmd_line_args_parsing2.py | aimldl/coding | 70ddbfaa454ab92fd072ee8dc614ecc330b34a70 | ["MIT"] | null | null | null | python/en/archive/topics/command_line_arguments/TODOs/04-cmd_line_args_parsing2.py | aimldl/coding | 70ddbfaa454ab92fd072ee8dc614ecc330b34a70 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import sys, getopt
def usage():
print("usage: python3 04-cmd_line_args_parsing2.py arg1 arg2 arg3")
def print_opts():
print("file_name =", file_name )
print("argc =", argc )
print("argv =", argv )
print("opts =", opts )
print("args =", args )
print("input_file=", input_file )
print("output_file=", output_file )
def main( argc, argv ):
print("argv =", argv )
print("argc =", argc )
try:
# opts, args = getopt.getopt( argv, "", [])
# Input
# argv is the (entire) argument list
# "" is a short option starting with a hyphen -. Example: -h
# An argument should be followed by a colon (:).
# [] is a long option start with two hyphens --. Example: --help
# An argument should be followed by an equal sign ('=').
# Output
# opts is a list of (option, value) pairs.
# args is the list of program arguments left after the option list was stripped.
short_opt = "hi:o:"
long_opt = ["help","input=", "output="]
opts, args = getopt.getopt( argv, short_opt,long_opt)
print("opts =", opts )
print("args =", args )
except getopt.GetoptError:
usage()
sys.exit(2)
input_file = ''
output_file = ''
for opt, val in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-i", "--input"):
            input_file = val
elif opt in ("-o","--output"):
            output_file = val
else :
usage()
sys.exit(2)
if __name__ == "__main__":
# Process the command line arguments
argc = len( sys.argv )
file_name = sys.argv[0]
    argv = sys.argv[1:]
print("file_name =", file_name )
print("sys.argv =", sys.argv )
main( argc, argv )
| 26.367647 | 88 | 0.560513 |
3b7ea78abb9f5498d5b75b4c449c53937d1747fe | 358 | py | Python | 05_fibinacci/example.py | wuyueCreator/python-test | 6072ac9264a257c89925469238c14fff3bda5630 | ["MIT"] | 1 | 2019-03-25T03:44:54.000Z | 2019-03-25T03:44:54.000Z | 05_fibinacci/example.py | wuyueCreator/python-test | 6072ac9264a257c89925469238c14fff3bda5630 | ["MIT"] | null | null | null | 05_fibinacci/example.py | wuyueCreator/python-test | 6072ac9264a257c89925469238c14fff3bda5630 | ["MIT"] | null | null | null |
import math
import profile
def fibonacci(n):
return int(1 / math.sqrt(5) * (((1 + math.sqrt(5)) / 2) ** n - ((1 - math.sqrt(5)) / 2) ** n))
def fibonacci2(n):
a, b = 0, 1
while n:
a, b = b, a + b
n -= 1
return a
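# Illustrative check (not in the original file): fibonacci(10) and fibonacci2(10)
# both return 55; for large n the closed-form (Binet) version drifts due to float
# rounding, while the iterative version stays exact with Python integers.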
def main():
fibonacci(1474)
fibonacci2(1474)
if __name__ == '__main__':
profile.run('main()')
| 14.916667 | 98 | 0.511173 |
d9723e9a68baa8f046126f72a03c5da9b45f1837 | 576 | py | Python | rabbitmq/tutorial/new_task.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | null | null | null | rabbitmq/tutorial/new_task.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | null | null | null | rabbitmq/tutorial/new_task.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | null | null | null |
import sys
import pika
credentials = pika.PlainCredentials('admin', '1q2w3e')
parameters = pika.ConnectionParameters('192.168.1.197', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue='task_queue', durable=True)
message = ' '.join(sys.argv[1:]) or "Hello World"
channel.basic_publish(exchange='',
routing_key='task_queue',
body=message,
properties=pika.BasicProperties(
delivery_mode=2, # make message persistent
))
print(' [x] Sent %r' % message)
connection.close()
| 28.8 | 79 | 0.729167 |
8db51bc16dbab990ff9228de53b1906a0da4969e | 8,392 | py | Python | defis/helper.py | lehr-laemp/kiga-webapp-V2 | 1d35dd1deb82962e166169245fa0d4e5f265af22 | ["Unlicense"] | null | null | null | defis/helper.py | lehr-laemp/kiga-webapp-V2 | 1d35dd1deb82962e166169245fa0d4e5f265af22 | ["Unlicense"] | null | null | null | defis/helper.py | lehr-laemp/kiga-webapp-V2 | 1d35dd1deb82962e166169245fa0d4e5f265af22 | ["Unlicense"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Started on 12.03.2022
@author: HM
helper.py
- all required helper functions
"""
# ---------------------------------------------------------
import datetime
import os
import pickle
import shutil
import smtplib  # for mail
import ssl  # for mail
from email.message import EmailMessage
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
# from threading import Timer
import pyAesCrypt
import streamlit as st
import openpyxl
# ---------------------------------------------------------
def excel_tabelle_entschluesseln():
print(datetime.datetime.now().strftime('%H-%M-%S'), ': Datenbank entschlüsseln')
pyAesCrypt.decryptFile('Daten/sus.aes', 'Daten/sus.xlsx', st.secrets['tabelle_passwort'])
    # make a backup of the database
    # many backup files:
    # backup_dir = 'Backup/' + datetime.datetime.now().strftime('%y-%m-%d-%H-%M-%S') + '-sus.aes'
    # only one backup file:
backup_dir = 'Backup/backup-sus.aes'
shutil.copyfile('Daten/sus.aes', backup_dir)
return True
# ---------------------------------------------------------
def excel_tabelle_in_liste_speichern():
print(datetime.datetime.now().strftime('%H-%M-%S'), ': Excel-Tabelle in SuS-Liste speichern')
    # open and activate the Excel workbook
kiga_datei = openpyxl.load_workbook('Daten/sus.xlsx')
kiga_tabelle = kiga_datei.active
sus_liste = []
sus_einzel = []
for reihe in range(2, kiga_tabelle.max_row + 1):
for spalte in range(1, 17):
sus_einzel.append(
kiga_tabelle.cell(row=reihe, column=spalte).value)
if spalte == 16:
sus_liste.append(sus_einzel)
sus_einzel = []
# OK print(sus_liste)
return sus_liste
# ---------------------------------------------------------
def liste_in_pickle_speichern(sus_liste):
print(datetime.datetime.now().strftime('%H-%M-%S'), ': Liste in Pickle speichern')
    # save the list as a pickle dump
with open('Daten/sus.tmp', 'wb') as datei_handler:
pickle.dump(sus_liste, datei_handler)
return True
# ---------------------------------------------------------
def excel_tabelle_loeschen():
print(datetime.datetime.now().strftime('%H-%M-%S'), ': Excel-Tabelle löschen')
os.remove('Daten/sus.xlsx')
return True
# ---------------------------------------------------------
def pickle_in_excel_speichern():
"""
    Saves the pickle dump into the Excel workbook.
    To do so it opens the encrypted workbook,
    writes the data into the workbook,
    encrypts the workbook again,
    deletes the decrypted workbook,
    and deletes the pickle dump.
"""
print(datetime.datetime.now().strftime('%H-%M-%S'), ': Pickle in Excel-Tabelle speichern')
    # read the pickle dump
    with open('Daten/sus.tmp', 'rb') as datei_handler:
        sus_liste = pickle.load(datei_handler)
    # decrypt the workbook
    excel_tabelle_entschluesseln()
    # open and activate the Excel workbook
    kiga_datei = openpyxl.load_workbook('Daten/sus.xlsx')
    kiga_tabelle = kiga_datei.active
    # write the SuS list into the Excel workbook
for reihe in range(len(sus_liste)):
# OK print(liste[reihe])
for spalte in range(0, 16):
kiga_tabelle.cell(row=reihe+2, column=spalte+1).value = sus_liste[reihe][spalte]
    # save the Excel workbook
    kiga_datei.save('Daten/sus.xlsx')
    excel_tabelle_verschluesseln()  # and delete it
    # delete the pickle dump
os.remove('Daten/sus.tmp')
return True
# ---------------------------------------------------------
def excel_tabelle_verschluesseln():
print(datetime.datetime.now().strftime('%H-%M-%S'), ': Excel-Tabelle verschlüsseln')
    # encrypt the Excel workbook
    pyAesCrypt.encryptFile('Daten/sus.xlsx', 'Daten/sus.aes', st.secrets['tabelle_passwort'])
    # delete the Excel workbook
    os.remove('Daten/sus.xlsx')
    # make a backup of the database
# backup_dir = 'Backup/' + datetime.datetime.now().strftime('%y-%m-%d-%H-%M-%S') + '-sus.aes'
backup_dir = 'Backup/backup-sus.aes'
shutil.copyfile('Daten/sus.aes', backup_dir)
return True
# ---------------------------------------------------------
def liste_aus_pickle_holen():
print(datetime.datetime.now().strftime('%H-%M-%S'),': Liste aus Pickle-Dump holen')
    # read the pickle dump
with open('Daten/sus.tmp', 'rb') as datei_handler:
sus_liste = pickle.load(datei_handler)
return sus_liste
# ---------------------------------------------------------
def kiga_standorte_lesen(sus_liste):
"""
    Read the possible kindergarten (Kiga) locations from the list of SuS.
    return: list of Kiga locations, sorted
"""
print(datetime.datetime.now().strftime('%H-%M-%S'), ': Kiga-Standorte einlesen', )
kiga_liste = []
for i in range(len(sus_liste)):
# OK print(sus_liste[i][4])
kiga_liste.append(sus_liste[i][4])
return sorted(set(kiga_liste))
# ---------------------------------------------------------
def mail_senden(betreff):
"""
    Sends a message on login or logout.
"""
print(datetime.datetime.now().strftime('%H-%M-%S'), ': Schicke ein Mail')
    # how many backup files are there?
backup_zaehler = 0
for pfad in os.listdir('Backup/'):
backup_zaehler += 1
# OK print('Anzahl Backup-Dateien:', backup_zaehler)
nachricht = f"""Mail von der Daten-Eingabe:
Eine {betreff}.
Es sind {backup_zaehler} Dateien im Backup-Ordner.
Herzliche Grüsse :-)
"""
    # server settings
    gmx_smpt = 'mail.gmx.net'
    gmx_passwort = st.secrets['mail_passwort']
    gmx_port = 587
    # mail settings
    mail_von = '[email protected]'
    mail_fuer = '[email protected]'
    mail_betreff = betreff
    mail_text = nachricht
    # mail attachment
dateiname = 'sus.aes'
with open('Daten/sus.aes', "rb") as attachment:
# Add file as application/octet-stream
# Email client can usually download this automatically as attachment
part = MIMEBase("application", "octet-stream")
part.set_payload(attachment.read())
# Encode file in ASCII characters to send by email
encoders.encode_base64(part)
# Add header as key/value pair to attachment part
part.add_header(
"Content-Disposition",
f"attachment; filename={dateiname}",)
    # convert into email format
nachricht = MIMEMultipart() #EmailMessage()
# nachricht.set_content(mail_text)
nachricht['Subject'] = mail_betreff
nachricht['From'] = mail_von
nachricht['To'] = mail_fuer
# Add attachment to message and convert message to string
nachricht.attach(part)
    # connect to the server
    context = ssl.create_default_context()
    server = None
    try:
        server = smtplib.SMTP(gmx_smpt, gmx_port)
        # server.set_debuglevel(1)
        server.starttls(context=context)
        server.login(mail_von, gmx_passwort)
        server.send_message(nachricht)
    except Exception as e:
        # print(e)
        st.warning('Kann Email nicht senden.')
    finally:
        if server is not None:  # guard: SMTP() itself may have failed
            server.quit()
# ---------------------------------------------------------
def melde_status():
print(datetime.datetime.now().strftime('%H-%M-%S'),': Melde Status')
global timer
    # is anybody logged in?
    anmelde_status = 2  # st.session_state['angemeldet']
    print(anmelde_status)
    # does not work: print(st.session_state.angemeldet)
    # does not work: print(st.session_state['timer'])
    # how many backup files are there?
    zaehler = 0
    for pfad in os.listdir('Backup/'):
        zaehler += 1
    print('Anzahl Backup-Dateien:', zaehler)
    # stop the timer and restart it
timer.cancel()
start_timer()
# ---------------------------------------------------------
def start_timer():
global timer
print(datetime.datetime.now().strftime('%H-%M-%S'),': Starte den Timer')
# ti = Timer(30, melde_automatisch_ab, args=None)
timer = Timer(10, melde_status, args=None)
timer.start()
# ---------------------------------------------------------
def stop_timer():
global timer
print(datetime.datetime.now().strftime('%H-%M-%S'),': Stoppe den Timer')
timer.cancel()
| 27.605263 | 97 | 0.596163 |
d68f402a96a70f5e448461903c08393ab7d4363c | 1,628 | py | Python | solutions/pedestrian_search/webserver/src/service/models/model.py | naetimus/bootcamp | 0182992df7c54012944b51fe9b70532ab6a0059b | ["Apache-2.0"] | 1 | 2021-04-06T06:13:20.000Z | 2021-04-06T06:13:20.000Z | solutions/pedestrian_search/webserver/src/service/models/model.py | naetimus/bootcamp | 0182992df7c54012944b51fe9b70532ab6a0059b | ["Apache-2.0"] | null | null | null | solutions/pedestrian_search/webserver/src/service/models/model.py | naetimus/bootcamp | 0182992df7c54012944b51fe9b70532ab6a0059b | ["Apache-2.0"] | null | null | null |
import torch.nn as nn
from .bi_lstm import BiLSTM
from .mobilenet import MobileNetV1
from .resnet import resnet50, resnet101  # assumes .resnet also provides resnet101, which is referenced below
class Model(nn.Module):
def __init__(self, args):
super(Model, self).__init__()
if args.image_model == 'mobilenet_v1':
self.image_model = MobileNetV1()
self.image_model.apply(self.image_model.weight_init)
elif args.image_model == 'resnet50':
self.image_model = resnet50()
        elif args.image_model == 'resnet101':
self.image_model = resnet101()
self.bilstm = BiLSTM(args)
self.bilstm.apply(self.bilstm.weight_init)
inp_size = 1024
if args.image_model == 'resnet50' or args.image_model == 'resnet101':
inp_size = 2048
# shorten the tensor using 1*1 conv
self.conv_images = nn.Conv2d(inp_size, args.feature_size, 1)
self.conv_text = nn.Conv2d(1024, args.feature_size, 1)
def forward(self, images, text, text_length):
image_features = self.image_model(images)
text_features = self.bilstm(text, text_length)
image_embeddings, text_embeddings= self.build_joint_embeddings(image_features, text_features)
return image_embeddings, text_embeddings
def build_joint_embeddings(self, images_features, text_features):
#images_features = images_features.permute(0,2,3,1)
#text_features = text_features.permute(0,3,1,2)
image_embeddings = self.conv_images(images_features).squeeze()
text_embeddings = self.conv_text(text_features).squeeze()
return image_embeddings, text_embeddings
| 36.177778 | 101 | 0.679361 |
ba9499f6452662b5f5c804d2f00576ceace5f45e | 25,166 | py | Python | hihope_neptune-oh_hid/00_src/v0.1/third_party/LVM2/daemons/lvmdbusd/lv.py | dawmlight/vendor_oh_fun | bc9fb50920f06cd4c27399f60076f5793043c77d | ["Apache-2.0"] | 1 | 2022-02-15T08:51:55.000Z | 2022-02-15T08:51:55.000Z | hihope_neptune-oh_hid/00_src/v0.3/third_party/LVM2/daemons/lvmdbusd/lv.py | dawmlight/vendor_oh_fun | bc9fb50920f06cd4c27399f60076f5793043c77d | ["Apache-2.0"] | null | null | null | hihope_neptune-oh_hid/00_src/v0.3/third_party/LVM2/daemons/lvmdbusd/lv.py | dawmlight/vendor_oh_fun | bc9fb50920f06cd4c27399f60076f5793043c77d | ["Apache-2.0"] | null | null | null |
# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .automatedproperties import AutomatedProperties
from . import utils
from .utils import vg_obj_path_generate
import dbus
from . import cmdhandler
from . import cfg
from .cfg import LV_INTERFACE, THIN_POOL_INTERFACE, SNAPSHOT_INTERFACE, \
LV_COMMON_INTERFACE, CACHE_POOL_INTERFACE, LV_CACHED
from .request import RequestEntry
from .utils import n, n32
from .loader import common
from .state import State
from . import background
from .utils import round_size, mt_remove_dbus_objects
from .job import JobState
# Try and build a key for a LV, so that we sort the LVs with least dependencies
# first. This may be error prone because of the flexibility LVM
# provides and what you can stack.
def get_key(i):
name = i['lv_name']
parent = i['lv_parent']
pool = i['pool_lv']
a1 = ""
a2 = ""
if name[0] == '[':
a1 = '#'
# We have a parent
if parent:
# Check if parent is hidden
if parent[0] == '[':
a2 = '##'
else:
a2 = '#'
# If a LV has a pool, then it should be sorted/loaded after the pool
# lv, unless it's a hidden too, then after other hidden, but before visible
if pool:
if pool[0] != '[':
a2 += '~'
else:
a1 = '$' + a1
return "%s%s%s" % (a1, a2, name)
# noinspection PyUnusedLocal
def lvs_state_retrieve(selection, cache_refresh=True):
rc = []
if cache_refresh:
cfg.db.refresh()
# When building up the model, it's best to process LVs with the least
# dependencies to those that are dependant upon other LVs. Otherwise, when
# we are trying to gather information we could be in a position where we
# don't have information available yet.
lvs = sorted(cfg.db.fetch_lvs(selection), key=get_key)
for l in lvs:
rc.append(LvState(
l['lv_uuid'], l['lv_name'],
l['lv_path'], n(l['lv_size']),
l['vg_name'],
l['vg_uuid'], l['pool_lv_uuid'],
l['pool_lv'], l['origin_uuid'], l['origin'],
n32(l['data_percent']), l['lv_attr'],
l['lv_tags'], l['lv_active'], l['data_lv'],
l['metadata_lv'], l['segtype'], l['lv_role'],
l['lv_layout'],
n32(l['snap_percent']),
n32(l['metadata_percent']),
n32(l['copy_percent']),
n32(l['sync_percent']),
n(l['lv_metadata_size']),
l['move_pv'],
l['move_pv_uuid']))
return rc
def load_lvs(lv_name=None, object_path=None, refresh=False, emit_signal=False,
cache_refresh=True):
# noinspection PyUnresolvedReferences
return common(
lvs_state_retrieve,
(LvCommon, Lv, LvThinPool, LvSnapShot),
lv_name, object_path, refresh, emit_signal, cache_refresh)
# noinspection PyPep8Naming,PyUnresolvedReferences,PyUnusedLocal
class LvState(State):
@staticmethod
def _pv_devices(uuid):
rc = []
for pv in sorted(cfg.db.lv_contained_pv(uuid)):
(pv_uuid, pv_name, pv_segs) = pv
pv_obj = cfg.om.get_object_path_by_uuid_lvm_id(pv_uuid, pv_name)
segs_decorate = []
for i in pv_segs:
segs_decorate.append((dbus.UInt64(i[0]),
dbus.UInt64(i[1]),
dbus.String(i[2])))
rc.append((dbus.ObjectPath(pv_obj), segs_decorate))
return dbus.Array(rc, signature="(oa(tts))")
def vg_name_lookup(self):
return cfg.om.get_object_by_path(self.Vg).Name
@property
def lvm_id(self):
return "%s/%s" % (self.vg_name_lookup(), self.Name)
def identifiers(self):
return (self.Uuid, self.lvm_id)
def _get_hidden_lv(self):
rc = dbus.Array([], "o")
vg_name = self.vg_name_lookup()
for l in cfg.db.hidden_lvs(self.Uuid):
full_name = "%s/%s" % (vg_name, l[1])
op = cfg.om.get_object_path_by_uuid_lvm_id(l[0], full_name)
assert op
rc.append(dbus.ObjectPath(op))
return rc
def __init__(self, Uuid, Name, Path, SizeBytes,
vg_name, vg_uuid, pool_lv_uuid, PoolLv,
origin_uuid, OriginLv, DataPercent, Attr, Tags, active,
data_lv, metadata_lv, segtypes, role, layout, SnapPercent,
MetaDataPercent, CopyPercent, SyncPercent, MetaDataSizeBytes,
move_pv, move_pv_uuid):
utils.init_class_from_arguments(self)
# The segtypes is possibly an array with potentially dupes or a single
# value
self._segs = dbus.Array([], signature='s')
if not isinstance(segtypes, list):
self._segs.append(dbus.String(segtypes))
else:
self._segs.extend([dbus.String(x) for x in set(segtypes)])
self.Vg = cfg.om.get_object_path_by_uuid_lvm_id(
vg_uuid, vg_name, vg_obj_path_generate)
self.Devices = LvState._pv_devices(self.Uuid)
if PoolLv:
gen = utils.lv_object_path_method(Name, (Attr, layout, role))
self.PoolLv = cfg.om.get_object_path_by_uuid_lvm_id(
pool_lv_uuid, '%s/%s' % (vg_name, PoolLv), gen)
else:
self.PoolLv = '/'
if OriginLv:
self.OriginLv = \
cfg.om.get_object_path_by_uuid_lvm_id(
origin_uuid, '%s/%s' % (vg_name, OriginLv),
vg_obj_path_generate)
else:
self.OriginLv = '/'
self.HiddenLvs = self._get_hidden_lv()
@property
def SegType(self):
return self._segs
def _object_path_create(self):
return utils.lv_object_path_method(
self.Name, (self.Attr, self.layout, self.role))
def _object_type_create(self):
if self.Attr[0] == 't':
return LvThinPool
elif self.Attr[0] == 'C':
if 'pool' in self.layout:
return LvCachePool
else:
return LvCacheLv
elif self.Name[0] == '[':
return LvCommon
elif self.OriginLv != '/':
return LvSnapShot
else:
return Lv
def create_dbus_object(self, path):
if not path:
path = cfg.om.get_object_path_by_uuid_lvm_id(
self.Uuid, self.lvm_id, self._object_path_create())
obj_ctor = self._object_type_create()
return obj_ctor(path, self)
def creation_signature(self):
klass = self._object_type_create()
path_method = self._object_path_create()
return (klass, path_method)
# noinspection PyPep8Naming
@utils.dbus_property(LV_COMMON_INTERFACE, 'Uuid', 's')
@utils.dbus_property(LV_COMMON_INTERFACE, 'Name', 's')
@utils.dbus_property(LV_COMMON_INTERFACE, 'Path', 's')
@utils.dbus_property(LV_COMMON_INTERFACE, 'SizeBytes', 't')
@utils.dbus_property(LV_COMMON_INTERFACE, 'SegType', 'as')
@utils.dbus_property(LV_COMMON_INTERFACE, 'Vg', 'o')
@utils.dbus_property(LV_COMMON_INTERFACE, 'OriginLv', 'o')
@utils.dbus_property(LV_COMMON_INTERFACE, 'PoolLv', 'o')
@utils.dbus_property(LV_COMMON_INTERFACE, 'Devices', "a(oa(tts))")
@utils.dbus_property(LV_COMMON_INTERFACE, 'HiddenLvs', "ao")
@utils.dbus_property(LV_COMMON_INTERFACE, 'Attr', 's')
@utils.dbus_property(LV_COMMON_INTERFACE, 'DataPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'SnapPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'MetaDataPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'CopyPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'SyncPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'MetaDataSizeBytes', 't')
class LvCommon(AutomatedProperties):
_Tags_meta = ("as", LV_COMMON_INTERFACE)
_Roles_meta = ("as", LV_COMMON_INTERFACE)
_IsThinVolume_meta = ("b", LV_COMMON_INTERFACE)
_IsThinPool_meta = ("b", LV_COMMON_INTERFACE)
_Active_meta = ("b", LV_COMMON_INTERFACE)
_VolumeType_meta = ("(ss)", LV_COMMON_INTERFACE)
_Permissions_meta = ("(ss)", LV_COMMON_INTERFACE)
_AllocationPolicy_meta = ("(ss)", LV_COMMON_INTERFACE)
_State_meta = ("(ss)", LV_COMMON_INTERFACE)
_TargetType_meta = ("(ss)", LV_COMMON_INTERFACE)
_Health_meta = ("(ss)", LV_COMMON_INTERFACE)
_FixedMinor_meta = ('b', LV_COMMON_INTERFACE)
_ZeroBlocks_meta = ('b', LV_COMMON_INTERFACE)
_SkipActivation_meta = ('b', LV_COMMON_INTERFACE)
_MovePv_meta = ('o', LV_COMMON_INTERFACE)
def _get_move_pv(self):
path = None
# It's likely that the move_pv is empty
if self.state.move_pv_uuid and self.state.move_pv:
path = cfg.om.get_object_path_by_uuid_lvm_id(
self.state.move_pv_uuid, self.state.move_pv)
if not path:
path = '/'
return path
# noinspection PyUnusedLocal,PyPep8Naming
def __init__(self, object_path, object_state):
super(LvCommon, self).__init__(object_path, lvs_state_retrieve)
self.set_interface(LV_COMMON_INTERFACE)
self.state = object_state
self._move_pv = self._get_move_pv()
@staticmethod
def handle_execute(rc, out, err):
if rc == 0:
cfg.load()
else:
            # Need to work on error handling; error reporting should be made consistent
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
@staticmethod
def validate_dbus_object(lv_uuid, lv_name):
dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
if not dbo:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'LV with uuid %s and name %s not present!' %
(lv_uuid, lv_name))
return dbo
@property
def VolumeType(self):
type_map = {'C': 'Cache', 'm': 'mirrored',
'M': 'Mirrored without initial sync', 'o': 'origin',
'O': 'Origin with merging snapshot', 'r': 'raid',
'R': 'Raid without initial sync', 's': 'snapshot',
'S': 'merging Snapshot', 'p': 'pvmove',
'v': 'virtual', 'i': 'mirror or raid image',
'I': 'mirror or raid Image out-of-sync',
'l': 'mirror log device', 'c': 'under conversion',
'V': 'thin Volume', 't': 'thin pool', 'T': 'Thin pool data',
'e': 'raid or pool metadata or pool metadata spare',
'-': 'Unspecified'}
return dbus.Struct((self.state.Attr[0], type_map[self.state.Attr[0]]),
                           signature="(ss)")
@property
def Permissions(self):
type_map = {'w': 'writable', 'r': 'read-only',
'R': 'Read-only activation of non-read-only volume',
'-': 'Unspecified'}
return dbus.Struct((self.state.Attr[1], type_map[self.state.Attr[1]]),
signature="(ss)")
@property
def AllocationPolicy(self):
type_map = {'a': 'anywhere', 'A': 'anywhere locked',
'c': 'contiguous', 'C': 'contiguous locked',
'i': 'inherited', 'I': 'inherited locked',
'l': 'cling', 'L': 'cling locked',
'n': 'normal', 'N': 'normal locked', '-': 'Unspecified'}
return dbus.Struct((self.state.Attr[2], type_map[self.state.Attr[2]]),
signature="(ss)")
@property
def FixedMinor(self):
return dbus.Boolean(self.state.Attr[3] == 'm')
@property
def State(self):
type_map = {'a': 'active', 's': 'suspended', 'I': 'Invalid snapshot',
'S': 'invalid Suspended snapshot',
'm': 'snapshot merge failed',
'M': 'suspended snapshot (M)erge failed',
'd': 'mapped device present without tables',
'i': 'mapped device present with inactive table',
'X': 'unknown', '-': 'Unspecified'}
return dbus.Struct((self.state.Attr[4], type_map[self.state.Attr[4]]),
signature="(ss)")
@property
def TargetType(self):
type_map = {'C': 'Cache', 'm': 'mirror', 'r': 'raid',
's': 'snapshot', 't': 'thin', 'u': 'unknown',
'v': 'virtual', '-': 'Unspecified'}
return dbus.Struct((self.state.Attr[6], type_map[self.state.Attr[6]]),
signature="(ss)")
@property
def ZeroBlocks(self):
return dbus.Boolean(self.state.Attr[7] == 'z')
@property
def Health(self):
type_map = {'p': 'partial', 'r': 'refresh',
'm': 'mismatches', 'w': 'writemostly',
'X': 'X unknown', '-': 'Unspecified'}
return dbus.Struct((self.state.Attr[8], type_map[self.state.Attr[8]]),
signature="(ss)")
@property
def SkipActivation(self):
return dbus.Boolean(self.state.Attr[9] == 'k')
def vg_name_lookup(self):
return self.state.vg_name_lookup()
def lv_full_name(self):
return "%s/%s" % (self.state.vg_name_lookup(), self.state.Name)
@property
def identifiers(self):
return self.state.identifiers
@property
def Tags(self):
return utils.parse_tags(self.state.Tags)
@property
def Roles(self):
return utils.parse_tags(self.state.role)
@property
def lvm_id(self):
return self.state.lvm_id
@property
def IsThinVolume(self):
return dbus.Boolean(self.state.Attr[0] == 'V')
@property
def IsThinPool(self):
return dbus.Boolean(self.state.Attr[0] == 't')
@property
def Active(self):
return dbus.Boolean(self.state.active == "active")
@property
def MovePv(self):
return dbus.ObjectPath(self._move_pv)
# noinspection PyPep8Naming
class Lv(LvCommon):
def _fetch_hidden(self, name):
# The name is vg/name
full_name = "%s/%s" % (self.vg_name_lookup(), name)
return cfg.om.get_object_path_by_lvm_id(full_name)
def _get_data_meta(self):
# Get the data
return (self._fetch_hidden(self.state.data_lv),
self._fetch_hidden(self.state.metadata_lv))
# noinspection PyUnusedLocal,PyPep8Naming
def __init__(self, object_path, object_state):
super(Lv, self).__init__(object_path, object_state)
self.set_interface(LV_INTERFACE)
self.state = object_state
@staticmethod
def _remove(lv_uuid, lv_name, remove_options):
# Make sure we have a dbus object representing it
LvCommon.validate_dbus_object(lv_uuid, lv_name)
# Remove the LV, if successful then remove from the model
rc, out, err = cmdhandler.lv_remove(lv_name, remove_options)
LvCommon.handle_execute(rc, out, err)
return '/'
@dbus.service.method(
dbus_interface=LV_INTERFACE,
in_signature='ia{sv}',
out_signature='o',
async_callbacks=('cb', 'cbe'))
def Remove(self, tmo, remove_options, cb, cbe):
r = RequestEntry(
tmo, Lv._remove,
(self.Uuid, self.lvm_id, remove_options),
cb, cbe, False)
cfg.worker_q.put(r)
@staticmethod
def _rename(lv_uuid, lv_name, new_name, rename_options):
# Make sure we have a dbus object representing it
LvCommon.validate_dbus_object(lv_uuid, lv_name)
# Rename the logical volume
rc, out, err = cmdhandler.lv_rename(lv_name, new_name,
rename_options)
LvCommon.handle_execute(rc, out, err)
return '/'
@dbus.service.method(
dbus_interface=LV_INTERFACE,
in_signature='sia{sv}',
out_signature='o',
async_callbacks=('cb', 'cbe'))
def Rename(self, name, tmo, rename_options, cb, cbe):
utils.validate_lv_name(LV_INTERFACE, self.vg_name_lookup(), name)
r = RequestEntry(
tmo, Lv._rename,
(self.Uuid, self.lvm_id, name, rename_options),
cb, cbe, False)
cfg.worker_q.put(r)
@dbus.service.method(
dbus_interface=LV_INTERFACE,
in_signature='o(tt)a(ott)ia{sv}',
out_signature='o',
async_callbacks=('cb', 'cbe'))
def Move(self, pv_src_obj, pv_source_range,
pv_dests_and_ranges,
tmo, move_options, cb, cbe):
job_state = JobState()
r = RequestEntry(
tmo, background.move,
(LV_INTERFACE, self.lvm_id, pv_src_obj, pv_source_range,
pv_dests_and_ranges, move_options, job_state), cb, cbe, False,
job_state)
background.cmd_runner(r)
@staticmethod
def _snap_shot(lv_uuid, lv_name, name, optional_size,
snapshot_options):
# Make sure we have a dbus object representing it
dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
# If you specify a size you get a 'thick' snapshot even if
# it is a thin lv
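        # When no size is given, default to roughly 1/80th (1.25%) of the
        # origin LV size, rounded up to the next 512-byte multiple.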
if not dbo.IsThinVolume:
if optional_size == 0:
space = dbo.SizeBytes // 80
remainder = space % 512
optional_size = space + 512 - remainder
rc, out, err = cmdhandler.vg_lv_snapshot(
lv_name, snapshot_options, name, optional_size)
LvCommon.handle_execute(rc, out, err)
full_name = "%s/%s" % (dbo.vg_name_lookup(), name)
return cfg.om.get_object_path_by_lvm_id(full_name)
@dbus.service.method(
dbus_interface=LV_INTERFACE,
in_signature='stia{sv}',
out_signature='(oo)',
async_callbacks=('cb', 'cbe'))
def Snapshot(self, name, optional_size, tmo,
snapshot_options, cb, cbe):
utils.validate_lv_name(LV_INTERFACE, self.vg_name_lookup(), name)
r = RequestEntry(
tmo, Lv._snap_shot,
(self.Uuid, self.lvm_id, name,
optional_size, snapshot_options), cb, cbe)
cfg.worker_q.put(r)
@staticmethod
def _resize(lv_uuid, lv_name, new_size_bytes, pv_dests_and_ranges,
resize_options):
# Make sure we have a dbus object representing it
pv_dests = []
dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
# If we have PVs, verify them
if len(pv_dests_and_ranges):
for pr in pv_dests_and_ranges:
pv_dbus_obj = cfg.om.get_object_by_path(pr[0])
if not pv_dbus_obj:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'PV Destination (%s) not found' % pr[0])
pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
size_change = new_size_bytes - dbo.SizeBytes
rc, out, err = cmdhandler.lv_resize(dbo.lvm_id, size_change,
pv_dests, resize_options)
LvCommon.handle_execute(rc, out, err)
return "/"
@dbus.service.method(
dbus_interface=LV_INTERFACE,
in_signature='ta(ott)ia{sv}',
out_signature='o',
async_callbacks=('cb', 'cbe'))
def Resize(self, new_size_bytes, pv_dests_and_ranges, tmo,
resize_options, cb, cbe):
"""
Resize a LV
:param new_size_bytes: The requested final size in bytes
:param pv_dests_and_ranges: An array of pv object paths and src &
dst. segment ranges
:param tmo: -1 to wait forever, 0 to return job immediately, else
number of seconds to wait for operation to complete
before getting a job
:param resize_options: key/value hash of options
:param cb: Used by framework not client facing API
:param cbe: Used by framework not client facing API
:return: '/' if complete, else job object path
"""
r = RequestEntry(
tmo, Lv._resize,
(self.Uuid, self.lvm_id, round_size(new_size_bytes),
pv_dests_and_ranges,
resize_options), cb, cbe, return_tuple=False)
cfg.worker_q.put(r)
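    # Example of how a client might drive the Resize method above over D-Bus
    # (a sketch only, assuming dbus-python on the client side and the
    # com.redhat.lvmdbus1 bus name used by lvmdbusd):
    #   lv = bus.get_object('com.redhat.lvmdbus1', lv_object_path)
    #   lv.Resize(new_size_bytes, dbus.Array([], signature='(ott)'), -1, {},
    #             dbus_interface=LV_INTERFACE)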
@staticmethod
def _lv_activate_deactivate(uuid, lv_name, activate, control_flags,
options):
# Make sure we have a dbus object representing it
LvCommon.validate_dbus_object(uuid, lv_name)
rc, out, err = cmdhandler.activate_deactivate(
'lvchange', lv_name, activate, control_flags, options)
LvCommon.handle_execute(rc, out, err)
return '/'
@dbus.service.method(
dbus_interface=LV_INTERFACE,
in_signature='tia{sv}',
out_signature='o',
async_callbacks=('cb', 'cbe'))
def Activate(self, control_flags, tmo, activate_options, cb, cbe):
r = RequestEntry(
tmo, Lv._lv_activate_deactivate,
(self.state.Uuid, self.state.lvm_id, True,
control_flags, activate_options),
cb, cbe, return_tuple=False)
cfg.worker_q.put(r)
# noinspection PyProtectedMember
@dbus.service.method(
dbus_interface=LV_INTERFACE,
in_signature='tia{sv}',
out_signature='o',
async_callbacks=('cb', 'cbe'))
def Deactivate(self, control_flags, tmo, activate_options, cb, cbe):
r = RequestEntry(
tmo, Lv._lv_activate_deactivate,
(self.state.Uuid, self.state.lvm_id, False,
control_flags, activate_options),
cb, cbe, return_tuple=False)
cfg.worker_q.put(r)
@staticmethod
def _add_rm_tags(uuid, lv_name, tags_add, tags_del, tag_options):
# Make sure we have a dbus object representing it
LvCommon.validate_dbus_object(uuid, lv_name)
rc, out, err = cmdhandler.lv_tag(
lv_name, tags_add, tags_del, tag_options)
LvCommon.handle_execute(rc, out, err)
return '/'
@dbus.service.method(
dbus_interface=LV_INTERFACE,
in_signature='asia{sv}',
out_signature='o',
async_callbacks=('cb', 'cbe'))
def TagsAdd(self, tags, tmo, tag_options, cb, cbe):
for t in tags:
utils.validate_tag(LV_INTERFACE, t)
r = RequestEntry(
tmo, Lv._add_rm_tags,
(self.state.Uuid, self.state.lvm_id,
tags, None, tag_options),
cb, cbe, return_tuple=False)
cfg.worker_q.put(r)
@dbus.service.method(
dbus_interface=LV_INTERFACE,
in_signature='asia{sv}',
out_signature='o',
async_callbacks=('cb', 'cbe'))
def TagsDel(self, tags, tmo, tag_options, cb, cbe):
for t in tags:
utils.validate_tag(LV_INTERFACE, t)
r = RequestEntry(
tmo, Lv._add_rm_tags,
(self.state.Uuid, self.state.lvm_id,
None, tags, tag_options),
cb, cbe, return_tuple=False)
cfg.worker_q.put(r)
# noinspection PyPep8Naming
class LvThinPool(Lv):
_DataLv_meta = ("o", THIN_POOL_INTERFACE)
_MetaDataLv_meta = ("o", THIN_POOL_INTERFACE)
def __init__(self, object_path, object_state):
super(LvThinPool, self).__init__(object_path, object_state)
self.set_interface(THIN_POOL_INTERFACE)
self._data_lv, self._metadata_lv = self._get_data_meta()
@property
def DataLv(self):
return dbus.ObjectPath(self._data_lv)
@property
def MetaDataLv(self):
return dbus.ObjectPath(self._metadata_lv)
@staticmethod
def _lv_create(lv_uuid, lv_name, name, size_bytes, create_options):
# Make sure we have a dbus object representing it
dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
rc, out, err = cmdhandler.lv_lv_create(
lv_name, create_options, name, size_bytes)
LvCommon.handle_execute(rc, out, err)
full_name = "%s/%s" % (dbo.vg_name_lookup(), name)
return cfg.om.get_object_path_by_lvm_id(full_name)
@dbus.service.method(
dbus_interface=THIN_POOL_INTERFACE,
in_signature='stia{sv}',
out_signature='(oo)',
async_callbacks=('cb', 'cbe'))
def LvCreate(self, name, size_bytes, tmo, create_options, cb, cbe):
utils.validate_lv_name(THIN_POOL_INTERFACE, self.vg_name_lookup(), name)
r = RequestEntry(
tmo, LvThinPool._lv_create,
(self.Uuid, self.lvm_id, name,
round_size(size_bytes), create_options), cb, cbe)
cfg.worker_q.put(r)
# noinspection PyPep8Naming
class LvCachePool(Lv):
_DataLv_meta = ("o", CACHE_POOL_INTERFACE)
_MetaDataLv_meta = ("o", CACHE_POOL_INTERFACE)
def __init__(self, object_path, object_state):
super(LvCachePool, self).__init__(object_path, object_state)
self.set_interface(CACHE_POOL_INTERFACE)
self._data_lv, self._metadata_lv = self._get_data_meta()
@property
def DataLv(self):
return dbus.ObjectPath(self._data_lv)
@property
def MetaDataLv(self):
return dbus.ObjectPath(self._metadata_lv)
@staticmethod
def _cache_lv(lv_uuid, lv_name, lv_object_path, cache_options):
# Make sure we have a dbus object representing cache pool
dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
# Make sure we have dbus object representing lv to cache
lv_to_cache = cfg.om.get_object_by_path(lv_object_path)
if lv_to_cache:
fcn = lv_to_cache.lv_full_name()
rc, out, err = cmdhandler.lv_cache_lv(
dbo.lv_full_name(), fcn, cache_options)
if rc == 0:
# When we cache an LV, the cache pool and the lv that is getting
# cached need to be removed from the object manager and
# re-created as their interfaces have changed!
mt_remove_dbus_objects((dbo, lv_to_cache))
cfg.load()
lv_converted = cfg.om.get_object_path_by_lvm_id(fcn)
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE, 'LV to cache with object path %s not present!' %
lv_object_path)
return lv_converted
@dbus.service.method(
dbus_interface=CACHE_POOL_INTERFACE,
in_signature='oia{sv}',
out_signature='(oo)',
async_callbacks=('cb', 'cbe'))
def CacheLv(self, lv_object, tmo, cache_options, cb, cbe):
r = RequestEntry(
tmo, LvCachePool._cache_lv,
(self.Uuid, self.lvm_id, lv_object,
cache_options), cb, cbe)
cfg.worker_q.put(r)
# noinspection PyPep8Naming
class LvCacheLv(Lv):
_CachePool_meta = ("o", LV_CACHED)
def __init__(self, object_path, object_state):
super(LvCacheLv, self).__init__(object_path, object_state)
self.set_interface(LV_CACHED)
@property
def CachePool(self):
return dbus.ObjectPath(self.state.PoolLv)
@staticmethod
def _detach_lv(lv_uuid, lv_name, detach_options, destroy_cache):
# Make sure we have a dbus object representing cache pool
dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
# Get current cache name
cache_pool = cfg.om.get_object_by_path(dbo.CachePool)
rc, out, err = cmdhandler.lv_detach_cache(
dbo.lv_full_name(), detach_options, destroy_cache)
if rc == 0:
            # The cache pool goes from hidden back to visible here, so delete
            # the stale dbus objects and reload
mt_remove_dbus_objects((cache_pool, dbo))
cfg.load()
uncached_lv_path = cfg.om.get_object_path_by_lvm_id(lv_name)
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
return uncached_lv_path
@dbus.service.method(
dbus_interface=LV_CACHED,
in_signature='bia{sv}',
out_signature='(oo)',
async_callbacks=('cb', 'cbe'))
def DetachCachePool(self, destroy_cache, tmo, detach_options, cb, cbe):
r = RequestEntry(
tmo, LvCacheLv._detach_lv,
(self.Uuid, self.lvm_id, detach_options,
destroy_cache), cb, cbe)
cfg.worker_q.put(r)
# noinspection PyPep8Naming
class LvSnapShot(Lv):
def __init__(self, object_path, object_state):
super(LvSnapShot, self).__init__(object_path, object_state)
self.set_interface(SNAPSHOT_INTERFACE)
@dbus.service.method(
dbus_interface=SNAPSHOT_INTERFACE,
in_signature='ia{sv}',
out_signature='o',
async_callbacks=('cb', 'cbe'))
def Merge(self, tmo, merge_options, cb, cbe):
job_state = JobState()
r = RequestEntry(tmo, background.merge,
(SNAPSHOT_INTERFACE, self.Uuid, self.lvm_id,
merge_options, job_state), cb, cbe, False,
job_state)
background.cmd_runner(r)
| 30.247596 | 79 | 0.710522 |
036fb7b6cbbad0bc47fd87c70ccf51053854b213
| 548 |
py
|
Python
|
jumeaux/addons/models.py
|
ihatov08/jumeaux
|
7d983474df4b6dcfa57ea1a66901fbc99ebababa
|
[
"MIT"
] | 11 |
2017-10-02T01:29:12.000Z
|
2022-03-31T08:37:22.000Z
|
jumeaux/addons/models.py
|
ihatov08/jumeaux
|
7d983474df4b6dcfa57ea1a66901fbc99ebababa
|
[
"MIT"
] | 79 |
2017-07-16T14:47:17.000Z
|
2022-03-31T08:49:14.000Z
|
jumeaux/addons/models.py
|
ihatov08/jumeaux
|
7d983474df4b6dcfa57ea1a66901fbc99ebababa
|
[
"MIT"
] | 2 |
2019-01-28T06:11:58.000Z
|
2021-01-25T07:21:21.000Z
|
# -*- coding: utf-8 -*-
from owlmixin import OwlMixin, TOption, TList
class Addon(OwlMixin):
name: str
cls_name: str = "Executor"
config: TOption[dict]
include: TOption[str]
tags: TOption[TList[str]]
# The addon lists in Addons default to empty lists rather than None
class Addons(OwlMixin):
log2reqs: Addon
reqs2reqs: TList[Addon] = []
res2res: TList[Addon] = []
res2dict: TList[Addon] = []
judgement: TList[Addon] = []
store_criterion: TList[Addon] = []
dump: TList[Addon] = []
did_challenge: TList[Addon] = []
final: TList[Addon] = []
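# Usage sketch (assuming owlmixin's standard `from_dict` constructor; the addon
# names below are only illustrative):
#   addons = Addons.from_dict({
#       "log2reqs": {"name": "csv"},
#       "reqs2reqs": [{"name": "head", "config": {"size": 10}}],
#   })
#   print(addons.log2reqs.name)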
| 22.833333 | 45 | 0.614964 |
037b15551d166c8c16f7ab353f07ce460a9c9d1e
| 4,696 |
py
|
Python
|
FuzzyMachine.py
|
siej88/FuzzyACO
|
989a58049c8417cd023cfc312fb99d2649333ca7
|
[
"MIT"
] | null | null | null |
FuzzyMachine.py
|
siej88/FuzzyACO
|
989a58049c8417cd023cfc312fb99d2649333ca7
|
[
"MIT"
] | null | null | null |
FuzzyMachine.py
|
siej88/FuzzyACO
|
989a58049c8417cd023cfc312fb99d2649333ca7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
UNIVERSIDAD DE CONCEPCION
Department of Informatics Engineering and
Computer Science
Undergraduate thesis, Informatics Civil Engineering
EDGE DETECTION IN DGGE IMAGES USING A
HYBRID ACO SYSTEM WITH FUZZY LOGIC
Author: Sebastian Ignacio Espinoza Jimenez
Advisor: Maria Angelica Pinninghoff Junemann
"""
import numpy as N
import MathTools as mat
class FuzzyMachine(object):
"""Mamdani-Type Fuzzy Inference Engine"""
def __init__(self):
"""FuzzyMachine FuzzyMachine()"""
self._heuristicMatrix = None
self._imageFlag = False
self._mathTools = mat.MathTools()
def hasHeuristicMatrix(self):
"""bool hasHeuristicMatrix()"""
return self._imageFlag
def getHeuristicMatrix(self):
"""numpy.array getHeuristicMatrix()"""
return N.copy(self._heuristicMatrix)
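    # generateHeuristicMatrix() below implements the Mamdani pipeline: fuzzify
    # the convolution-based edge features with Gaussian membership functions,
    # take the min over each rule's antecedents and the max over rules firing
    # the same edge category, then defuzzify (mode-dependent) and normalize the
    # result into the heuristic matrix used by the ACO stage.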
def generateHeuristicMatrix(self, intensityMatrix, categorySet, parameterSet, ruleList):
"""numpy.array generateHeuristicMatrix(numpy.array intensityMatrix,
dict categorySet, dict parameterSet, list ruleSet)"""
deFuzzificationMode = parameterSet['deFuzzificationMode']
variableMatrixSet = self._generateVariableMatrixSet(intensityMatrix)
deFuzzifierAggregator = {}
categoryKeys = categorySet.keys()
for k in categoryKeys:
if categorySet[k]['variable'] == 'edge':
deFuzzifierAggregator[k] = []
ruleCount = len(ruleList)
for i in xrange(ruleCount):
categoryCount = len(ruleList[i])
minimumMatrixList = []
edgeCategory = ''
for j in xrange(categoryCount):
category = ruleList[i][j]
variable = categorySet[category]['variable']
if variable != 'edge':
mean = categorySet[category]['mean']
scale = categorySet[category]['scale']
minimumMatrixList.append(self._mathTools.gaussian(variableMatrixSet[variable], mean, scale))
else:
edgeCategory = category
minimumMatrix = self._mathTools.minimum(minimumMatrixList)
deFuzzifierAggregator[edgeCategory].append(minimumMatrix)
maximumMatrixSet = {}
maximumMatrixList = []
edgeCategoryKeys = deFuzzifierAggregator.keys()
for k in edgeCategoryKeys:
if len(deFuzzifierAggregator[k]) > 0:
maximumMatrixSet[k] = self._mathTools.maximum(deFuzzifierAggregator[k])
maximumMatrixList.append(maximumMatrixSet[k])
maximumValues = self._mathTools.maximum(maximumMatrixList)
heuristicMatrix = N.zeros_like(intensityMatrix)
edgeCategoryKeys = maximumMatrixSet.keys()
if deFuzzificationMode != 2:
for k in edgeCategoryKeys:
indexes = N.where(maximumValues == maximumMatrixSet[k])
values = maximumMatrixSet[k][indexes]
values[N.where(values == 0)] = 1e-10
mean = categorySet[k]['mean']
scale = categorySet[k]['scale']
heuristicMatrix[indexes] = self._mathTools.inverseGaussian(values, mean, scale, deFuzzificationMode)
else:
summationMatrix = N.zeros_like(intensityMatrix)
for k in edgeCategoryKeys:
mean = categorySet[k]['mean']
scale = categorySet[k]['scale']
heuristicMatrix += maximumMatrixSet[k] * mean * scale
summationMatrix += maximumMatrixSet[k] * scale
summationMatrix[N.where(summationMatrix == 0)] = 1e-10
heuristicMatrix /= summationMatrix
heuristicMatrix *= self._mathTools.standardDeviation(intensityMatrix)
heuristicMatrix = self._mathTools.normalize(heuristicMatrix)
self._heuristicMatrix = N.copy(heuristicMatrix)
self._imageFlag = True
return heuristicMatrix
def _generateVariableMatrixSet(self, intensityMatrix):
"""dict _generateFuzzyVariableMatrices(numpy.array intensityMatrix)"""
variableMatrix = {}
convolutionMask = {}
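        # 3x3 masks that respond to row-wise (mRow), column-wise (mCol) and
        # omnidirectional (iDiag, Laplacian-like) intensity changes; their
        # absolute convolution responses become the fuzzy input variables.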
convolutionMask['mRow'] = N.array([[1,1,1],[-2,-2,-2],[1,1,1]])/3.
convolutionMask['mCol'] = N.array([[1,-2,1],[1,-2,1],[1,-2,1]])/3.
convolutionMask['iDiag'] = N.array([[1,1,1],[1,-8,1],[1,1,1]])
for v in convolutionMask.keys():
variableMatrix[v] = N.abs(self._mathTools.convolve(intensityMatrix, convolutionMask[v]))
return variableMatrix
| 45.592233 | 117 | 0.619676 |
00266780457269454f028a075438fc674e350e31
| 1,505 |
py
|
Python
|
pages/extensions/amp_example_preview/util/preview_test.py
|
ericandrewlewis/amp.dev
|
cf8e3d34a5582696ead97563c0809036804ab5c7
|
[
"Apache-2.0"
] | 300 |
2015-12-09T20:35:37.000Z
|
2019-07-16T06:41:29.000Z
|
pages/extensions/amp_example_preview/util/preview_test.py
|
ericandrewlewis/amp.dev
|
cf8e3d34a5582696ead97563c0809036804ab5c7
|
[
"Apache-2.0"
] | 2,099 |
2019-07-16T13:24:27.000Z
|
2022-03-26T12:31:51.000Z
|
pages/extensions/amp_example_preview/util/preview_test.py
|
ericandrewlewis/amp.dev
|
cf8e3d34a5582696ead97563c0809036804ab5c7
|
[
"Apache-2.0"
] | 543 |
2019-07-18T09:06:14.000Z
|
2022-03-31T02:43:10.000Z
|
"""Tests for the source code extractor."""
import unittest
import sys
import os
sys.path.extend([os.path.join(os.path.dirname(__file__), '.')])
from preview import ExamplePreview, ExamplePreviewMatch
class PreviewTestCase(unittest.TestCase):
def test_preview_wrap_and_extract(self):
example_code = '<h1>test {% test = "test" %} {{ test + \'123\' }}</h1>'
preview_created = ExamplePreview(index=2,
mode='top-frame',
orientation='landscape',
url='http://localhost/test',
playground=True,
source=example_code)
start_tag = preview_created.get_start_tag()
self.assertFalse('<h1>' in start_tag, 'html should be escaped')
html_code = start_tag \
+ example_code \
+ preview_created.get_end_tag()
html_code = '<p>before</p>' + html_code + '<p>after</p>'
extracted_previews = ExamplePreviewMatch.extract_previews(html_code)
self.assertEqual(1, len(extracted_previews))
preview_extracted = extracted_previews[0].preview
self.assertEqual(preview_created.mode, preview_extracted.mode)
self.assertEqual(preview_created.orientation, preview_extracted.orientation)
self.assertEqual(preview_created.url, preview_extracted.url)
self.assertEqual(preview_created.playground, preview_extracted.playground)
self.assertEqual(preview_created.source, preview_extracted.source)
| 31.354167 | 80 | 0.663787 |
cc5f0dc491a1c6ec70913655d806751ad73db3fc
| 388 |
py
|
Python
|
PINp/2014/Platonova Olga/task_2_21.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PINp/2014/Platonova Olga/task_2_21.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PINp/2014/Platonova Olga/task_2_21.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 2. Variant 21.
# Write a program that prints the saying by Leonardo da Vinci that you like most. Do not forget that the author must be mentioned on a separate line.
# Platonova O. A.
# 29.05.2016
print("Истина была единственной дочерью времени.\n\t\t\t\t\t\tЛеонардо да Винчи")
input("\n\nНажмите Enter для выхода.")
| 43.111111 | 209 | 0.775773 |
4e3f760bed76b655bdf6c28d45247b833f2f0db2
| 319 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/patches/v5_0/is_group.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
frappe-bench/apps/erpnext/erpnext/patches/v5_0/is_group.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v5_0/is_group.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doctype("Account")
frappe.reload_doctype("Cost Center")
frappe.db.sql("update tabAccount set is_group = if(group_or_ledger='Group', 1, 0)")
frappe.db.sql("update `tabCost Center` set is_group = if(group_or_ledger='Group', 1, 0)")
| 31.9 | 90 | 0.758621 |
ae3e6b56ca14287a054210c4def72bed4b5418c6
| 638 |
py
|
Python
|
Kapitel_2/_2_classmethods.py
|
Geralonx/Classes_Tutorial
|
9499db8159efce1e3c38975b66a9c649631c6727
|
[
"MIT"
] | 1 |
2020-12-24T15:42:54.000Z
|
2020-12-24T15:42:54.000Z
|
Kapitel_2/_2_classmethods.py
|
Geralonx/Classes_Tutorial
|
9499db8159efce1e3c38975b66a9c649631c6727
|
[
"MIT"
] | null | null | null |
Kapitel_2/_2_classmethods.py
|
Geralonx/Classes_Tutorial
|
9499db8159efce1e3c38975b66a9c649631c6727
|
[
"MIT"
] | null | null | null |
# --- Declaration of a class with a constructor and a factory/class method as an alternative way to create instances --- #
class Circle:
def __init__(self, radius):
self.radius = radius
    # --- Factory method to create an instance of the 'Circle' class from its diameter --- #
@classmethod
def from_diameter(cls, diameter):
calculated_radius = diameter/2
return cls(radius=calculated_radius)
# --- Instantiation via the standard constructor --- #
c1 = Circle(10)
# --- Instantiation via the factory method --- #
c2 = Circle.from_diameter(40)
print(c1.radius)
print(c2.radius)
# 10
# 20
| 27.73913 | 120 | 0.703762 |
ee0e435fca44bec4eaeb0484af61116ce2b6a586
| 1,606 |
py
|
Python
|
Curso_Python/Secao3-Python-Intermediario-Programacao-Procedural/56_dicionarios/dicionario.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao3-Python-Intermediario-Programacao-Procedural/56_dicionarios/dicionario.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao3-Python-Intermediario-Programacao-Procedural/56_dicionarios/dicionario.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
"""
Dictionaries
"""
"""
part 1
dicionario = {1: 'Valor 1', 2: 'Valor 2', 3: 'Valor3'}
dicionario['str'] = 'valor'
dicionario['1'] = 'Agora existe'
if dicionario.get('str') is not None: # get() returns None for a missing key, but this key exists, so it is not None
print(dicionario.get('str'))
print(dicionario)
del dicionario['1'] # deletes this entry
print(dicionario)
print('str' in dicionario) # returns a boolean
print(len(dicionario))
for k in dicionario.items(): # iterates over the whole dictionary via .items()
print(k[0], k[1])
for k, v in dicionario.items():
print(k, v)
"""
""" Parte 2
clientes = {'cliente1': {'nome': 'Luiz', 'sobrenome': 'Ótavio'},
'cliente2': {'nome': 'João', 'sobrenome': 'Moreira'}}
for clientes_k, clientes_v in clientes.items():
print(f'Exibindo {clientes_k}')
for dados_k, dados_v in clientes_v.items():
print(f'\t{dados_k} = {dados_v}')
"""
""" parte 3 cria uma copia raza
# a plain copy() may not end up being very useful: depending on the situation, a value in one dictionary changes together with the other
d1 = {1: 'a', 2: 'b', 3: 'c', 4: ['Luiz', 'Otavio']}
v = d1.copy()
v[1] = 'Luiz'
v[4][0] = 'Joãozinho'
print(d1)
print(v)
"""
"""
# Creates a real (deep) copy; the two dictionaries never change together under any circumstances
import copy
d1 = {1: 'a', 2: 'b', 3: 'c', 4: ['Luiz', 'Otavio']}
v = copy.deepcopy(d1)
v[1] = 'Luiz'
v[4][0] = 'Joãozinho'
print(d1)
print(v)
"""
d1 = {1: 1, 2: 2, 3: 3}
d1.pop(3)  # Removes the entry with the given key
d1.popitem()  # Removes the last inserted entry
print(d1)
d2 = {4: 4, 5: 5, 6: 6, 7: 7}
d1.update(d2)  # Merges d2 into d1
print(d1)
| 27.220339 | 120 | 0.629514 |
c9e37eee7feaad158cc918f0c8d44eb747a75acf
| 4,114 |
py
|
Python
|
experiments/extract_arguments.py
|
habecker/transfer-gan
|
b935e69d0fa0d37ba80aab091ce59e1657eacb1e
|
[
"BSD-3-Clause"
] | null | null | null |
experiments/extract_arguments.py
|
habecker/transfer-gan
|
b935e69d0fa0d37ba80aab091ce59e1657eacb1e
|
[
"BSD-3-Clause"
] | 5 |
2021-03-19T14:21:08.000Z
|
2022-03-12T00:42:00.000Z
|
experiments/extract_arguments.py
|
habecker/transfer-gan
|
b935e69d0fa0d37ba80aab091ce59e1657eacb1e
|
[
"BSD-3-Clause"
] | null | null | null |
import glob
import re
import yaml
import os
from collections import OrderedDict
ignore_files = {'./train_classifier.sh'}
options_set = set()
options_possibilities = {}
options_for_file = OrderedDict()
experiments = []
train_regex = re.compile(r'^[ ]*python3? train\.py[ ]*(?P<options>.*)$')
test_regex = re.compile(r'^[ ]*python3? test\.py[ ]*(?P<options>.*)$')
split_regex = re.compile(r'[ ]+')
replace_regex = re.compile(r'[ ]+--')
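# train_regex / test_regex capture everything that follows `python train.py` or
# `python test.py` on a line of a bash script; replace_regex and split_regex then
# break that option string into individual `--key value` pairs.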
def normalize_dataset(dataset_basename):
if dataset_basename == 'sketch_face_64' or dataset_basename == 'sketch_face_128':
return dataset_basename
if dataset_basename == 'celebA_edges':
return 'celeba_edges_64'
elif dataset_basename == 'celebA_edges_128':
return 'celeba_edges_128'
elif dataset_basename == 'ImageNetAA':
return 'imagenet_imagenet_64'
elif dataset_basename == 'imagenet_64':
return 'imagenet_imagenet_64'
elif dataset_basename == 'imagenet_64_2pair':
return 'imagenet_imagenet_64'
elif dataset_basename == 'cityscapes_128':
return 'labelmap_cityscapes_128'
elif dataset_basename == 'cityscapes_64':
return 'labelmap_cityscapes_64'
elif dataset_basename == 'cityscapes':
assert False
print(dataset_basename)
assert False
def normalize_options(options):
new_options = OrderedDict()
for k,v in options.items():
if k == 'dataroot':
new_options['dataset'] = normalize_dataset(os.path.basename(v))
continue
elif k == 'name':
continue
elif k == 'direction':
continue
elif k == 'input_nc':
continue
elif k == 'output_nc':
continue
elif k == 'load_size':
continue
elif k == 'crop_size':
continue
elif k == 'display_winsize':
continue
elif k == 'gpu_ids':
continue
new_options[k] = v
return new_options
def get_file_options(f):
options_dict = OrderedDict()
for line in f:
match = train_regex.search(line.rstrip())
if match:
options = replace_regex.sub('\t', match.group('options'))
options = [tuple(split_regex.split(opt.replace('--',''))) for opt in options.split('\t')]
for opt in options:
options_set.add(opt[0])
if len(opt) > 2:
exit(0)
if len(opt) > 1:
if opt[0] not in options_possibilities:
options_possibilities[opt[0]] = []
options_possibilities[opt[0]] = opt[1]
options_dict[opt[0]] = opt[1]
else:
options_dict[opt[0]] = True
return options_dict
return None
def get_file_test_options(f):
options_dict = OrderedDict()
for line in f:
match = test_regex.search(line.rstrip())
if match:
options = replace_regex.sub('\t', match.group('options'))
options = [tuple(split_regex.split(opt.replace('--',''))) for opt in options.split('\t')]
for opt in options:
options_set.add(opt[0])
if len(opt) > 2:
print(f, opt)
exit(0)
if len(opt) > 1:
if opt[0] not in options_possibilities:
options_possibilities[opt[0]] = []
options_possibilities[opt[0]] = opt[1]
options_dict[opt[0]] = opt[1]
else:
options_dict[opt[0]] = True
return options_dict
return None
for fp in glob.glob('./bash/*.sh'):
if fp in ignore_files:
continue
name = os.path.basename(fp)[:-3]
experiments.append(name)
with open(fp, 'r') as f:
options_for_file[name] = normalize_options(get_file_options(f))
if __name__ == "__main__":
for name, options in options_for_file.items():
print(" - %s:" % name)
for k,v in options.items():
print(' %s: %s' % (k,v))
| 34.571429 | 101 | 0.563928 |
11de5f6f23f811d80a89019be05c5475357fc7bb
| 8,067 |
py
|
Python
|
lib/SPDbCall.py
|
TechLabCommunity/SaintPeterTalent
|
eb80237de4d73f3a99e82e02edb714f5057bd559
|
[
"MIT"
] | 1 |
2019-01-03T12:59:19.000Z
|
2019-01-03T12:59:19.000Z
|
lib/SPDbCall.py
|
TechLabCommunity/SaintPeterTalent
|
eb80237de4d73f3a99e82e02edb714f5057bd559
|
[
"MIT"
] | null | null | null |
lib/SPDbCall.py
|
TechLabCommunity/SaintPeterTalent
|
eb80237de4d73f3a99e82e02edb714f5057bd559
|
[
"MIT"
] | null | null | null |
from enum import IntEnum
from lib.AbstractSQL import AbstractSQL
class TypeEnter(IntEnum):
NOTHING = 0,
ENTER = 1,
EXIT = 2
class TypeAlarmAction(IntEnum):
NOTHING = 0,
ACTIVATE = 1,
DEACTIVATE = 2
class SPDbCall:
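    # Thin data-access layer: each method looks up a named SQL statement via
    # AbstractSQL.get_query_by_name() and runs it with bound parameters,
    # either fetching rows or committing updates.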
@staticmethod
def exists_access_code(access_code):
query = AbstractSQL.get_query_by_name('EXISTS_ACCESS_CODE')
return AbstractSQL.fetch_execute_one(query, (access_code,))
# (talent_code, member_type, is_master)
@staticmethod
def get_info_user(access_code):
if access_code is None or not access_code:
return None, None, None, None, None
query = AbstractSQL.get_query_by_name('INFO_USER_ACCESS')
row = AbstractSQL.fetch_execute_one(query, (access_code,))
if row is None:
return None, None, None, None, None
return row[0], row[1], row[2], row[3], list(map(int, row[4].split(',')))
@staticmethod
def is_online(talent_code):
if talent_code is None or not talent_code:
return False
query = AbstractSQL.get_query_by_name('IS_ONLINE_USER')
row = AbstractSQL.fetch_execute_one(query, (talent_code,))
return row is not None
@staticmethod
def n_user_online():
query = AbstractSQL.get_query_by_name('N_USER_ONLINE')
row = AbstractSQL.fetch_execute_one(query, ())
return int(row[0])
@staticmethod
def n_type_user(member_type):
if member_type is None or len(member_type) == 0:
return 0
query = AbstractSQL.get_query_by_name('N_TYPE_USER')
row = AbstractSQL.fetch_execute_one(query, (','.join(list(map(str, member_type))),))
return int(row[0])
@staticmethod
def exit_user(talent_code):
if talent_code is None or not talent_code:
return False
if not SPDbCall.is_online(talent_code):
return False
query = AbstractSQL.get_query_by_name('EXIT_USER')
AbstractSQL.execute_commit(query, (talent_code,))
return True
@staticmethod
def enter_user(talent_code, member_type):
if talent_code is None or not talent_code or member_type is None:
return False
if SPDbCall.is_online(talent_code):
return False
query = AbstractSQL.get_query_by_name('ENTER_USER')
AbstractSQL.execute_commit(query, (talent_code,member_type))
return True
@staticmethod
def all_dependent_by(member_type):
if member_type is None:
return None
query = AbstractSQL.get_query_by_name('LIST_MEMBER_DEPENDING').format(str(member_type))
rows = AbstractSQL.fetch_execute_all(query, ())
ids = []
for r in rows:
ids.append(r[0])
return ids
@staticmethod
def save_log(talent_code, member_type, is_enter, alarm_activation):
if talent_code is None or member_type is None:
return False
query = AbstractSQL.get_query_by_name('SAVE_LOG')
if alarm_activation is None:
alarm_activation = TypeAlarmAction.NOTHING
AbstractSQL.execute_commit(query, (talent_code, member_type, int(is_enter), int(alarm_activation)))
return True
@staticmethod
def empty_jail():
query = AbstractSQL.get_query_by_name('TRUNCATE_ONLINE_MEMBERS')
AbstractSQL.execute_commit(query, ())
return True
@staticmethod
def insert_request_access(accesscode):
if not accesscode or accesscode is None:
return False
query = AbstractSQL.get_query_by_name('INSERT_REQUEST_ACCESS')
AbstractSQL.execute_commit(query, (accesscode,))
return True
@staticmethod
def get_next_request():
query = AbstractSQL.get_query_by_name('GET_NEXT_REQUEST_ACCESS')
row = AbstractSQL.fetch_execute_one(query, ())
if row is None:
return None, None
return int(row[0]), str(row[1])
@staticmethod
def set_request_done(id):
query = AbstractSQL.get_query_by_name('SET_REQUEST_DONE')
try:
AbstractSQL.execute_commit(query, (int(id),))
return True
except:
return False
@staticmethod
def insert_request_serial(stringtosend):
if not stringtosend or stringtosend is None:
return False
query = AbstractSQL.get_query_by_name('INSERT_REQUEST_SERIAL')
AbstractSQL.execute_commit(query, (stringtosend,))
return True
@staticmethod
def get_next_serial_request():
query = AbstractSQL.get_query_by_name('GET_NEXT_STRINGTOSEND')
row = AbstractSQL.fetch_execute_one(query, ())
if row is None:
return None, None
return int(row[0]), str(row[1])
@staticmethod
def set_serial_request_done(id):
query = AbstractSQL.get_query_by_name('SET_REQUEST_SERIAL_DONE')
try:
AbstractSQL.execute_commit(query, (int(id),))
return True
except:
return False
@staticmethod
def insert_request_alarm(alarm_name, alarm_action):
if alarm_name is None or not alarm_name:
return False
query = AbstractSQL.get_query_by_name('INSERT_ALARM_REQUEST')
AbstractSQL.execute_commit(query, (int(alarm_action),alarm_name))
return True
@staticmethod
def get_next_alarm_request():
query = AbstractSQL.get_query_by_name('GET_NEXT_ALARM_REQUEST')
row = AbstractSQL.fetch_execute_one(query, ())
if row is None:
return None, None, None
return int(row[0]), str(row[1]), TypeAlarmAction(int(row[2]))
@staticmethod
def set_alarm_request_done(id):
query = AbstractSQL.get_query_by_name('SET_ALARM_REQUEST_DONE')
try:
AbstractSQL.execute_commit(query, (int(id),))
return True
except:
return False
@staticmethod
def get_info_alarm(name):
query = AbstractSQL.get_query_by_name('GET_INFO_ALARM')
row = AbstractSQL.fetch_execute_one(query, (name,))
return row[0], row[1], row[2]
@staticmethod
def get_insert_query():
return AbstractSQL.get_query_by_name('LOG_STATUS_REGISTER_ALARM')
@staticmethod
def get_all_alarms():
query = AbstractSQL.get_query_by_name('GET_ALL_INFO_ALARMS')
rows = AbstractSQL.fetch_execute_all(query, ())
return rows
@staticmethod
def insert_member(table_name, member):
if table_name is None or member is None:
return False
query = AbstractSQL.get_query_by_name('INSERT_MEMBER_TABLE').replace('table_name', table_name)
AbstractSQL.execute_commit(query,
(
member['Name'],
member['Surname'],
member['MemberType'],
member['ReferenceZone'],
member['AccessCode'],
member['IsActive'],
member['TalentCode'],
member['Username'],
member['Password'],
member['FirstEmail'],
member['FiscalCode']
)
)
return True
@staticmethod
def update_accesscode(table_name, access_code, talent_code):
if table_name is None or talent_code is None or access_code is None:
return False
query = AbstractSQL.get_query_by_name('UPDATE_ACCESSCODE').replace('table_name', table_name)
AbstractSQL.execute_commit(query, (access_code, talent_code))
return True
@staticmethod
def exists_talent_code(talent_code):
query = AbstractSQL.get_query_by_name('EXISTS_TALENTCODE')
return AbstractSQL.fetch_execute_one(query, (talent_code,))
| 35.537445 | 107 | 0.621669 |
e10c2ef51435bb629627c237835b659150cabfe6
| 2,145 |
py
|
Python
|
bot/booking.py
|
kopytjuk/uni-unterkunft
|
c81664e0070f97f45baa6eaff6a71039a267fd37
|
[
"MIT"
] | null | null | null |
bot/booking.py
|
kopytjuk/uni-unterkunft
|
c81664e0070f97f45baa6eaff6a71039a267fd37
|
[
"MIT"
] | null | null | null |
bot/booking.py
|
kopytjuk/uni-unterkunft
|
c81664e0070f97f45baa6eaff6a71039a267fd37
|
[
"MIT"
] | null | null | null |
import dotenv
dotenv.load_dotenv()
from datetime import datetime, date
import argparse
import logging
import os
from typing import Union
import requests
import shapely
from pyproj import Transformer
from flatdict import FlatDict
import pandas as pd
DT_FORMAT = "%Y-%m-%d"
def get_cheapest_nearby_hotels(loc: Union[str, tuple], t_arrival: date, t_departure: date, box_edge:int=20000):
booking_session = requests.Session()
booking_session.headers.update( {
'x-rapidapi-host': "apidojo-booking-v1.p.rapidapi.com",
'x-rapidapi-key': os.environ["RAPIDAPI_BOOKING_COM_API_KEY"]
})
epsg_gps = 4326
epsg_utm32 = 32632
transformer_gps_to_utm = Transformer.from_crs(epsg_gps, epsg_utm32)
transformer_utm_to_gps = Transformer.from_crs(epsg_utm32, epsg_gps)
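    # Work in metric UTM coordinates (EPSG:32632) so the square search box of
    # `box_edge` metres can be laid out around `loc`, then convert the two
    # corners back to lat/lon for booking.com's south-west/north-east bbox filter.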
loc_x, loc_y = transformer_gps_to_utm.transform(*loc)
a = box_edge
# north east
loc_ne_x = loc_x + a/2
loc_ne_y = loc_y + a/2
# south west
loc_sw_x = loc_x - a/2
loc_sw_y = loc_y - a/2
loc_ne_lat, loc_ne_lng = transformer_utm_to_gps.transform(loc_ne_x, loc_ne_y)
loc_sw_lat, loc_sw_lng = transformer_utm_to_gps.transform(loc_sw_x, loc_sw_y)
bbox_string = f"{loc_sw_lat},{loc_ne_lat},{loc_sw_lng},{loc_ne_lng}"
querystring = {"search_id":"none",
"price_filter_currencycode":"EUR",
"languagecode":"de",
"travel_purpose":"leisure",
"categories_filter":"price::0-60,free_cancellation::1,class::1,class::0,class::2",
"children_qty":"0",
"order_by":"price",
"guest_qty":"1",
"room_qty":"1",
"departure_date": t_departure.strftime(DT_FORMAT),
"bbox": bbox_string,
"arrival_date": t_arrival.strftime(DT_FORMAT)}
map_url = "https://apidojo-booking-v1.p.rapidapi.com/properties/list-by-map"
r = booking_session.get(map_url, params=querystring)
r_json = r.json()
r_results = r_json["result"]
dict_list = [FlatDict(r ,delimiter=".") for r in r_results]
df_hotels = pd.DataFrame(dict_list)
return df_hotels
| 28.986486 | 111 | 0.666667 |
61f6c12ca46613d73115b3bc502b0d10ecdc2982
| 405 |
py
|
Python
|
languages/python/exercises/concept/list_methods/list_methods.py
|
AlexLeSang/v3
|
3d35961a961b5a2129b1d42f1d118972d9665357
|
[
"MIT"
] | null | null | null |
languages/python/exercises/concept/list_methods/list_methods.py
|
AlexLeSang/v3
|
3d35961a961b5a2129b1d42f1d118972d9665357
|
[
"MIT"
] | 45 |
2020-01-24T17:04:52.000Z
|
2020-11-24T17:50:18.000Z
|
languages/python/exercises/concept/list_methods/list_methods.py
|
AlexLeSang/v3
|
3d35961a961b5a2129b1d42f1d118972d9665357
|
[
"MIT"
] | null | null | null |
def add_me_to_the_queue(express_queue, normal_queue, ticket_type, person_name):
pass
def find_his_friend(queue, friend_name):
pass
def add_person_with_his_friends(queue, index, person_name):
pass
def remove_the_mean_person(queue, person_name):
pass
def how_many_dopplegangers(queue, person_name):
pass
def remove_the_last_person(queue):
pass
def sorted_names(queue):
pass
| 20.25 | 79 | 0.775309 |
c9148be425e76bc7a0d06155a23a2bc1cbc2459c
| 3,716 |
py
|
Python
|
src/aijack/collaborative/fedmd/api.py
|
luoshenseeker/AIJack
|
4e871a5b3beb4b7c976d38060d6956efcebf880d
|
[
"MIT"
] | 1 |
2022-03-17T21:17:44.000Z
|
2022-03-17T21:17:44.000Z
|
src/aijack/collaborative/fedmd/api.py
|
luoshenseeker/AIJack
|
4e871a5b3beb4b7c976d38060d6956efcebf880d
|
[
"MIT"
] | null | null | null |
src/aijack/collaborative/fedmd/api.py
|
luoshenseeker/AIJack
|
4e871a5b3beb4b7c976d38060d6956efcebf880d
|
[
"MIT"
] | 1 |
2022-03-17T21:17:46.000Z
|
2022-03-17T21:17:46.000Z
|
import copy
from ..core.api import BaseFLKnowledgeDistillationAPI
class FedMDAPI(BaseFLKnowledgeDistillationAPI):
def __init__(
self,
server,
clients,
public_dataloader,
local_dataloaders,
validation_dataloader,
criterion,
client_optimizers,
num_communication=10,
device="cpu",
consensus_epoch=1,
revisit_epoch=1,
transfer_epoch=10,
):
super().__init__(
server,
clients,
public_dataloader,
local_dataloaders,
validation_dataloader,
criterion,
num_communication,
device,
)
self.client_optimizers = client_optimizers
self.consensus_epoch = consensus_epoch
self.revisit_epoch = revisit_epoch
self.transfer_epoch = transfer_epoch
def train_client(self, public=True):
loss_on_local_dataest = []
for client_idx in range(self.client_num):
client = self.clients[client_idx]
if public:
trainloader = self.public_dataloader
else:
trainloader = self.local_dataloaders[client_idx]
optimizer = self.client_optimizers[client_idx]
running_loss = 0.0
for data in trainloader:
x, y = data
x = x.to(self.device)
y = y.to(self.device)
optimizer.zero_grad()
loss = self.criterion(client(x), y)
loss.backward()
optimizer.step()
running_loss += loss.item()
loss_on_local_dataest.append(copy.deepcopy(running_loss / len(trainloader)))
return loss_on_local_dataest
def run(self):
logging = {
"loss_client_local_dataset_transfer": [],
"loss_client_public_dataset_transfer": [],
"loss_client_consensus": [],
"loss_client_revisit": [],
"loss_server_public_dataset": [],
"acc": [],
}
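        # FedMD-style schedule: a transfer-learning phase (first on the public
        # set, then on each client's private set), followed by communication
        # rounds of digest (each client approaches the server-side consensus on
        # the public set) and revisit (retraining on the private set), with
        # accuracy evaluated after every round.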
for i in range(self.transfer_epoch):
loss_public = self.train_client(public=True)
loss_local = self.train_client(public=False)
            print(f"epoch {i} (public - pretrain): {loss_public}")
            print(f"epoch {i} (local - pretrain): {loss_local}")
logging["loss_client_public_dataset_transfer"].append(loss_public)
logging["loss_client_local_dataset_transfer"].append(loss_local)
for i in range(1, self.num_communication + 1):
self.server.update()
self.server.distribute()
# Digest
temp_consensus_loss = []
for j, client in enumerate(self.clients):
for _ in range(self.consensus_epoch):
consensus_loss = client.approach_consensus(
self.client_optimizers[j]
)
print(f"epoch {i}, client {j}: {consensus_loss}")
temp_consensus_loss.append(consensus_loss)
logging["loss_client_consensus"].append(temp_consensus_loss)
# Revisit
for _ in range(self.revisit_epoch):
loss_local_revisit = self.train_client(public=False)
logging["loss_client_revisit"].append(loss_local_revisit)
# evaluation
temp_acc_list = []
for j, client in enumerate(self.clients):
acc = client.score(self.validation_dataloader)
print(f"client {j} acc score is ", acc)
temp_acc_list.append(acc)
logging["acc"].append(temp_acc_list)
return logging
| 33.477477 | 88 | 0.569699 |
c97111e3f8b9a3d6d078bc3dd074e625d02ce72c
| 6,207 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/regional/report/fichier_des_ecritures_comptables_[fec]/fichier_des_ecritures_comptables_[fec].py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/regional/report/fichier_des_ecritures_comptables_[fec]/fichier_des_ecritures_comptables_[fec].py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/regional/report/fichier_des_ecritures_comptables_[fec]/fichier_des_ecritures_comptables_[fec].py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import format_datetime
from frappe import _
def execute(filters=None):
account_details = {}
for acc in frappe.db.sql("""select name, is_group from tabAccount""", as_dict=1):
account_details.setdefault(acc.name, acc)
validate_filters(filters, account_details)
filters = set_account_currency(filters)
columns = get_columns(filters)
res = get_result(filters)
return columns, res
def validate_filters(filters, account_details):
if not filters.get('company'):
frappe.throw(_('{0} is mandatory').format(_('Company')))
if not filters.get('fiscal_year'):
frappe.throw(_('{0} is mandatory').format(_('Fiscal Year')))
def set_account_currency(filters):
filters["company_currency"] = frappe.db.get_value("Company", filters.company, "default_currency")
return filters
def get_columns(filters):
columns = [
_("JournalCode") + "::90", _("JournalLib") + "::90",
_("EcritureNum") + ":Dynamic Link:90", _("EcritureDate") + "::90",
_("CompteNum") + ":Link/Account:100", _("CompteLib") + ":Link/Account:200",
_("CompAuxNum") + "::90", _("CompAuxLib") + "::90",
_("PieceRef") + "::90", _("PieceDate") + "::90",
_("EcritureLib") + "::90", _("Debit") + "::90", _("Credit") + "::90",
_("EcritureLet") + "::90", _("DateLet") +
"::90", _("ValidDate") + "::90",
_("Montantdevise") + "::90", _("Idevise") + "::90"
]
return columns
def get_result(filters):
gl_entries = get_gl_entries(filters)
result = get_result_as_list(gl_entries, filters)
return result
def get_gl_entries(filters):
group_by_condition = "group by voucher_type, voucher_no, account" \
if filters.get("group_by_voucher") else "group by gl.name"
gl_entries = frappe.db.sql("""
select
gl.posting_date as GlPostDate, gl.account, gl.transaction_date,
sum(gl.debit) as debit, sum(gl.credit) as credit,
sum(gl.debit_in_account_currency) as debitCurr, sum(gl.credit_in_account_currency) as creditCurr,
gl.voucher_type, gl.voucher_no, gl.against_voucher_type,
gl.against_voucher, gl.account_currency, gl.against,
gl.party_type, gl.party, gl.is_opening,
inv.name as InvName, inv.posting_date as InvPostDate,
pur.name as PurName, inv.posting_date as PurPostDate,
jnl.cheque_no as JnlRef, jnl.posting_date as JnlPostDate,
pay.name as PayName, pay.posting_date as PayPostDate,
cus.customer_name, cus.name as cusName,
sup.supplier_name, sup.name as supName
from `tabGL Entry` gl
left join `tabSales Invoice` inv on gl.against_voucher = inv.name
left join `tabPurchase Invoice` pur on gl.against_voucher = pur.name
left join `tabJournal Entry` jnl on gl.against_voucher = jnl.name
left join `tabPayment Entry` pay on gl.against_voucher = pay.name
left join `tabCustomer` cus on gl.party = cus.customer_name
left join `tabSupplier` sup on gl.party = sup.supplier_name
where gl.company=%(company)s and gl.fiscal_year=%(fiscal_year)s
{group_by_condition}
order by GlPostDate, voucher_no"""
.format(group_by_condition=group_by_condition), filters, as_dict=1)
return gl_entries
def get_result_as_list(data, filters):
result = []
company_currency = frappe.db.get_value("Company", filters.company, "default_currency")
accounts = frappe.get_all("Account", filters={"Company": filters.company}, fields=["name", "account_number"])
for d in data:
JournalCode = d.get("voucher_no").split("-")[0]
EcritureNum = d.get("voucher_no").split("-")[-1]
EcritureDate = format_datetime(d.get("GlPostDate"), "yyyyMMdd")
account_number = [account.account_number for account in accounts if account.name == d.get("account")]
if account_number[0] is not None:
CompteNum = account_number[0]
else:
frappe.throw(_("Account number for account {0} is not available.<br> Please setup your Chart of Accounts correctly.").format(account.name))
if d.get("party_type") == "Customer":
CompAuxNum = d.get("cusName")
CompAuxLib = d.get("customer_name")
elif d.get("party_type") == "Supplier":
CompAuxNum = d.get("supName")
CompAuxLib = d.get("supplier_name")
else:
CompAuxNum = ""
CompAuxLib = ""
ValidDate = format_datetime(d.get("GlPostDate"), "yyyyMMdd")
if d.get("is_opening") == "Yes":
PieceRef = _("Opening Entry Journal")
PieceDate = format_datetime(d.get("GlPostDate"), "yyyyMMdd")
elif d.get("against_voucher_type") == "Sales Invoice":
PieceRef = _(d.get("InvName"))
PieceDate = format_datetime(d.get("InvPostDate"), "yyyyMMdd")
elif d.get("against_voucher_type") == "Purchase Invoice":
PieceRef = _(d.get("PurName"))
PieceDate = format_datetime(d.get("PurPostDate"), "yyyyMMdd")
elif d.get("against_voucher_type") == "Journal Entry":
PieceRef = _(d.get("JnlRef"))
PieceDate = format_datetime(d.get("JnlPostDate"), "yyyyMMdd")
elif d.get("against_voucher_type") == "Payment Entry":
PieceRef = _(d.get("PayName"))
PieceDate = format_datetime(d.get("PayPostDate"), "yyyyMMdd")
elif d.get("voucher_type") == "Period Closing Voucher":
PieceRef = _("Period Closing Journal")
PieceDate = format_datetime(d.get("GlPostDate"), "yyyyMMdd")
else:
PieceRef = _("No Reference")
PieceDate = format_datetime(d.get("GlPostDate"), "yyyyMMdd")
debit = '{:.2f}'.format(d.get("debit")).replace(".", ",")
credit = '{:.2f}'.format(d.get("credit")).replace(".", ",")
Idevise = d.get("account_currency")
if Idevise != company_currency:
Montantdevise = '{:.2f}'.format(d.get("debitCurr")).replace(".", ",") if d.get("debitCurr") != 0 else '{:.2f}'.format(d.get("creditCurr")).replace(".", ",")
else:
Montantdevise = '{:.2f}'.format(d.get("debit")).replace(".", ",") if d.get("debit") != 0 else '{:.2f}'.format(d.get("credit")).replace(".", ",")
row = [JournalCode, d.get("voucher_type"), EcritureNum, EcritureDate, CompteNum, d.get("account"), CompAuxNum, CompAuxLib,
PieceRef, PieceDate, d.get("voucher_no"), debit, credit, "", "", ValidDate, Montantdevise, Idevise]
result.append(row)
return result
| 34.870787 | 159 | 0.688739 |
c2f719203b3a0529afd0b1b28d46936d6815d2db
| 2,753 |
py
|
Python
|
python_playground/data/LowRank.py
|
RapidsAtHKUST/SimRank
|
3a601b08f9a3c281e2b36b914e06aba3a3a36118
|
[
"MIT"
] | 8 |
2020-04-14T23:17:00.000Z
|
2021-06-21T12:34:04.000Z
|
python_playground/data/LowRank.py
|
RapidsAtHKUST/SimRank
|
3a601b08f9a3c281e2b36b914e06aba3a3a36118
|
[
"MIT"
] | null | null | null |
python_playground/data/LowRank.py
|
RapidsAtHKUST/SimRank
|
3a601b08f9a3c281e2b36b914e06aba3a3a36118
|
[
"MIT"
] | 1 |
2021-01-17T16:26:50.000Z
|
2021-01-17T16:26:50.000Z
|
# the low-rank method for simrank
# Fast computation of SimRank for static and dynamic information networks (EDBT'10)
from memory_profiler import profile
from simrank import *
from utils import make_index_of_vec_n
np.set_printoptions(precision=2)
def preprocess(AT, k=5, IE=False, c=0.6):
'''
AT: A.T, csr sparse matrix
k: number of singular values
IE: whether has the (I-E) term, True would be correct
return: (K_u, Sigma_inverse, K_v, V_r)
'''
if IE == False:
print("computing False lowrank")
else:
print("computing True lowrank")
n = AT.shape[0]
# print("AT:")
# print(AT.toarray())
# normalized AT
print("normalizing ...")
PT = preprocessing.normalize(AT, norm='l1', axis=1)
PT = PT.astype("float32") # reduce the precision to use more memory
print("computing SVD...", "k: ", k)
(U, S, VT) = sparse.linalg.svds(PT, k, which="LM")
print("computing kronecker product...")
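    # Low-rank trick from the EDBT'10 paper: with P^T ~= U*S*V^T, the Kronecker
    # products kron(U,U), kron(S,S) and kron(V^T,V^T) factor the vectorized
    # SimRank recursion, so only the small (k^2 x k^2) matrix `Sigma` below has
    # to be inverted instead of an (n^2 x n^2) system.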
K_u = np.kron(U, U)
K_sigma = np.kron(S, S)
K_sigma_inverse = 1 / K_sigma
K_v = np.kron(VT, VT)
print(K_u)
print(K_v)
print("K_v size", K_v.nbytes)
index_of_zero_rows = make_index_of_vec_n(n)
if IE == True: # need to multiply I-E to K_u
K_u[index_of_zero_rows] = 0 # set rows to zeros
# compute Sigma
print("computing sigma..")
Sigma = np.diag(K_sigma_inverse) - c * np.dot(K_v, K_u)
Sigma_inverse = np.linalg.inv(Sigma)
# build vec(I)
vec_I = np.zeros(n ** 2, dtype="int")
vec_I[index_of_zero_rows] = 1
print("computing V_r")
V_r = np.dot(K_v, vec_I)
print("finish indexing")
return (K_u, Sigma_inverse, K_v, V_r)
@profile
def lowrank_simrank(AT, indices=None, IE=False, k=5, c=0.6):
'''
Direct method for simrank
'''
print("low rank for simrank")
n = AT.shape[0]
I = np.eye(n)
vec_I = I.reshape(n ** 2)
if indices == None:
indices = preprocess(AT, k, IE, c) # get the offline indices
(K_u, Sigma_inverse, K_v, V_r) = indices
print("finish indexing, now compute low rank approximation... ")
if IE == False: # the incorrect way
vec_S = (1 - c) * (vec_I + c * np.dot(K_u, np.dot(Sigma_inverse, V_r)))
else:
vec_S = (vec_I + c * np.dot(K_u, np.dot(Sigma_inverse, V_r)))
print("reshaping")
S = vec_S.reshape((n, n))
return S
def test():
# m = adj_mat(A)
# print(type(m))
# print(type(m.transpose()))
# S = simrank(m.transpose())
# print("true simrank")
# print(S)
# ls = lowrank_simrank(m.transpose(), k=3, IE=True)
# print(np.around(ls, 2))
g = load_sparse_csr("./datasets/adj_T/ca-HepTh.npz")
lowrank_simrank(g, k=10, IE=True)
if __name__ == '__main__':
test()
| 29.923913 | 83 | 0.614239 |
666c35295c51c3f212b7fc8179a29dbc8f1d7e70
| 777 |
py
|
Python
|
Python/Exercícios_Python/038_conversor_de_bases_numéricas.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
Python/Exercícios_Python/038_conversor_de_bases_numéricas.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
Python/Exercícios_Python/038_conversor_de_bases_numéricas.py
|
vdonoladev/aprendendo-programacao
|
83abbcd6701b2105903b28fd549738863418cfb8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""038 - Conversor de Bases Numéricas
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1pIfJpkGhz-bPEbuO2ea93R6Ba18s437l
"""
n = int(input('Digite um valor decimal: '))
escolha = int(input('Digite: \n- 1 para binário: \n- 2 para octal: \n- 3 para Hexadecimal: \nEscolha sua opção pelo Número: '))
if escolha == 1:
    print('O Número {} decimal equivale a {} em Binário'.format(n, bin(n)[2:]))
elif escolha == 2:
    print('O Número {} decimal equivale a {} em Octal'.format(n, oct(n)[2:].upper()))
elif escolha == 3:
    print('O Número {} decimal equivale a {} em Hexadecimal'.format(n, hex(n)[2:].upper()))
else:
    print('Você escolheu uma opção inválida.')
| 40.894737 | 127 | 0.676963 |
dd538e9575b667e388101cfd07c63c814e56ae91
| 746 |
py
|
Python
|
prune_tree.py
|
YueYvetteHao/Sandbox
|
db2b8b0e751bfb7d86e7f2506fb81a75f8c3a3e5
|
[
"MIT"
] | null | null | null |
prune_tree.py
|
YueYvetteHao/Sandbox
|
db2b8b0e751bfb7d86e7f2506fb81a75f8c3a3e5
|
[
"MIT"
] | null | null | null |
prune_tree.py
|
YueYvetteHao/Sandbox
|
db2b8b0e751bfb7d86e7f2506fb81a75f8c3a3e5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import dendropy
import sys
treefile = str(sys.argv[1])
tree = dendropy.Tree.get(path=treefile, schema="newick")
#print("Before:")
#print(tree.as_string(schema='newick'))
#print(tree.as_ascii_plot())
subspp = ["RRRR","PSEX", "PJEN", "PBIA", "PTET", "PQUA"]
#, "PTET", "PQUA"
taxons = []
for taxon in tree.taxon_namespace:
if taxon.label[0:4] in subspp:
# print (taxon.label)
taxons.append(taxon.label)
tree.retain_taxa_with_labels(taxons)
#tree.prune_taxa_with_labels(["PBIA.V1 4.1.P00220009","PDEC.223.1.P00070048","PSEX.AZ8 4.1.P0470047","PNOV.TE.1.P03730028"])
#print("After:")
#print(tree.as_string(schema='newick'))
print(tree.as_ascii_plot())
outtree = 'pruned_'+treefile
tree.write(path=outtree, schema="newick")
| 26.642857 | 124 | 0.715818 |
dd632ca1b8502767c1e572e2ad094813b01c037e
| 1,341 |
py
|
Python
|
src/onegov/org/models/publication.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/org/models/publication.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/org/models/publication.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import sedate
from datetime import datetime
from onegov.core.collection import GenericCollection
from onegov.file import File
from sqlalchemy import and_, text
class PublicationCollection(GenericCollection):
def __init__(self, session, year=None):
super().__init__(session)
self.year = year
@property
def model_class(self):
return File
def query(self):
query = super().query().filter(
self.model_class.published.is_(True),
self.model_class.publication.is_(True),
text("reference->>'content_type' = :content_type").bindparams(
content_type='application/pdf'
)
)
if self.year:
s = sedate.replace_timezone(datetime(self.year, 1, 1), 'UTC')
e = sedate.replace_timezone(datetime(self.year + 1, 1, 1), 'UTC')
query = query.filter(and_(s <= File.created, File.created < e))
return query
def for_year(self, year):
return self.__class__(self.session, year)
def first_year(self, timezone):
query = self.for_year(None).query()\
.with_entities(File.created)\
.order_by(File.created)
first_record = query.first()
if first_record:
return sedate.to_timezone(first_record.created, timezone).year
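# Hypothetical usage sketch (assumes an already configured SQLAlchemy session; the
# `session` variable and the timezone string below are placeholders, not part of this module):
#
#     publications = PublicationCollection(session, year=2020)
#     pdf_files = publications.query().all()
#     earliest = publications.first_year('Europe/Zurich')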
| 27.9375 | 77 | 0.625652 |
c6b98708c364897c069ab80620efcd99afb5edcb
| 368 |
py
|
Python
|
source/pkgsrc/games/py-renpy/patches/patch-module_setup.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-11-20T22:46:39.000Z
|
2021-11-20T22:46:39.000Z
|
source/pkgsrc/games/py-renpy/patches/patch-module_setup.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
source/pkgsrc/games/py-renpy/patches/patch-module_setup.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
$NetBSD: patch-module_setup.py,v 1.2 2017/06/24 19:39:47 adam Exp $
* png from pkgsrc is libpng16.so
--- module/setup.py.orig 2014-08-05 01:19:58.000000000 +0000
+++ module/setup.py
@@ -75,7 +75,7 @@ include("libswscale/swscale.h")
include("GL/glew.h")
library("SDL")
-library("png")
+library("png16")
library("avformat")
library("avcodec")
library("avutil")
| 23 | 67 | 0.679348 |
c6c5ae95e75f590240d1fa37e17a600041652dd2
| 330 |
py
|
Python
|
___Python/Michael/p07_file_io_MA/m01_count_files.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
___Python/Michael/p07_file_io_MA/m01_count_files.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
___Python/Michael/p07_file_io_MA/m01_count_files.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
from pathlib import Path
# Number of folders including subfolders
def subdirs(path):
    return [subdir for subdir in path.iterdir() if subdir.is_dir()]

def count_dirs(path):
    return len(subdirs(path))

path = Path(r"O:\Spielwiese")

def alldirs(path):
    if len(subdirs(path)) == 0:
        return None
    return subdirs(path)

print(alldirs(path))
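# A minimal recursive variant (assumption: "including subfolders" in the comment above
# means nested folders should be counted too); Path.rglob('*') walks the whole tree.
def count_dirs_recursive(path):
    return sum(1 for p in path.rglob('*') if p.is_dir())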
| 20.625 | 71 | 0.651515 |
05dcef9566df8c6588f858a036fd4fa4467a78bc
| 1,217 |
py
|
Python
|
Problems/Depth-First Search/medium/AllNodesDistanceKBT/all_nodes_distance_k_in_bt.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | 1 |
2021-08-16T14:52:05.000Z
|
2021-08-16T14:52:05.000Z
|
Problems/Depth-First Search/medium/AllNodesDistanceKBT/all_nodes_distance_k_in_bt.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Problems/Depth-First Search/medium/AllNodesDistanceKBT/all_nodes_distance_k_in_bt.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
from typing import List
# Definition for a binary tree node.
class TreeNode:
    # left/right are optional so the re-rooted "parent" nodes below can be built in one call
    def __init__(self, x, left=None, right=None):
        self.val = x
        self.left = left
        self.right = right
class Solution:
def distanceK(self, root: TreeNode, target: TreeNode, k: int) -> List[int]:
ans = []
self.left, self.right = None, None
def find_target(cur_node: TreeNode, parent: TreeNode):
if not cur_node:
return None
if cur_node.val == target.val:
self.left = cur_node
self.right = parent
return
find_target(cur_node.left, TreeNode(cur_node.val, cur_node.right, parent))
find_target(cur_node.right, TreeNode(cur_node.val, parent, cur_node.left))
find_target(root, None)
def dfs(cur_node: TreeNode, travel: int):
if not cur_node:
return None
if travel == k:
ans.append(cur_node.val)
return
if travel > k:
return
dfs(cur_node.left, travel + 1)
dfs(cur_node.right, travel + 1)
dfs(self.left, 0)
dfs(self.right, 1)
return ans
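# Hypothetical usage sketch (tree shape and values are illustrative, not from the source):
if __name__ == '__main__':
    example_root = TreeNode(3,
                            TreeNode(5, TreeNode(6), TreeNode(2, TreeNode(7), TreeNode(4))),
                            TreeNode(1, TreeNode(0), TreeNode(8)))
    # Nodes at distance 2 from the node valued 5: expected [7, 4, 1] (order may vary).
    print(Solution().distanceK(example_root, TreeNode(5), 2))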
| 25.893617 | 86 | 0.537387 |
af6ea748ade41afec09a7f0db0075d7bf098bcf6
| 448 |
py
|
Python
|
config.py
|
meerkat-code/meerkat_consul
|
983851b360f330ad258e15d45d98251e0fe21100
|
[
"MIT"
] | null | null | null |
config.py
|
meerkat-code/meerkat_consul
|
983851b360f330ad258e15d45d98251e0fe21100
|
[
"MIT"
] | 1 |
2018-07-18T16:36:12.000Z
|
2018-07-18T16:36:12.000Z
|
config.py
|
fjelltopp/meerkat_consul
|
983851b360f330ad258e15d45d98251e0fe21100
|
[
"MIT"
] | null | null | null |
class Config(object):
DEBUG = False
TESTING = False
PRODUCTION = False
LOGGING_LEVEL = "ERROR"
LOGGING_FORMAT = '%(asctime)s - %(levelname)-7s - %(module)s:%(filename)s:%(lineno)d - %(message)s'
COUNTRY_LOCATION_ID = 1
class Production(Config):
PRODUCTION = True
class Development(Config):
DEBUG = True
LOGGING_LEVEL = "DEBUG"
class Testing(Config):
TESTING = True
LOGGING_LEVEL = "WARNING"
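# Minimal usage sketch (assumption: these classes are consumed by a Flask-style app
# config; the `app` object below is illustrative and not part of this module):
#
#     app.config.from_object('config.Development')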
| 15.448276 | 103 | 0.645089 |
af801afc38b1483bab4022ddc1eca05c64fd7dee
| 247 |
py
|
Python
|
Verzweigungen/Einfach/u_einfach.py
|
DietrichPaul/Einstieg-in-Python
|
0d28402f962773274d85e6bb169ae631c91f66ce
|
[
"CC0-1.0"
] | null | null | null |
Verzweigungen/Einfach/u_einfach.py
|
DietrichPaul/Einstieg-in-Python
|
0d28402f962773274d85e6bb169ae631c91f66ce
|
[
"CC0-1.0"
] | null | null | null |
Verzweigungen/Einfach/u_einfach.py
|
DietrichPaul/Einstieg-in-Python
|
0d28402f962773274d85e6bb169ae631c91f66ce
|
[
"CC0-1.0"
] | null | null | null |
# Input
print("Geben Sie Ihr Bruttogehalt in Euro ein:")
brutto = float(input())
# Calculation
if brutto > 2500:
steuer = brutto * 0.22
else:
steuer = brutto * 0.18
# Output
print("Es ergibt sich ein Steuerbetrag von", steuer, "Euro")
| 19 | 60 | 0.680162 |
a5b2e2336d105289cb8635838685c5ec230e2b9d
| 350 |
py
|
Python
|
config.py
|
ponyatov/ST
|
4aab2004608fe5f85366870c8387451f05451adc
|
[
"MIT"
] | null | null | null |
config.py
|
ponyatov/ST
|
4aab2004608fe5f85366870c8387451f05451adc
|
[
"MIT"
] | null | null | null |
config.py
|
ponyatov/ST
|
4aab2004608fe5f85366870c8387451f05451adc
|
[
"MIT"
] | null | null | null |
import os
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = b'\x80<C\x83\xf7\xb3Z\xfa\xedu,>\xbc\xec\xa1\xb1\r@i\x8b\x91)\xe7\x1f\xaat\xa6\xfb\x13\xea\x14\xa1\x10\xc4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
HOST = os.getenv('HOST', '127.0.0.1')
PORT = os.getenv('PORT', 12345)
| 31.818182 | 120 | 0.714286 |
59e01f315e48991105e3e55d9a955fdadfdc784e
| 15,691 |
py
|
Python
|
Packs/Pcysys/Integrations/Pcysys/Pcysys.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Pcysys/Integrations/Pcysys/Pcysys.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Pcysys/Integrations/Pcysys/Pcysys.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import jwt
import requests
import csv
import io
import json
from typing import List
from enum import Enum
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
HEADERS = {'Accept': 'application/json'}
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
HEALTH_URL_SUFFIX = '/health/dbChecks'
AUTH_URL_SUFFIX = '/auth/token'
LIST_TEMPLATES_URL_SUFFIX = '/api/v1/templates'
CANCEL_TASK_URL_SUFFIX = '/api/v1/taskRun/{taskRunId}/cancel'
GET_TASK_RUN_STATUS_URL_SUFFIX = '/api/v1/taskRun/{taskRunId}'
RUN_BULK_URL_SUFFIX = '/api/v1/template/runBulk'
EXPORT_CSV_URL_SUFFIX = '/api/v1/taskRun/{taskRunId}/fullActionReportCSV'
class Request(Enum):
POST = 'POST'
GET = 'GET'
class AuthorizationError(Exception):
pass
class Client(BaseClient):
def __init__(self, base_url: str, tgt: str, client_id: str, verify: bool, proxy: bool, headers):
super().__init__(base_url=f'{base_url}', headers=headers, verify=verify, proxy=proxy)
self.session = requests.Session()
self.session.headers = headers
self.client_id = client_id
self.tgt = tgt
self.access_token = str()
self.expiry = 0
self.load_session_parameters()
def load_session_parameters(self):
context: dict = get_integration_context()
if context and context['base_url'] == self._base_url:
self.tgt = context['tgt']
self.access_token = context['accessToken']
self.expiry = context['expiry']
def generic_request(self, method: str, url_suffix: str = None, full_url: str = None, headers: dict = None,
params: dict = None, data: dict = None, response_type: str = 'json'):
full_url = full_url if full_url else f'{self._base_url}{url_suffix}'
headers = headers if headers else self._headers
try:
res = self.session.request(
method,
full_url,
headers=headers,
verify=self._verify,
data=data,
params=params
)
demisto.debug(f'Got response: {res}')
if not res.ok:
status_code = res.status_code
if status_code == requests.status_codes.codes.UNAUTHORIZED: # pylint: disable=no-member
info = "Check that your system clock is set to the correct date and time before you try again."
raise AuthorizationError(f'Status code: {status_code}, reason: {res.text}. {info}')
raise ValueError(f'Error in API call to Pentera. Status code: {status_code}, reason: {res.text}')
try:
if response_type == 'json':
demisto.debug('result is JSON')
return res.json()
demisto.debug('result is TEXT')
return res.text
except Exception:
raise ValueError(
f'Failed to parse http response to JSON format. Original response body: \n{res.text}')
except requests.exceptions.ConnectTimeout as exception:
err_msg = 'Connection Timeout Error - potential reasons might be that the Server URL parameter' \
' is incorrect or that the Server is not accessible from your host.'
raise DemistoException(err_msg, exception)
except requests.exceptions.SSLError as exception:
err_msg = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' checkbox in' \
' the integration configuration.'
raise DemistoException(err_msg, exception)
except requests.exceptions.ProxyError as exception:
err_msg = 'Proxy Error - if the \'Use system proxy\' checkbox in the integration configuration is' \
' selected, try clearing the checkbox.'
raise DemistoException(err_msg, exception)
except requests.exceptions.ConnectionError as exception:
err_msg = getattr(exception, 'message', str(exception))
raise DemistoException(err_msg, exception)
except Exception as request_error:
message = getattr(request_error, 'message', str(request_error))
raise DemistoException(
f"Could not send request to Pentera, reason: {message}",
exception=request_error
)
def authenticate(self):
data = {
'client_id': self.client_id,
'tgt': self.tgt
}
res = self.generic_request(method=Request.POST.value, url_suffix=AUTH_URL_SUFFIX, data=data)
self.tgt = res.get('tgt')
self.access_token = res.get('token')
jwt_decode_dict = jwt.get_unverified_header(self.access_token)
self.expiry = jwt_decode_dict.get('exp', 0) if jwt_decode_dict else 0
self.save_session_parameters()
def save_session_parameters(self):
context = {
'base_url': self._base_url,
'tgt': self.tgt,
'accessToken': self.access_token,
'expiry': self.expiry
}
set_integration_context(context)
def is_access_token_valid(self):
if not self.access_token or not self.expiry or self.expiry < int(datetime.utcnow().timestamp()):
return False
return True
def create_basic_authentication_header(self):
authentication_headers = HEADERS.copy()
token = self.access_token + ':'
encoded_bytes = base64.b64encode(token.encode("utf-8"))
encoded_str = str(encoded_bytes, "utf-8")
authentication_headers['Authorization'] = 'Basic ' + encoded_str
return authentication_headers
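    # Worked example of the header built above: an access token of "abc" becomes the
    # credential "abc:" and is sent as "Authorization: Basic YWJjOg==".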
def run_health_checks(self):
res = self.generic_request(method=Request.GET.value, url_suffix=HEALTH_URL_SUFFIX)
return res
def run_template_by_name(self, template_name):
headers = self.create_basic_authentication_header()
data = {
'templateNames': [template_name]
}
res = self.generic_request(method=Request.POST.value, url_suffix=RUN_BULK_URL_SUFFIX, headers=headers,
data=data)
return res
def get_task_run_status_by_task_run_id(self, task_run_id: str):
headers = self.create_basic_authentication_header()
url_suffix = GET_TASK_RUN_STATUS_URL_SUFFIX.format(taskRunId=task_run_id)
res = self.generic_request(method=Request.GET.value, url_suffix=url_suffix, headers=headers, data={})
task_status = res.get('taskRuns')[0]
return task_status
def get_task_run_full_action_report_by_task_run_id(self, task_run_id: str):
headers = self.create_basic_authentication_header()
url_suffix = EXPORT_CSV_URL_SUFFIX.format(taskRunId=task_run_id)
res = self.generic_request(method=Request.GET.value, url_suffix=url_suffix, headers=headers,
response_type='csv')
return res
def pentera_test_module_command(client: Client):
try:
response = client.run_health_checks()
except Exception as test_error:
message = getattr(test_error, 'message', str(test_error))
raise DemistoException(message)
exceptions: list = response.get('exceptions')
if exceptions:
raise DemistoException(", ".join(exceptions))
return 'ok'
def pentera_run_template_command(client: Client, args):
template_name = args.get('template_name')
try:
response = client.run_template_by_name(template_name)
task_run_json = response.get('taskRuns')[0]
parsed_response = parse_task_run_status(task_run_json)
readable_output = tableToMarkdown(template_name, parse_task_run_status(task_run_json),
removeNull=True)
return (
readable_output,
{'Pentera.TaskRun(val.ID == obj.ID)': parsed_response},
response # raw response - the original response
)
except Exception as run_template_error:
message = getattr(run_template_error, 'message', str(run_template_error))
raise DemistoException(
f"Could not run template with template_name: '{template_name}', reason: {message}",
exception=run_template_error
)
def pentera_get_task_run_status_command(client: Client, args):
task_run_id = args.get('task_run_id')
try:
task_run_status = client.get_task_run_status_by_task_run_id(task_run_id)
parsed_response = parse_task_run_status(task_run_status)
title = parsed_response['TemplateName'] + ': ' + parsed_response['Status']
readable_output = tableToMarkdown(title, parsed_response, removeNull=True)
return (
readable_output,
{'Pentera.TaskRun(val.ID == obj.ID)': parsed_response},
task_run_status # raw response - the original response
)
except Exception as status_error:
message = getattr(status_error, 'message', str(status_error))
raise DemistoException(
f"Could not get task run status for task_run_id: '{task_run_id}', reason: {message}",
exception=status_error
)
def pentera_get_task_run_full_action_report_command(client: Client, args):
def _convert_csv_file_to_dict(csv_file):
def _map_parameters_string_to_object(str_parameters: str = None):
if str_parameters:
return json.loads(str_parameters)
return None
csv_reader = csv.DictReader(io.StringIO(csv_file))
data = []
for row in csv_reader:
row_copy = row.copy()
converted_params = _map_parameters_string_to_object(row_copy.get('Parameters'))
if converted_params:
row_copy['Parameters'] = converted_params
data.append(row_copy)
return data
def _convert_full_action_report_time(full_action_report_list: List[dict]):
def _parse_date(full_date, separator):
if isinstance(full_date, str) and isinstance(separator, str):
date = full_date.split(separator)
if len(date) > 2:
first_arg = date[0]
second_arg = date[1]
third_arg = date[2]
return first_arg, second_arg, third_arg
res_list: List[dict] = []
for ordered_dict in full_action_report_list:
full_date_to_convert = ordered_dict['Time']
full_date_list = full_date_to_convert.split(' ')
year, month, day = _parse_date(full_date_list[0], '-')
hours, minutes, seconds = _parse_date(full_date_list[1], ':')
converted_date = year + '-' + month + '-' + day + 'T' + hours + ':' + minutes + ':' + seconds + 'Z'
new_ordered_dict = ordered_dict.copy()
new_ordered_dict['Time'] = converted_date
res_list.append(new_ordered_dict)
return res_list
entries = []
task_run_id = args.get('task_run_id')
try:
response_csv = client.get_task_run_full_action_report_by_task_run_id(task_run_id)
readable_output = f"# Pentera Report for TaskRun ID {task_run_id}"
entry = fileResult(f'penterascan-{task_run_id}.csv', response_csv, entryTypes['entryInfoFile'])
entry["HumanReadable"] = readable_output
entry["ContentsFormat"] = formats["markdown"]
entries.append(entry)
csv_dict = _convert_csv_file_to_dict(response_csv)
date_converted_csv_dict = _convert_full_action_report_time(csv_dict)
human_readable = tableToMarkdown(readable_output, date_converted_csv_dict)
entries.append({
"Type": entryTypes["note"],
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"Contents": date_converted_csv_dict,
"EntryContext": {
'Pentera.TaskRun(val.ID == obj.ID)': {
'FullActionReport': date_converted_csv_dict,
'ID': task_run_id
}
},
"HumanReadable": human_readable
})
return entries
except Exception as report_error:
message = getattr(report_error, 'message', str(report_error))
raise DemistoException(
f"Could not get full action report for task_run_id: '{task_run_id}', reason: {message}",
exception=report_error
)
def parse_task_run_status(json_response):
def _convert_time_in_millis_to_date_format(time_in_millis):
time_in_date_format = None
try:
time_in_date_format = datetime.fromtimestamp(float(time_in_millis) / 1000).strftime(DATE_FORMAT)
return time_in_date_format
except TypeError:
return time_in_date_format
if isinstance(json_response, dict):
end_time_date_format = _convert_time_in_millis_to_date_format(json_response.get('endTime'))
start_time_date_format = _convert_time_in_millis_to_date_format(json_response.get('startTime'))
parsed_json_response = {
'ID': json_response.get('taskRunId'),
'TemplateName': json_response.get('taskRunName'),
'StartTime': start_time_date_format,
'EndTime': end_time_date_format,
'Status': json_response.get('status'),
}
return parsed_json_response
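# Worked example for the conversion above: 1609459200000 ms corresponds to
# 2021-01-01T00:00:00Z in UTC; note that datetime.fromtimestamp() applies the local
# timezone, so the rendered string depends on the host running the integration.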
def pentera_authentication(client: Client):
if not client.is_access_token_valid():
try:
client.authenticate()
except Exception as auth_error:
message = getattr(auth_error, 'message', str(auth_error))
raise DemistoException(
f"Could not authenticate to Pentera, reason: {message}",
exception=auth_error
)
def increase_csv_field_size_limit():
"""
This method will try to increase the csv field size limit as files might contain huge fields.
:return: None
"""
try:
csv.field_size_limit(sys.maxsize)
except OverflowError:
pass
def main():
params: dict = demisto.params()
application_port = params['port']
base_url = params['url'].rstrip('/') + ':' + application_port
client_id = params['clientId']
tgt = params['tgt']
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
client = Client(
base_url=base_url,
tgt=tgt,
verify=verify_certificate,
client_id=client_id,
proxy=proxy,
headers=HEADERS
)
command = demisto.command()
demisto.debug(f'Got command: {command}')
try:
if demisto.command() == 'test-module':
demisto.results(pentera_test_module_command(client))
else:
pentera_authentication(client)
if demisto.command() == 'pentera-run-template-by-name':
return_outputs(*pentera_run_template_command(client, demisto.args()))
elif demisto.command() == 'pentera-get-task-run-status':
return_outputs(*pentera_get_task_run_status_command(client, demisto.args()))
elif demisto.command() == 'pentera-get-task-run-full-action-report':
demisto.results(pentera_get_task_run_full_action_report_command(client, demisto.args()))
except Exception as e:
return_error(f'Failed to execute command: {command}, {getattr(e, "message", str(e))}', error=e)
if __name__ in ('__main__', '__builtin__', 'builtins'):
increase_csv_field_size_limit()
main()
| 40.968668 | 115 | 0.641132 |
758143c8edb9ac7caeb03ce2169a369f0c3f1c03
| 899 |
py
|
Python
|
codeit/algorithm/max_profit_memo.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
codeit/algorithm/max_profit_memo.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
codeit/algorithm/max_profit_memo.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
def max_profit_memo(price_list, count, cache):
# base case
if count in [0, 1]:
return price_list[count]
if count in cache:
return cache[count]
if count < len(price_list):
profit = price_list[count]
else:
profit = 0
# recursive case
for i in range(1, count // 2 + 1):
profit = max(profit,
max_profit_memo(price_list, i , cache) +
max_profit_memo(price_list, count - i, cache))
cache[count] = profit
return profit
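# The recursion above implements, for count >= 2:
#     f(n) = max(price_list[n] (or 0 if n is out of range),
#                max over 1 <= i <= n//2 of f(i) + f(n - i))
# e.g. with the price list used below: f(4) = max(900, f(1)+f(3), f(2)+f(2))
#                                           = max(900, 100+800, 400+400) = 900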
def max_profit(price_list, count):
max_profit_cache = {}
return max_profit_memo(price_list, count, max_profit_cache)
if __name__ == '__main__':
count = 10
for i in range(count):
price_list = [0, 100, 400, 800, 900, 1000, 1400, 1600, 2100, 2200]
my_result = max_profit(price_list, i)
print(f'{i}th result: {my_result}')
| 25.685714 | 74 | 0.596218 |
dde4ccb40e6afb779d2bf1b372bce3d68a2e2d61
| 266 |
py
|
Python
|
Crashkurs TensorFlow/09_range.py
|
slogslog/Kurzgeschichten-in-CSharp
|
3918c4174220e558cdeeada0edac941811418b93
|
[
"Unlicense"
] | 2 |
2019-03-15T20:48:34.000Z
|
2019-04-22T15:24:09.000Z
|
Crashkurs TensorFlow/09_range.py
|
slogslog/Coding-Kurzgeschichten
|
9b08237038147c6c348d4cf4c69567178e07dd1d
|
[
"Unlicense"
] | null | null | null |
Crashkurs TensorFlow/09_range.py
|
slogslog/Coding-Kurzgeschichten
|
9b08237038147c6c348d4cf4c69567178e07dd1d
|
[
"Unlicense"
] | null | null | null |
# Suppresses the AVX2 warning
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
a = tf.range(1,21)
print(a.numpy())
b = tf.range(10,0,-2)
print(b.numpy())
c = tf.range(5., 13., 0.5)
print(c.numpy())
d = tf.linspace(-2., 3., 20)
print(d)
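# Expected values, for reference (tf.range excludes the limit, tf.linspace includes both ends):
#   a -> [ 1  2 ... 20]            (20 integers)
#   b -> [10  8  6  4  2]
#   c -> [5.  5.5  6. ... 12.5]    (16 values, step 0.5)
#   d -> 20 evenly spaced floats from -2.0 to 3.0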
| 14.777778 | 38 | 0.650376 |
fb24e3d9d3f133d9100fe9d04185373d283d05ff
| 4,917 |
py
|
Python
|
packages/watchmen-storage/src/watchmen_storage/competitive_worker_id_generator.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-storage/src/watchmen_storage/competitive_worker_id_generator.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-storage/src/watchmen_storage/competitive_worker_id_generator.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
from abc import abstractmethod
from datetime import datetime
from enum import Enum
from logging import getLogger
from os import getpid
from random import randrange
from socket import AF_INET, SOCK_DGRAM, socket
from threading import Thread
from typing import Callable, List, Optional
from time import sleep
from watchmen_model.common import Storable
from watchmen_utilities import get_current_time_in_seconds
from .snowflake_worker_id_generator import WorkerIdGenerator
class WorkerFirstDeclarationException(Exception):
pass
class WorkerCreationException(Exception):
pass
class WorkerDeclarationException(Exception):
pass
def get_host_ip() -> str:
s = None
ip = None
try:
s = socket(AF_INET, SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
finally:
if s is not None:
s.close()
return ip
class CompetitiveWorker(Storable):
ip: Optional[str] = get_host_ip()
processId: Optional[str] = str(getpid())
dataCenterId: int = None
workerId: int = None
registeredAt: Optional[datetime] = get_current_time_in_seconds()
lastBeatAt: datetime = None
def default_heart_beat_interval() -> int:
"""
:return: in seconds
"""
return 60
def default_worker_creation_retry_times() -> int:
return 3
class CompetitiveWorkerShutdownSignal(Enum):
EXIT = 1,
EXCEPTION_RAISED = 2,
CompetitiveWorkerRestarter = Callable[[], None]
CompetitiveWorkerShutdownListener = Callable[
[CompetitiveWorkerShutdownSignal, int, int, CompetitiveWorkerRestarter], None
]
class CompetitiveWorkerIdGenerator:
worker: CompetitiveWorker = None
firstDeclareTimes: int = 0
def __init__(
self,
data_center_id: int = 0,
heart_beat_interval: int = default_heart_beat_interval(),
worker_creation_retry_times: int = default_worker_creation_retry_times(),
shutdown_listener: CompetitiveWorkerShutdownListener = None
):
# will not check sanity of data center id here
self.dataCenterId = data_center_id
self.heartBeatInterval = heart_beat_interval
self.workerCreationRetryTimes = worker_creation_retry_times
self.handleShutdown = shutdown_listener
self.try_create_worker()
@abstractmethod
def first_declare_myself(self, worker: CompetitiveWorker) -> None:
"""
first declare me, implement me
"""
pass
def create_worker(self):
# create a worker
try:
self.firstDeclareTimes += 1
worker = CompetitiveWorker(dataCenterId=self.dataCenterId, workerId=self.create_worker_id())
self.first_declare_myself(worker)
return worker
except WorkerFirstDeclarationException:
if self.firstDeclareTimes <= self.workerCreationRetryTimes:
return self.create_worker()
else:
raise WorkerCreationException(
f'Failed to create worker[dataCenterId={self.dataCenterId}], '
f'reaches maximum retry times[{self.workerCreationRetryTimes}]')
def try_create_worker(self):
self.firstDeclareTimes = 0
self.worker = self.create_worker()
del self.firstDeclareTimes
# start heart beat
Thread(target=CompetitiveWorkerIdGenerator.heart_beat, args=(self,), daemon=True).start()
@staticmethod
def random_worker_id() -> int:
return randrange(0, 1024)
@abstractmethod
def acquire_alive_worker_ids(self) -> List[int]:
"""
acquire used worker ids, implement me
:return: used worker ids
"""
pass
def create_worker_id(self) -> int:
alive_worker_ids = self.acquire_alive_worker_ids()
# random a worker id, return it when it is not used
new_worker_id = CompetitiveWorkerIdGenerator.random_worker_id()
while new_worker_id in alive_worker_ids:
new_worker_id = CompetitiveWorkerIdGenerator.random_worker_id()
# return
return new_worker_id
@abstractmethod
def declare_myself(self, worker: CompetitiveWorker) -> None:
"""
declare me is alive, implement me
"""
pass
# noinspection PyUnreachableCode
def heart_beat(self):
try:
while True:
self.declare_myself(self.worker)
sleep(self.heartBeatInterval)
except Exception as e:
getLogger(__name__).error(e, exc_info=True, stack_info=True)
self.handleShutdown(
CompetitiveWorkerShutdownSignal.EXCEPTION_RAISED,
self.worker.dataCenterId, self.worker.workerId,
self.try_create_worker
)
else:
# heart beat stopped with no exception, release signal
self.handleShutdown(
CompetitiveWorkerShutdownSignal.EXIT,
self.worker.dataCenterId, self.worker.workerId,
self.try_create_worker
)
finally:
# release in-memory worker, will raise exception only if somebody calls me later
del self.worker
getLogger(__name__).warning(f'Competitive worker id generator[{self.worker}] heart beat stopped.')
def generate(self) -> int:
"""
generate snowflake worker id
"""
return self.worker.workerId
def competitive_worker_id(generator: CompetitiveWorkerIdGenerator) -> WorkerIdGenerator:
"""
create a worker id generator which delegate to given competitive generator
"""
return lambda: generator.generate()
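# Hypothetical wiring sketch (the subclass below and its trivial storage behaviour are
# illustrative assumptions, not part of this module):
#
#     class InMemoryWorkerIdGenerator(CompetitiveWorkerIdGenerator):
#         def first_declare_myself(self, worker): pass
#         def acquire_alive_worker_ids(self): return []
#         def declare_myself(self, worker): worker.lastBeatAt = get_current_time_in_seconds()
#
#     generate_worker_id = competitive_worker_id(InMemoryWorkerIdGenerator(data_center_id=0))
#     snowflake_worker_id = generate_worker_id()  # an int in [0, 1024)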
| 26.435484 | 101 | 0.767541 |
fb39e81cc76189e1ff84b8f4859a0a5d79c71246
| 23,315 |
py
|
Python
|
Packs/PhishLabs/Integrations/PhishLabsIOC_DRP/PhishLabsIOC_DRP.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/PhishLabs/Integrations/PhishLabsIOC_DRP/PhishLabsIOC_DRP.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/PhishLabs/Integrations/PhishLabsIOC_DRP/PhishLabsIOC_DRP.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
""" IMPORTS """
# Std imports
from datetime import datetime
# 3-rd party imports
from typing import Dict, Tuple, Union, Optional, List, Any, AnyStr
import urllib3
# Local imports
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
"""GLOBALS/PARAMS
Attributes:
INTEGRATION_NAME:
Name of the integration as shown in the integration UI, for example: Microsoft Graph User.
INTEGRATION_COMMAND_NAME:
Command names should be written in all lower-case letters,
and each word separated with a hyphen, for example: msgraph-user.
INTEGRATION_CONTEXT_NAME:
Context output names should be written in camel case, for example: MSGraphUser.
"""
INTEGRATION_NAME = 'PhishLabs IOC - DRP'
INTEGRATION_COMMAND_NAME = 'phishlabs-ioc-drp'
INTEGRATION_CONTEXT_NAME = 'PhishLabsIOC'
# Disable insecure warnings
urllib3.disable_warnings()
class Client(BaseClient):
def test_module(self) -> Dict:
"""Performs basic GET request to check if the API is reachable and authentication is successful.
Returns:
Response json
"""
return self.get_cases(max_records=2)
def travel_to_end_date(self, cases_temp: List, params: Dict, end_date: Optional[str], date_field: str, suffix: str) \
-> Tuple[List[Any], Dict[Any, Any], int, Optional[datetime]]:
"""Moving index to starting point, if neccery chage cases_temp (more get request)
Args:
cases_temp: case as starting point assuming list sorted by date
params: query params
end_date: end date of the query searched
date_field: date field to apply end date filter
suffix: suffix of url
Returns:
Tuple of (list of cases temp, modified params, index in cases, datetime object of last run in traveling)
"""
format_time = "%Y-%m-%dT%H:%M:%SZ"
last_time: Optional[datetime] = None if not cases_temp else datetime.strptime(cases_temp[0].get(date_field),
format_time)
end_date_obj: datetime = datetime.strptime(end_date, format_time) if end_date else datetime.now()
index = 0
while end_date_obj and last_time:
if end_date_obj < last_time:
if len(cases_temp) == index + 1:
params['offset'] += len(cases_temp)
cases_temp = self._http_request('GET',
url_suffix=suffix,
params=assign_params(**params),
timeout=20).get('data', [])
index = 0
if not cases_temp:
break
else:
index += 1
last_time = datetime.strptime(cases_temp[index].get(date_field), format_time)
else:
break
return cases_temp, params, index, last_time
def travel_to_begin_date(self, cases_temp: List, index: int, params: Dict, begin_date: Optional[str],
last_time: Optional[datetime], date_field: str, max_records: Union[str, int], suffix: str) \
-> List[Dict[Any, Any]]:
"""
Args:
suffix: suffix of url
cases_temp: case as starting point assuming list sorted by date
index: current traveling point in travelling
params: query params
begin_date: begin date to move while traveling case
last_time: last time of last case visited
date_field: date field to apply end date filter
max_records: max records to get in this query
suffix: suffix of url
Returns:
List of cases filtered by date
"""
format_time = "%Y-%m-%dT%H:%M:%SZ"
begin_date_obj: Optional[datetime] = datetime.strptime(begin_date, format_time) if begin_date else None
cases: List = []
while cases_temp and len(cases) < int(max_records) and last_time:
if begin_date_obj:
if last_time > begin_date_obj:
cases.append(cases_temp[index])
else:
break
else:
cases.append(cases_temp[index])
if len(cases) == max_records:
break
elif len(cases_temp) == index + 1:
params['offset'] += len(cases_temp)
cases_temp = self._http_request('GET',
url_suffix=suffix,
params=assign_params(**params),
timeout=20).get('data', [])
index = 0
if not cases_temp:
break
else:
index += 1
last_time = datetime.strptime(cases_temp[index].get(date_field), format_time)
return cases
def get_cases(self, status: Optional[str] = None, case_type: Optional[str] = None,
max_records: Union[str, int] = 20, offset: Union[str, int] = 0,
date_field: str = 'dateModified', begin_date: Optional[str] = None,
end_date: Optional[str] = None, query_type: str = '', period: Optional[str] = None) -> Dict:
"""
Query the specified kwargs with default parameters if not defined
Args:
status: Filter cases based on the case status
case_type: Filter cases by case type
max_records: Maximum number of cases to return, default is 20, maximum is 200
offset: Paginate results used in conjunction with maxRecords
date_field: Field to use to query using dateBegin and dateEnd parameters.
begin_date: Date query beginning date
end_date: Date query beginning date
query_type: query type influence on suffix - all/open/closed
period: timestamp (<number> <time unit>, e.g., 12 hours, 7 days)
Returns:
Response JSON as dictionary
"""
if period:
begin_date, end_date = parse_date_range(date_range=period,
date_format='%Y-%m-%dT%H:%M:%SZ')
suffix: str = f'/cases/{query_type}' if query_type else '/cases'
params: Dict = {
'status': status,
'type': case_type,
'offset': int(offset),
'maxRecords': int(max_records)
}
raw_response: Dict = self._http_request('GET',
url_suffix=suffix,
params=assign_params(**params),
timeout=20)
cases_temp: List = raw_response.get('data', [])
# About the drop some mean regex right now disable-secrets-detection-start
cases_temp, params, index, last_time = self.travel_to_end_date(cases_temp, params, end_date, date_field, suffix)
cases = self.travel_to_begin_date(cases_temp, index, params, begin_date, last_time, date_field, max_records,
suffix)
# Drops the mic disable-secrets-detection-end
raw_response['header']['returnResult'] = len(cases)
raw_response['header']['totalResult'] = len(cases)
raw_response['header']['queryParams']['maxRecords'] = len(cases)
raw_response['data'] = cases
return raw_response
def get_case_by_id(self, case_id: str) -> Dict:
"""Query incident by ID
Args:
case_id: ID of the case
Returns:
Response JSON as dictionary
"""
suffix = f"/cases/{case_id}"
return self._http_request('GET',
url_suffix=suffix)
''' HELPER FUNCTIONS '''
@logger
def indicator_ec(indicator: Dict, type_ec: AnyStr) -> Dict:
"""indicator convert to ec format
Get an indicator from raw response and concert to demisto entry context format
Args:
indicator: raw response dictionary
type_ec: type of entry context
Returns:
indicator entry context
"""
ec: Dict = {}
if type_ec == 'AttackSources':
ec = {
'URL': indicator.get('url'),
'UrlType': indicator.get('urlType'),
'IP': indicator.get('ipAddress'),
'ISP': indicator.get('isp'),
'Country': indicator.get('country'),
'TargetedBrands': indicator.get('targetedBrands'),
'FQDN': indicator.get('fqdn'),
'Domain': indicator.get('domain'),
'IsMaliciousDomain': indicator.get('isMaliciousDomain'),
'WhoIs': {
'Registrant': indicator.get('whois', {}).get('registrant'),
'Registration': {
'Created': indicator.get('whois', {}).get('registration', {}).get('created'),
'Expires': indicator.get('whois', {}).get('registration', {}).get('expires'),
'Updated': indicator.get('whois', {}).get('registration', {}).get('updated'),
'Registrar': indicator.get('whois', {}).get('registration', {}).get('registrar'),
'NameServers': indicator.get('whois', {}).get('name_servers')
},
}
}
elif type_ec == 'Attachments':
ec = {
'ID': indicator.get('id'),
'Type': indicator.get('type'),
'Description': indicator.get('description'),
'DateAdded': indicator.get('dateAdded'),
'FileName': indicator.get('fileName'),
'FileURL': indicator.get('fileURL')
}
elif type_ec == 'AssociatedURLs':
ec = {
'URL': indicator.get('url'),
'UrlType': indicator.get('urlType'),
'TargetedBrands': indicator.get('targetedBrands'),
'WhoIs': {
'Registrant': indicator.get(''),
'Registration': {
'Created': indicator.get('whois', {}).get('registration', {}).get('created'),
'Expires': indicator.get('whois', {}).get('registration', {}).get('expires'),
'Updated': indicator.get('whois', {}).get('registration', {}).get('updated'),
'Registrar': indicator.get('whois', {}).get('registration', {}).get('registrar'),
'NameServers': indicator.get('whois', {}).get('name_servers')
}
}
}
return assign_params(**ec)
@logger
def indicators_to_list_ec(indicators: List, type_ec: AnyStr) -> Union[Tuple[List, List], List]:
"""Unpack list of incidents to demisto ec format
Convert list of incidents from raw response to demisto entry context format lists
Args:
indicators: lit of indicators from raw response
type_ec: type of indicators
Returns:
List of indicators entry context
"""
ecs: List = []
for indicator in indicators:
ec = indicator_ec(indicator, type_ec)
ecs.append(ec)
return ecs
@logger
def raw_response_to_context(cases: Union[List, Any]) -> List:
"""
Convert incidents list from raw response to demisto entry context list format
Args:
cases: Incidents list
Returns:
Entry contexts of phishLabs, emails, files, urls, dbotScores
"""
phishlabs_ec: List = []
for case in cases:
# PhishLabs entry context
phishlabs: Dict = {
'CaseID': case.get('caseId'),
'Title': case.get('title'),
'Description': case.get('description'),
'CaseNumber': case.get('caseNumber'),
'Resolution': case.get('resolution'),
'ResolutionStatus': case.get('resolutionStatus'),
'CreatedBy': {
'ID': case.get('createdBy', {}).get('id'),
'Name': case.get('createdBy', {}).get('name'),
'DisplayName': case.get('createdBy', {}).get('displayName')
},
'Brand': case.get('brand'),
'Email': case.get('emailAddress'),
'CaseType': case.get('caseType'),
'CaseStatus': case.get('caseStatus'),
'DateCreated': case.get('dateCreated'),
'DateClosed': case.get('dateClosed'),
'DateModified': case.get('dateModified'),
'Customer': case.get('customer'),
'AttackSources': indicators_to_list_ec(indicators=case.get('attackSources', []), type_ec='AttackSources'),
'Attachments': indicators_to_list_ec(indicators=case.get('attachments', []), type_ec='Attachments'),
'ApplicationName': case.get('applicationName'),
'Platform': case.get('platform'),
'Severity': case.get('severity'),
'Developer': case.get('developer'),
'DeveloperWebsite': case.get('developerWebsite'),
'ApplicationDescription': case.get('applicationDescripion'),
'Language': case.get('language'),
'Hardware': case.get('hardware'),
'Phone': case.get('phoneNumber'),
'AssociatedURLs': indicators_to_list_ec(indicators=case.get('associatedURLs', []), type_ec='AssociatedURLs')
}
phishlabs_ec.append(assign_params(**phishlabs))
return phishlabs_ec
''' COMMANDS '''
@logger
def test_module_command(client: Client, *_) -> Tuple[None, None, str]:
"""Performs a basic GET request to check if the API is reachable and authentication is successful.
Args:
client: Client object with request
*_: Usually demisto.args()
Returns:
'ok' if test successful.
Raises:
DemistoException: If test failed.
"""
results = client.test_module()
if 'data' in results:
return None, None, 'ok'
raise DemistoException(f'Test module failed, {results}')
@logger
def fetch_incidents_command(
client: Client,
fetch_time: str,
max_records: Union[str, int],
date_field: str = 'dateModified',
last_run: Optional[str] = None) -> Tuple[List[Dict[str, Any]], Dict]:
"""Uses to fetch incidents into Demisto
Documentation: https://github.com/demisto/content/tree/master/docs/fetching_incidents
Args:
date_field: filter date is by dateCreated / dateClosed / dateModified
client: Client object with request
fetch_time: From when to fetch if first time, e.g. `3 days`
max_records: limit of incidents in a fetch
last_run: Last fetch object occurs.
Returns:
incidents, new last_run
"""
occurred_format = '%Y-%m-%dT%H:%M:%SZ'
if not last_run:
datetime_new_last_run, _ = parse_date_range(date_range=fetch_time,
date_format=occurred_format)
else:
datetime_new_last_run = last_run
raw_response = client.get_cases(begin_date=datetime_new_last_run,
date_field=date_field,
max_records=max_records)
cases_raw: List = raw_response.get('data', [])
cases_report = []
if cases_raw:
datetime_new_last_run = cases_raw[0].get(date_field)
for case in cases_raw:
cases_report.append({
'name': f"{INTEGRATION_NAME}: {case.get('caseId')}",
'occurred': case.get(date_field),
'rawJSON': json.dumps(case)
})
return cases_report, datetime_new_last_run
@logger
def get_cases_command(client: Client, **kwargs: Dict) -> Tuple[object, dict, Union[List, Dict]]:
"""Get all case by filters and return outputs in Demisto's context entry
Args:
client: Client object with request
kwargs: Usually demisto.args()
Returns:
human readable (markdown format), entry context and raw response
"""
raw_response: Dict = client.get_cases(**kwargs) # type: ignore
if raw_response:
title = f'{INTEGRATION_NAME} - cases'
phishlabs_ec = raw_response_to_context(raw_response.get('data', []))
context_entry: Dict = {
f'{INTEGRATION_CONTEXT_NAME}(val.DRP.CaseID && val.EIR.CaseID === obj.DRP.CaseID && '
f'val.DRP.DateModified && val.DRP.DateModified === obj.DRP.DateModified)': {
'DRP': phishlabs_ec
}
}
human_readable = tableToMarkdown(name=title,
t=phishlabs_ec,
headers=['CaseID', 'Title', 'CaseStatus', 'DateCreated', 'Resolution',
'ResolutionStatus', 'CreatedBy'],
removeNull=True)
return human_readable, context_entry, raw_response
else:
return f'{INTEGRATION_NAME} - Could not find any results for given query', {}, {}
@logger
def get_case_by_id_command(client: Client, **kwargs: Dict) -> Tuple[object, dict, Union[List, Dict]]:
"""Get case by ID and return outputs in Demisto's context entry
Args:
client: Client object with request
kwargs: Usually demisto.args()
Returns:
human readable (markdown format), entry context and raw response
"""
raw_response: Dict = client.get_case_by_id(**kwargs) # type: ignore
if raw_response:
title = f'{INTEGRATION_NAME} - case ID {kwargs.get("caseid")}'
phishlabs_ec = raw_response_to_context(raw_response.get('data', []))
context_entry: Dict = {
f'{INTEGRATION_CONTEXT_NAME}(val.DRP.CaseID && val.EIR.CaseID === obj.DRP.CaseID && '
f'val.DRP.DateModified && val.DRP.DateModified === obj.DRP.DateModified)': {
'DRP': phishlabs_ec
}
}
human_readable = tableToMarkdown(name=title,
t=phishlabs_ec,
headers=['CaseID', 'Title', 'CaseStatus', 'DateCreated', 'Resolution',
'ResolutionStatus', 'CreatedBy'],
removeNull=True)
return human_readable, context_entry, raw_response
else:
return f'{INTEGRATION_NAME} - Could not find any results for given query', {}, {}
@logger
def get_open_cases_command(client: Client, **kwargs: Dict) -> Tuple[object, dict, Union[List, Dict]]:
"""Get all open case by filters and return outputs in Demisto's context entry
Args:
client: Client object with request
kwargs: Usually demisto.args()
Returns:
human readable (markdown format), entry context and raw response
"""
raw_response: Dict = client.get_cases(**kwargs, query_type='open') # type: ignore
if raw_response:
title = f'{INTEGRATION_NAME} - open cases'
phishlabs_ec = raw_response_to_context(raw_response.get('data', []))
context_entry: Dict = {
f'{INTEGRATION_CONTEXT_NAME}(val.DRP.CaseID && val.EIR.CaseID === obj.DRP.CaseID && '
f'val.DRP.DateModified && val.DRP.DateModified === obj.DRP.DateModified)': {
'DRP': phishlabs_ec
}
}
human_readable = tableToMarkdown(name=title,
t=phishlabs_ec,
headers=['CaseID', 'Title', 'CaseStatus', 'DateCreated', 'Resolution',
'ResolutionStatus', 'CreatedBy'],
removeNull=True)
return human_readable, context_entry, raw_response
else:
return f'{INTEGRATION_NAME} - Could not find any results for given query', {}, {}
@logger
def get_closed_cases_command(client: Client, **kwargs: Dict) -> Tuple[object, dict, Union[List, Dict]]:
"""Get all closed case by filters and return outputs in Demisto's context entry
Args:
client: Client object with request
kwargs: Usually demisto.args()
Returns:
human readable (markdown format), entry context and raw response
"""
raw_response: Dict = client.get_cases(**kwargs, query_type='closed') # type: ignore
if raw_response:
title = f'{INTEGRATION_NAME} - Closed cases'
phishlabs_ec = raw_response_to_context(raw_response.get('data', []))
context_entry: Dict = {
f'{INTEGRATION_CONTEXT_NAME}(val.DRP.CaseID && val.EIR.CaseID === obj.DRP.CaseID && '
f'val.DRP.DateModified && val.DRP.DateModified === obj.DRP.DateModified)': {
'DRP': phishlabs_ec
}
}
human_readable = tableToMarkdown(name=title,
t=phishlabs_ec,
headers=['CaseID', 'Title', 'CaseStatus', 'DateCreated', 'Resolution',
'ResolutionStatus', 'CreatedBy'],
removeNull=True)
return human_readable, context_entry, raw_response
else:
return f'{INTEGRATION_NAME} - Could not find any results for given query', {}, {}
''' COMMANDS MANAGER / SWITCH PANEL '''
def main():
params = demisto.params()
base_url = urljoin(params.get('url'), '/v1/data')
verify_ssl = not params.get('insecure', False)
proxy = params.get('proxy')
client = Client(
base_url=base_url,
verify=verify_ssl,
proxy=proxy,
auth=(params.get('credentials', {}).get('identifier'),
params.get('credentials', {}).get('password'))
)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
commands = {
'test-module': test_module_command,
f'{INTEGRATION_COMMAND_NAME}-get-cases': get_cases_command,
f'{INTEGRATION_COMMAND_NAME}-get-case-by-id': get_case_by_id_command,
f'{INTEGRATION_COMMAND_NAME}-get-open-cases': get_open_cases_command,
f'{INTEGRATION_COMMAND_NAME}-get-closed-cases': get_closed_cases_command
}
try:
if command == 'fetch-incidents':
incidents, new_last_run = fetch_incidents_command(client,
fetch_time=params.get('fetchTime'),
last_run=demisto.getLastRun().get('lastRun'),
max_records=params.get('fetchLimit'),
date_field=params.get('fetchByDate'))
demisto.incidents(incidents)
demisto.setLastRun({'lastRun': new_last_run})
else:
readable_output, outputs, raw_response = commands[command](client=client, **demisto.args())
return_outputs(readable_output, outputs, raw_response)
except Exception as e:
err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]'
return_error(err_msg, error=e)
if __name__ == 'builtins':
main()
| 41.119929 | 121 | 0.567532 |
34d7c7c5eb13220926df4c119b7410eecc7ea9c7
| 7,951 |
py
|
Python
|
MapMaker.py
|
Turidus/Minecraft-MapMaker
|
16e4015d03d67f04cfd247c11c9e3e2a5429b79f
|
[
"MIT"
] | 1 |
2018-10-24T16:02:08.000Z
|
2018-10-24T16:02:08.000Z
|
MapMaker.py
|
Turidus/Minecraft-MapMaker
|
16e4015d03d67f04cfd247c11c9e3e2a5429b79f
|
[
"MIT"
] | 2 |
2018-07-16T19:04:00.000Z
|
2018-07-29T11:43:53.000Z
|
MapMaker.py
|
Turidus/Minecraft-MapMaker
|
16e4015d03d67f04cfd247c11c9e3e2a5429b79f
|
[
"MIT"
] | null | null | null |
"""
Manager for the different calculations needed to provide the requested data.
See Readme for details.
Can be used directly with a command line tool or with a GUI.
Made by Turidus https://github.com/Turidus/Minecraft-MapMaker
Copyright (c) 2018 Turidus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import argparse
import re
import itertools
import Parsers
import Saving
import MapColorIDGenerator
def MapMaker(args, outPrioQueue = None):
"""
Manages the creation of the specified data.
param: args: A struct like class that provides a field for every possible option.
Needed fields and their default values are:
class Args():
pathToImage = None
bl = []
                n = None
twoD = False
p = True
bp = True
ba = True
s = True
minY = 4
maxY = 250
maxS = 129
v = False
outPrioQueue: A queue.PriorityQueue(). If provided, this will be used as output channel.
If None, the output uses print().
    Exception: Raises IOError, ValueError
"""
#Managing communication with GUI
prioCounter = itertools.count()
def print2(tulpe): print(tulpe[1])
if outPrioQueue == None:
newPrint = print2
else:
newPrint = outPrioQueue.put
#Settings
if args.v:
try:
with open("version") as vFile:
newPrint ((prioCounter.__next__(), vFile.read()))
except IOError:
newPrint((prioCounter.__next__(), "Version file not found"))
newPrint((prioCounter.__next__(),"Setting up"))
imagePath = os.path.abspath(args.pathToImage)
if not os.path.isfile(imagePath):
raise IOError("path does not point at a file")
if args.n == None:
imageName = os.path.split(os.path.splitext(imagePath)[0])[1]
else:
imageName = re.sub(r'[^a-zA-Z0-9_]', '', args.n)
if args.twoD:
mapColorIDDic = MapColorIDGenerator.mapColorIDGenerator2D(args.bl)
else:
mapColorIDDic = MapColorIDGenerator.mapColorIDGenerator3D(args.bl)
positionMatrixMinY = int(args.minY) if args.minY else 4
positionMatrixMaxY = int(args.maxY) if args.maxY else 250
    if 0 > positionMatrixMinY or positionMatrixMinY > 251:
        raise ValueError("minY is smaller than 0 or bigger than 251")
    if 4 > positionMatrixMaxY or positionMatrixMaxY > 255:
        raise ValueError("maxY is smaller than 4 or bigger than 255")
    if positionMatrixMinY >= positionMatrixMaxY - 3:
        raise ValueError("minY and maxY are too close together (closer than 4) or minY is bigger than maxY")
maxSchematicSize = int(args.maxS) if args.maxS else 129
if maxSchematicSize < 1:
raise ValueError("maxS is smaller than 1")
elif maxSchematicSize > 129:
newPrint((prioCounter.__next__(),"Your schematic size is bigger 129. be careful when importing such large schematics"))
newPrint((prioCounter.__next__(),"Finished setting up"))
#Calculating intermediaries
newPrint((prioCounter.__next__(),"Calculating rgbMatrix"))
rgbMatrix = Parsers.imageFileToRGBMatrix(imagePath)
newPrint((prioCounter.__next__(),"Done"))
newPrint((prioCounter.__next__(),"Calculating mapColorIDMatrix"))
mapColorIDMatrix = Parsers.rgbMatrixTomapColorID(rgbMatrix,mapColorIDDic)
newPrint((prioCounter.__next__(),"Done"))
if args.bp or args.s:
newPrint((prioCounter.__next__(),"Calculating positionMatrix"))
positionMatrix = Parsers.mapColorIDToPositionMatrix(mapColorIDMatrix, positionMatrixMinY, positionMatrixMaxY)
newPrint((prioCounter.__next__(),"Done"))
if args.s:
newPrint((prioCounter.__next__(), "Calculating Schematic"))
tag_Compound_List = Parsers.positionMatrixToTag_CompoundList(positionMatrix, mapColorIDDic, positionMatrixMinY, positionMatrixMaxY, maxSchematicSize)
newPrint((prioCounter.__next__(),"Done"))
#Calculating and saving results
if args.ba:
newPrint((prioCounter.__next__(),"Saving AmountTXT"))
Saving.saveAmountTxT(mapColorIDMatrix,mapColorIDDic,imageName)
if args.bp:
newPrint((prioCounter.__next__(),"Saving PositionTXT"))
Saving.saveBlockPositionTxT(positionMatrix,mapColorIDDic, imageName)
if args.p:
newPrint((prioCounter.__next__(),"Saving Image"))
Saving.saveImage(mapColorIDMatrix, mapColorIDDic, imageName)
if args.s:
newPrint((prioCounter.__next__(),"Saving Schematic"))
Saving.saveSchematic(tag_Compound_List, imageName)
newPrint((prioCounter.__next__(),"Finished with this image"))
if __name__ == "__main__":
    cmdparser = argparse.ArgumentParser(description="This processes image files into multiple files\nthat help to build Minecraft ingame maps.")
cmdparser.add_argument("pathToImage", help="The path to the image that should be processed\n")
cmdparser.add_argument("-bl", nargs="+", help="Optional list of BaseColorIDs that should not be used\n")
cmdparser.add_argument("-n", help = "Optional name for the resulting files\n")
cmdparser.add_argument("-v", action="store_true", help =" Show version")
cmdparser.add_argument("-twoD", action="store_true", help = "If added, this will generate a flat map instead of a stepped one\n")
cmdparser.add_argument("-p", action="store_false", help = "If added, this will prevent the generation of a preview picture of the map\n")
cmdparser.add_argument("-bp", action="store_false", help = "If added, this will prevent the generation of a list of the block positions\n")
cmdparser.add_argument("-ba", action="store_false", help = "If added, this will prevent the generation of a list of needed amounts of blocks\n")
cmdparser.add_argument("-s", action="store_false", help = "If added, this will prevent the generation of the schematic file\n")
cmdparser.add_argument("-minY", help = "Defines the minimum Y coordinate at which blocks are placed.\n Default = 4. Should be the block you will be standing on for schematics\n")
cmdparser.add_argument("-maxY", help = "Defines the maximum Y coordinate at which blocks are placed. Default = 250. Does not impact schematics\n")
cmdparser.add_argument("-maxS", help = "Defines the maximum sizie in X and Z of a schematic.\n Default = 128. If the picture is bigger, multiple schematics will be generated")
args = cmdparser.parse_args()
MapMaker(args)
| 38.97549 | 182 | 0.667337 |
1f4593ac5b700281a13ab944e7c95b90804f0e53
| 425 |
py
|
Python
|
doc/examples/using_jit.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 88 |
2019-01-08T16:39:08.000Z
|
2022-02-06T14:19:23.000Z
|
doc/examples/using_jit.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 13 |
2019-06-20T15:53:10.000Z
|
2021-02-09T11:03:29.000Z
|
doc/examples/using_jit.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 1 |
2019-11-05T03:03:14.000Z
|
2019-11-05T03:03:14.000Z
|
import numpy as np
from transonic import jit
def func0(a, b):
return a + b
@jit
def func1(a: int, b: int):
print("b", b)
return np.exp(a) * b * func0(a, b)
if __name__ == "__main__":
from time import sleep
a = b = np.zeros([2, 3])
for i in range(20):
print(f"{i}, call with arrays")
func1(a, b)
print(f"{i}, call with numbers")
func1(1, 1.5)
sleep(1)
| 15.178571 | 40 | 0.531765 |
23bcd4ffaefa3e89e34bce80ced4b456a28dde6b
| 445 |
py
|
Python
|
year_3/databases_sem1/lab3/api/migrations/0003_auto_20171226_2034.py
|
honchardev/KPI
|
f8425681857c02a67127ffb05c0af0563a8473e1
|
[
"MIT"
] | null | null | null |
year_3/databases_sem1/lab3/api/migrations/0003_auto_20171226_2034.py
|
honchardev/KPI
|
f8425681857c02a67127ffb05c0af0563a8473e1
|
[
"MIT"
] | 21 |
2020-03-24T16:26:04.000Z
|
2022-02-18T15:56:16.000Z
|
year_3/databases_sem1/lab3/api/migrations/0003_auto_20171226_2034.py
|
honchardev/KPI
|
f8425681857c02a67127ffb05c0af0563a8473e1
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0 on 2017-12-26 17:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0002_payroll'),
]
operations = [
migrations.AlterField(
model_name='payment',
name='method',
field=models.CharField(choices=[('cash', 'Cash payment method'), ('card', 'Card payment method')], max_length=4),
),
]
| 23.421053 | 125 | 0.593258 |
f1d6c9f1c379c7963cc254399f3d882b86e0a5b5
| 2,153 |
py
|
Python
|
tests/test_rechnungsposition.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 1 |
2022-03-02T12:49:44.000Z
|
2022-03-02T12:49:44.000Z
|
tests/test_rechnungsposition.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 21 |
2022-02-04T07:38:46.000Z
|
2022-03-28T14:01:53.000Z
|
tests/test_rechnungsposition.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timezone
import pytest # type:ignore[import]
from bo4e.com.rechnungsposition import Rechnungsposition, RechnungspositionSchema
from bo4e.enum.artikelid import ArtikelId
from bo4e.enum.bdewartikelnummer import BDEWArtikelnummer
from bo4e.enum.zeiteinheit import Zeiteinheit
from tests.serialization_helper import assert_serialization_roundtrip # type:ignore[import]
from tests.test_betrag import example_betrag # type:ignore[import]
from tests.test_menge import example_menge # type:ignore[import]
from tests.test_preis import example_preis # type:ignore[import]
from tests.test_steuerbetrag import example_steuerbetrag # type:ignore[import]
class TestRechnungsposition:
@pytest.mark.parametrize(
"rechnungsposition",
[
pytest.param(
Rechnungsposition(
positionsnummer=1,
lieferung_von=datetime(2021, 3, 15, tzinfo=timezone.utc),
lieferung_bis=datetime(2022, 3, 15, tzinfo=timezone.utc),
positionstext="Besonders wertvolle Rechnungsposition",
zeiteinheit=Zeiteinheit.JAHR,
artikelnummer=BDEWArtikelnummer.AUSGLEICHSENERGIE_UNTERDECKUNG,
lokations_id="51238696781",
positions_menge=example_menge,
zeitbezogene_menge=example_menge,
einzelpreis=example_preis,
teilsumme_netto=example_betrag,
teilrabatt_netto=example_betrag,
teilsumme_steuer=example_steuerbetrag,
artikel_id=ArtikelId.ARTIKEL_2017004,
),
id="maximal attributes",
)
],
)
def test_serialization_roundtrip(self, rechnungsposition):
"""
Test de-/serialisation
"""
assert_serialization_roundtrip(rechnungsposition, RechnungspositionSchema())
def test_missing_required_attribute(self):
with pytest.raises(TypeError) as excinfo:
_ = Rechnungsposition()
assert "missing 8 required" in str(excinfo.value)
| 42.215686 | 92 | 0.665583 |
7b0257e77974e2bc48d09f8e4dd6b072d78240ee
| 455 |
py
|
Python
|
Programming Languages/Python/Theory/100_Python_Challenges/Section_2_String/38. remove all occurrences of a given character from an input string.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 101 |
2021-12-20T11:57:11.000Z
|
2022-03-23T09:49:13.000Z
|
Programming Languages/Python/Theory/100_Python_Challenges/Section_2_String/38. remove all occurrences of a given character from an input string.py
|
Sid-1164/Resources
|
3987dcaeddc8825f9bc79609ff26094282b8ece1
|
[
"MIT"
] | 4 |
2022-01-12T11:55:56.000Z
|
2022-02-12T04:53:33.000Z
|
Programming Languages/Python/Theory/100_Python_Challenges/Section_2_String/38. remove all occurrences of a given character from an input string.py
|
Sid-1164/Resources
|
3987dcaeddc8825f9bc79609ff26094282b8ece1
|
[
"MIT"
] | 38 |
2022-01-12T11:56:16.000Z
|
2022-03-23T10:07:52.000Z
|
"""
Write a function that accepts a string and a character.
The function should remove all occurrences of the given character from the input string and return the string.
Example:
input_string = 'technique'
char = 'e'
Expected output = 'tchniqu'
"""
def remove_char(input_string, char):
new_str = ""
for i in input_string:
if i == char:
pass
else:
new_str = new_str + i
return new_str
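# Minimal usage sketch (added for illustration, not part of the original
# challenge file), exercising the example documented above:
if __name__ == "__main__":
    result = remove_char("technique", "e")
    assert result == "tchniqu"
    print(result)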
| 20.681818 | 110 | 0.635165 |
19a5557b63c35974cdea0c529a2c4b4e7ccc56e9
| 2,319 |
py
|
Python
|
research/cv/SRGAN/preprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/SRGAN/preprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/SRGAN/preprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""preprocess"""
import os
import argparse
import numpy as np
from mindspore import context
from src.dataset.testdataset import create_testdataset
parser = argparse.ArgumentParser(description="SRGAN eval")
parser.add_argument("--test_LR_path", type=str, default='./Set14/LR')
parser.add_argument("--test_GT_path", type=str, default='./Set14/HR')
parser.add_argument("--result_path", type=str, default='./preprocess_path')
parser.add_argument("--device_id", type=int, default=1, help="device id, default: 0.")
args = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_id=args.device_id)
def padding(_img, target_shape):
h, w = target_shape[0], target_shape[1]
img_h, img_w, _ = _img.shape
dh, dw = h - img_h, w - img_w
if dh < 0 or dw < 0:
raise RuntimeError(f"target_shape is bigger than img.shape, {target_shape} > {_img.shape}")
if dh != 0 or dw != 0:
_img = np.pad(_img, ((0, dh), (0, dw), (0, 0)), "constant")
return _img
if __name__ == '__main__':
test_ds = create_testdataset(1, args.test_LR_path, args.test_GT_path)
test_data_loader = test_ds.create_dict_iterator(output_numpy=True)
i = 0
img_path = args.result_path
if not os.path.exists(img_path):
os.makedirs(img_path)
for data in test_data_loader:
file_name = "SRGAN_data" + "_" + str(i) + ".bin"
file_path = img_path + "/" + file_name
lr = data['LR']
lr = lr[0]
lr = lr.transpose(1, 2, 0)
org_img = padding(lr, [200, 200])
org_img = org_img.transpose(2, 0, 1)
img = org_img.copy()
img.tofile(file_path)
i = i + 1
| 39.982759 | 99 | 0.663217 |
271670ccee1d88305179d71e574ad1cc94902fd0
| 1,090 |
py
|
Python
|
nz_crawl_demo/day5/demo2.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | null | null | null |
nz_crawl_demo/day5/demo2.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | 27 |
2020-02-12T07:55:58.000Z
|
2022-03-12T00:19:09.000Z
|
nz_crawl_demo/day5/demo2.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | 2 |
2020-02-18T01:54:55.000Z
|
2020-02-21T11:36:28.000Z
|
from selenium import webdriver
from lxml import etree
import time
from selenium.webdriver.common.by import By
driver = webdriver.Chrome()
# driver.get("https://www.zhihu.com/signin?next=%2F")
# driver.get("https://www.baidu.com/")
driver.get("https://v3.bootcss.com/examples/signin/")
html = driver.page_source
# html = etree.HTML(html)
# elements = html.xpath("//input[@name='username']/@placeholder")[0]
# print(elements)
# inputTag = driver.find_element(By.XPATH,"//input[@name='username']")
# inputTag = driver.find_element(By.NAME,"username")
# inputTag = driver.find_element(By.CSS_SELECTOR,".SignFlow-accountInput > .Input")
# inputTag = driver.find_element(By.ID,".SignFlow-accountInput > .Input")
# inputTag = driver.find_element(By.CLASS_NAME,"Input")
# inputTag.send_keys('13888888888')
# time.sleep(3)
# inputTag.clear()
# inputTag = driver.find_element_by_id('kw')
# inputTag.send_keys('python')
# time.sleep(2)
# # inputTag.clear()
# submitTag = driver.find_element_by_id('su')
# submitTag.click()
reBtn = driver.find_element_by_css_selector('label > input ')
reBtn.click()
| 35.16129 | 83 | 0.738532 |
2786572b449ddc1407f3069c88529370b47fc8e0
| 6,737 |
py
|
Python
|
spider/get_HKProtest.py
|
iecasszyjy/tweet_search-master
|
e4978521a39964c22ae46bf35d6ff17710e8e6c6
|
[
"MIT"
] | null | null | null |
spider/get_HKProtest.py
|
iecasszyjy/tweet_search-master
|
e4978521a39964c22ae46bf35d6ff17710e8e6c6
|
[
"MIT"
] | 2 |
2021-03-31T18:54:16.000Z
|
2021-12-13T19:49:08.000Z
|
spider/get_HKProtest.py
|
iecasszyjy/tweet_search-master
|
e4978521a39964c22ae46bf35d6ff17710e8e6c6
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
# Fetches tweets for the 2019 Hong Kong protest events
import pymongo
from pymongo import InsertOne
from pymongo.errors import BulkWriteError
import random
from pprint import pprint
from tqdm import tqdm
client = pymongo.MongoClient('mongodb://3.220.111.222:27017/')
client.admin.authenticate("aircas", "aircas@2018", mechanism='SCRAM-SHA-1')
db = client['2019HongKong_protest']
def generateTrigger(triggers):
res = ''
for i in range(len(triggers)):
if i == 0:
res += '(' + triggers[i]
else:
res += ' OR ' + triggers[i]
res += ')'
return res
def generateTopic(topics):
res = ''
for i in range(len(topics)):
if i == 0:
res += topics[i]
else:
res += ' OR ' + topics[i]
return res
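# Illustrative note (added, not part of the original spider): for small inputs
# the two helpers above produce, e.g.
#   generateTrigger(['protest', 'police']) -> '(protest OR police)'
#   generateTopic(['#HongKong', '#HK'])    -> '#HongKong OR #HK'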
# Set the Twitter query conditions
def set_conditions():
    # Set the start and end dates
stime = '2019-03-01'
etime = '2019-10-20'
    # Location names used for the Hong Kong event query
# locations = 'Hong Kong OR ' + 'Hong Kong Island OR Central and Western District OR Eastern District OR Southern District OR Wan Chai District OR ' + 'Kowloon OR Kowloon City District OR Kwun Tong District OR Sham Shui Po District OR Wong Tai Sin District OR Yau Tsim Mong District OR ' + 'New Territories OR Island District OR Kwai Tsing District OR North District OR Sai Kung District OR Sha Tin District OR Tai Po District OR Tsuen Wan District OR Tuen Mun District OR Yuen Long District OR ' + "Kowloon Reservoir OR Kowloon City OR Kowloon Tong OR Kowloon Bay OR Pat Sin Leng OR Sheung Shui OR Sheung Wan OR To Kwa Wan OR Tai Shui Hang OR Tate's Cairn OR Tai Hang OR Tai Mei Tuk OR Tai Kok Tsui OR Tai Tung Shan OR Sunset Peak OR Tai Po Industrial Estate OR Tai Po OR Tai Po Kau OR Tai Po Market OR " + "Tai Long Wan OR Tai Wai OR Tai Mo Shan OR Tai Wo Hau OR Tai Mong Tsai OR Tai Tam Reservoirs OR Tai Tam Bay OR Tai O OR Lantau Island OR Tai Pang Wan OR Mirs Bay OR Tai Lam Chung OR Tai Lam Chung Reservoir OR Siu Sai Wan OR Siu Lam OR Central and Western OR Central OR Tseng Lan Shue OR Yuen Long OR Fan Lau OR " + "Tin Shui Wai OR Tin Hau OR Prince Edward OR Tai Koo OR Tai Wo OR Tuen Mun OR Fo Tan OR Ngau Chi Wan OR Ngau Mei Hoi OR Port Shelter OR Ngau Tau Kok OR North Point OR North OR Pak Tam Chung OR Ta Kwu Ling OR Ting Kau OR Shek Mun OR Shek Kong OR Shek Kip Mei OR Shek Tong Tsui OR Shek Pik OR Shek Pik Reservoir OR " + "Shek O OR Kei Ling Ha Hoi OR Three Fathoms Cove OR Siu Hong OR Crooked Island OR Tolo Harbour OR Tsim Sha Tsui OR East Tsim Sha Tsui OR Tsim Bei Tsui OR Sai Kung Hoi OR Inner Port Shelter OR Sai Kung OR Sai Ying Pun OR Sai Wan Ho OR Ho Man Tin OR Jordan OR Hang Hau OR Heng Fa Chuen OR Sha Tin Hoi OR Sha Tin OR " + "Sha Tin Wai OR Sha Tau Kok OR Pui O OR Tolo Channel OR Stanley OR Chek Lap Kok OR King's Park OR Wo Hop Shek OR Peng Chau OR Mong Kok OR Ngong Ping OR Ngong Suen Chau OR Stonecutters Island OR Tung Ping Chau OR Tung Chung OR Eastern OR Tung Lung Chau OR Kwo Chau Kwan To OR Lam Tsuen OR Sunny Bay OR Ho Pui Reservoir OR " + "Yau Tsim Mong OR Yau Ma Tei OR Yau Tong OR Admiralty OR Cheung Sha Wan OR Cheung Chau OR Tsing Shan OR Castle Peak OR Tsing Yi OR Tsing Lung Tau"
locations = ['Hong Kong', 'Hong Kong Island', 'Central and Western District', 'Eastern District', 'Southern District', 'Wan Chai District', 'Kowloon', 'Kowloon City District', 'Kwun Tong District', 'Sham Shui Po District',
'Wong Tai Sin District', 'Yau Tsim Mong District', 'New Territories', 'Island District', 'Kwai Tsing District', 'North District', 'Sai Kung District', 'Sha Tin District', 'Tai Po District', 'Tsuen Wan District',
'Tuen Mun District', 'Yuen Long District', 'Kowloon Reservoir', 'Kowloon City', 'Kowloon Tong', 'Kowloon Bay', 'Pat Sin Leng', 'Sheung Shui', 'Sheung Wan', 'To Kwa Wan',
'Tai Shui Hang', "Tate's Cairn", 'Tai Hang', 'Tai Mei Tuk', 'Tai Kok Tsui', 'Tai Tung Shan', 'Sunset Peak', 'Tai Po Industrial Estate', 'Tai Po', 'Lantau Island',
'Tai Po Kau', 'Tai Po Market', 'Tai Long Wan', 'Tai Wai', 'Tai Mo Shan', 'Tai Wo Hau', 'Tai Mong Tsai', 'Tai Tam Reservoirs', 'Tai Tam Bay', 'Tai O',
'Tai Pang Wan', 'Mirs Bay', 'Tai Lam Chung', 'Tai Lam Chung Reservoir', 'Siu Sai Wan', 'Siu Lam', 'Central and Western', 'Central', 'Tseng Lan Shue', 'Yuen Long',
'Fan Lau', 'Tin Shui Wai', 'Tin Hau', 'Prince Edward', 'Tai Koo', 'Tai Wo', 'Tuen Mun', 'Fo Tan', 'Ngau Chi Wan', 'Ngau Mei Hoi',
'Port Shelter', 'Ngau Tau Kok', 'North Point', 'North', 'Pak Tam Chung', 'Ta Kwu Ling', 'Ting Kau', 'Shek Mun', 'Shek Kong', 'Shek Kip Mei',
'Shek Tong Tsui', 'Shek Pik', 'Shek Pik Reservoir', 'Shek O', 'Kei Ling Ha Hoi', 'Three Fathoms Cove', 'Siu Hong', 'Crooked Island', 'Tolo Harbour', 'Tsim Sha Tsui',
'East Tsim Sha Tsui', 'Tsim Bei Tsui', 'Sai Kung Hoi', 'Inner Port Shelter', 'Sai Kung', 'Sai Ying Pun', 'Sai Wan Ho', 'Ho Man Tin', 'Jordan', 'Hang Hau',
'Heng Fa Chuen', 'Sha Tin Hoi', 'Sha Tin', 'Sha Tin Wai', 'Sha Tau Kok', 'Pui O', 'Tolo Channel', 'Stanley', 'Chek Lap Kok', "King's Park",
'Wo Hop Shek', 'Peng Chau', 'Mong Kok', 'Ngong Ping', 'Ngong Suen Chau', 'Stonecutters Island', 'Tung Ping Chau', 'Tung Chung', 'Eastern', 'Tung Lung Chau',
'Kwo Chau Kwan To', 'Lam Tsuen', 'Sunny Bay', 'Ho Pui Reservoir', 'Yau Tsim Mong', 'Yau Ma Tei', 'Yau Tong', 'Admiralty', 'Cheung Sha Wan', 'Cheung Chau',
'Tsing Shan', 'Castle Peak', 'Tsing Yi', 'Tsing Lung Tau']
    # Keywords (triggers) used for the Hong Kong event query
triggers = ['protest', 'protests', 'protesters', 'citizens', 'march', 'marched', 'police', 'government', 'officers', 'lam',
'carrie', 'political', 'force', 'violence', 'riot', 'mainland', 'independent', 'lawmakers', 'revolution']
    # Topics (hashtags) used for the Hong Kong event query
topics = ['#HongKong', '#HongKongProtests', '#HongKongProtesters', '#HK', '#HKprotests', '#FreeHK', '#china', '#StandWithHongKong', '#FightForFreedomStandWithHongKong', '#香港']
return stime, etime, locations, triggers, topics
if __name__ == '__main__':
stime, etime, locations, triggers, topics = set_conditions()
    # Store the event query conditions in MongoDB, creating one event per location name
requests = list()
triggersStr = generateTrigger(triggers)
topicsStr = generateTopic(topics)
for loc in locations:
eventId = hash(stime + etime + loc + triggersStr + topicsStr)
requests.append(InsertOne({'id': eventId,
'event': {'stime': stime, 'etime': etime, 'location': loc, 'triggers': triggersStr, 'topics': topicsStr}}))
try:
result = db.event_list.bulk_write(requests)
pprint(result.bulk_api_result)
except BulkWriteError as bwe:
pprint(bwe.details)
client.close()
| 81.168675 | 2,232 | 0.659641 |
9a2534954721c24643d91d34abc0efe06b17e9c4
| 3,015 |
py
|
Python
|
ts2panda/scripts/run.py
|
openharmony-sig-ci/ark_ts2abc
|
1d6fac6447760fce2e81c3738ac735b4424eed31
|
[
"Apache-2.0"
] | null | null | null |
ts2panda/scripts/run.py
|
openharmony-sig-ci/ark_ts2abc
|
1d6fac6447760fce2e81c3738ac735b4424eed31
|
[
"Apache-2.0"
] | null | null | null |
ts2panda/scripts/run.py
|
openharmony-sig-ci/ark_ts2abc
|
1d6fac6447760fce2e81c3738ac735b4424eed31
|
[
"Apache-2.0"
] | 2 |
2021-09-13T11:32:30.000Z
|
2021-09-13T12:12:06.000Z
|
#!/usr/bin/env python3
# coding: utf-8
"""
Copyright (c) 2021 Huawei Device Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Description: Compile ark front-end code with tsc
"""
import os
import subprocess
import argparse
import platform
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--src-dir',
help='Source directory')
parser.add_argument('--dist-dir',
help='Destination directory')
parser.add_argument('--platform',
help='platform, as: linux, mac, win')
parser.add_argument('--node',
help='node path')
parser.add_argument("--node-modules",
help='path to node-modules exetuable')
arguments = parser.parse_args()
return arguments
def set_env(node_dir):
jsoner_format = ":"
if platform.system() == "Windows":
jsoner_format = ";"
os.environ["PATH"] = f'{node_dir}{jsoner_format}{os.environ["PATH"]}'
def run_command(cmd, execution_path=os.getcwd()):
print(" ".join(cmd) + " | execution_path: " + execution_path)
proc = subprocess.Popen(cmd, cwd=execution_path)
ret = proc.wait()
assert not ret, f'\n{" ".join(cmd)} failed'
def node_modules(options):
src_dir = options.src_dir
dist_dir = options.dist_dir
run_command(['cp', '-f', os.path.join(src_dir, "package.json"),
os.path.join(dist_dir, "package.json")])
run_command(['cp', '-f', os.path.join(src_dir, "package-lock.json"),
os.path.join(dist_dir, "package-lock.json")])
if options.node_modules:
run_command(['cp', '-rf', options.node_modules,
os.path.join(dist_dir, "node_modules")])
else:
run_command(['npm', 'install'], dist_dir)
def npm_run_build(options):
plat_form = options.platform
node_modules_dir = os.path.join(options.dist_dir, 'node_modules')
tsc = os.path.join(node_modules_dir, "typescript/bin/tsc")
os.environ["NODE_PATH"] = node_modules_dir
if plat_form == "linux":
cmd = [tsc, '-b', 'src']
run_command(cmd, options.dist_dir)
elif plat_form == "win":
cmd = [tsc, '-b', 'src/tsconfig.win.json']
run_command(cmd, options.dist_dir)
elif plat_form == 'mac':
cmd = [tsc, '-b', 'src/tsconfig.mac.json']
run_command(cmd, options.dist_dir)
if __name__ == "__main__":
ARGS = parse_args()
set_env(ARGS.node)
node_modules(ARGS)
npm_run_build(ARGS)
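# Example invocation (added for illustration; all paths are placeholders, not
# taken from the original build configuration):
#   python3 run.py --src-dir ./ts2panda --dist-dir ./out/ts2panda \
#       --platform linux --node /usr/local/node/bin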
| 30.765306 | 73 | 0.643781 |
9a6ea1ca15de37cc3b3d5f33f269730b46239e7a
| 7,983 |
py
|
Python
|
mongo_cache/__init__.py
|
pricez/werkzeug_cache_mongodb
|
039a7cf6f70ac0a7f36ef3885b1012e014e638c6
|
[
"MIT"
] | null | null | null |
mongo_cache/__init__.py
|
pricez/werkzeug_cache_mongodb
|
039a7cf6f70ac0a7f36ef3885b1012e014e638c6
|
[
"MIT"
] | 5 |
2015-07-08T00:58:13.000Z
|
2015-07-20T21:34:38.000Z
|
mongo_cache/__init__.py
|
pricez/werkzeug_cache_mongodb
|
039a7cf6f70ac0a7f36ef3885b1012e014e638c6
|
[
"MIT"
] | null | null | null |
# coding: UTF-8
import pickle
from werkzeug.contrib.cache import BaseCache
from pymongo import MongoClient
from pymongo.errors import PyMongoError
from time import time
from bson.binary import Binary
class MongoCache(BaseCache):
"""Cache that uses MongoDB to store data.
:param default_timeout: the default timeout (in seconds) that is used if no
timeout is specified on :meth:`set`. A timeout of 0
indicates that the cache never expires.
"""
def __init__(self, default_timeout=300):
super(MongoCache, self).__init__(default_timeout)
_connection = MongoClient()
_database = _connection['TestCache']
self.collection = _database['Cache']
def _pickle(self, obj):
if not str(obj).isdigit():
_bytes = pickle.dumps(obj)
obj = Binary(_bytes)
return obj
def _unpickle(self, binary):
if isinstance(binary, Binary):
return pickle.loads(binary)
return binary
def _get_expiration(self, timeout):
if timeout is None:
timeout = self.default_timeout
if timeout > 0:
timeout = self._time() + timeout
return timeout
def _time(self):
"""
        Wrapper function for time.time() for easier mocking
"""
return time()
def _verify_timeout(self, doc):
"""Verifies if a document has expired.
:param doc: document to verify.
:returns: Whether the document has expired or not.
:rtype: boolean
"""
expires = doc['expires']
if expires == 0:
return False
if expires >= self._time():
return False
return True
def _get_doc(self, key, value, timeout):
return {
'_id': key,
'value': self._pickle(value),
'expires': self._get_expiration(timeout)
}
def get(self, key):
"""Look up key in the cache and return the value for it.
:param key: the key to be looked up.
:returns: The value if it exists and is readable, else ``None``.
"""
_filter = {'_id': key}
doc = self.collection.find_one(_filter)
if doc and not self._verify_timeout(doc):
return self._unpickle(doc['value'])
def delete(self, key):
"""Delete `key` from the cache.
:param key: the key to delete.
:returns: Whether the key existed and has been deleted.
:rtype: boolean
"""
_filter = {'_id': key}
count = self.collection.count(_filter)
if count:
self.collection.remove(_filter)
return True
return False
def get_many(self, *keys):
"""Returns a list of values for the given keys.
        For each key an item in the list is created::
foo, bar = cache.get_many("foo", "bar")
Has the same error handling as :meth:`get`.
:param keys: The function accepts multiple keys as positional
arguments.
"""
key_x_value = self.get_dict(*keys)
return [key_x_value[key] for key in keys]
def get_dict(self, *keys):
"""Like :meth:`get_many` but return a dict::
d = cache.get_dict("foo", "bar")
foo = d["foo"]
bar = d["bar"]
:param keys: The function accepts multiple keys as positional
arguments.
"""
result = {}
documents = self.collection.find({'_id': {'$in': keys}})
for document in documents:
if self._verify_timeout(document):
result[document['_id']] = None
else:
result[document['_id']] = self._unpickle(document['value'])
return result
def set(self, key, value, timeout=None):
"""Add a new key/value to the cache (overwrites value, if key already
exists in the cache).
:param key: the key to set
:param value: the value for the key
:param timeout: the cache timeout for the key (if not specified,
                        it uses the default timeout). A timeout of 0 indicates
that the cache never expires.
:returns: ``True`` if key has been updated, ``False`` for backend
errors. Pickling errors, however, will raise a subclass of
``pickle.PickleError``.
:rtype: boolean
"""
doc = self._get_doc(key, value, timeout)
inserted = self.collection.save(doc)
return True
def add(self, key, value, timeout=None):
"""Works like :meth:`set` but does not overwrite the values of already
existing keys.
:param key: the key to set
:param value: the value for the key
:param timeout: the cache timeout for the key or the default
timeout if not specified. A timeout of 0 indicates
that the cache never expires.
:returns: Same as :meth:`set`, but also ``False`` for already
existing keys.
:rtype: boolean
"""
if self.has(key):
return False
return self.set(key, value, timeout)
def set_many(self, mapping, timeout=None):
"""Sets multiple keys and values from a mapping.
:param mapping: a mapping with the keys/values to set.
:param timeout: the cache timeout for the key (if not specified,
it uses the default timeout). A timeout of 0
                        indicates that the cache never expires.
:returns: Whether all given keys have been set.
:rtype: boolean
"""
values = [self._get_doc(key, value, timeout) for key, value in mapping.iteritems()]
self.collection.insert_many(values)
return True
def delete_many(self, *keys):
"""Deletes multiple keys at once.
:param keys: The function accepts multiple keys as positional
arguments.
:returns: Whether all given keys have been deleted.
:rtype: boolean
"""
self.collection.remove({'_id': {'$in': keys}})
return True
def has(self, key):
"""Checks if a key exists in the cache without returning it. This is a
cheap operation that bypasses loading the actual data on the backend.
This method is optional and may not be implemented on all caches.
:param key: the key to check
"""
return self.collection.find_one({'_id': key}) is not None
def clear(self):
"""Clears the cache. Keep in mind that not all caches support
completely clearing the cache.
:returns: Whether the cache has been cleared.
:rtype: boolean
"""
self.collection.drop()
return True
def inc(self, key, delta=1):
"""Increments the value of a key by `delta`. If the key does
not yet exist it is initialized with `delta`.
For supporting caches this is an atomic operation.
:param key: the key to increment.
:param delta: the delta to add.
:returns: The new value or ``None`` for backend errors.
"""
if self.has(key):
_filter = {'_id': key}
document = {'$inc': {'value': delta}}
try:
self.collection.update(_filter, document)
except PyMongoError:
return None
else:
self.add(key, delta)
return self.get(key)
def dec(self, key, delta=1):
"""Decrements the value of a key by `delta`. If the key does
not yet exist it is initialized with `-delta`.
For supporting caches this is an atomic operation.
:param key: the key to increment.
:param delta: the delta to subtract.
:returns: The new value or `None` for backend errors.
"""
return self.inc(key, -delta)
| 35.638393 | 91 | 0.57635 |
ef72fcac0c9daefe02a1acaf326730e174aa3246
| 1,318 |
py
|
Python
|
src/bo4e/com/betrag.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 1 |
2022-03-02T12:49:44.000Z
|
2022-03-02T12:49:44.000Z
|
src/bo4e/com/betrag.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 21 |
2022-02-04T07:38:46.000Z
|
2022-03-28T14:01:53.000Z
|
src/bo4e/com/betrag.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | null | null | null |
"""
Contains Betrag class
and corresponding marshmallow schema for de-/serialization
"""
from decimal import Decimal
import attr
from marshmallow import fields
from marshmallow_enum import EnumField # type:ignore[import]
from bo4e.com.com import COM, COMSchema
from bo4e.enum.waehrungscode import Waehrungscode
# pylint: disable=too-few-public-methods
@attr.s(auto_attribs=True, kw_only=True)
class Betrag(COM):
"""
    This component is used to represent sum amounts (for example in offers and invoices) as monetary
    amounts. The unit is always the main currency, i.e. Euro, Dollar, etc.
.. HINT::
`Betrag JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/BetragSchema.json>`_
"""
# required attributes
    wert: Decimal = attr.ib(validator=attr.validators.instance_of(Decimal)) #: The monetary amount of the price.
waehrung: Waehrungscode = attr.ib(
validator=attr.validators.instance_of(Waehrungscode)
    ) #: The corresponding currency
class BetragSchema(COMSchema):
"""
Schema for de-/serialization of Betrag
"""
class_name = Betrag
# required attributes
wert = fields.Decimal(as_string=True)
waehrung = EnumField(Waehrungscode)
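# Minimal usage sketch (added for illustration; it assumes that Waehrungscode
# defines an EUR member, which is not visible in this file):
if __name__ == "__main__":
    _betrag = Betrag(wert=Decimal("12.50"), waehrung=Waehrungscode.EUR)
    print(BetragSchema().dumps(_betrag))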
| 29.954545 | 167 | 0.743551 |
ef5c7f42535bdc7001316bd2f0ff713f7ae21b7e
| 38,735 |
py
|
Python
|
SLIX/toolbox.py
|
oliviaguest/SLIX
|
2f19382c650267d0a76456c796cd3e0afe04d880
|
[
"MIT"
] | null | null | null |
SLIX/toolbox.py
|
oliviaguest/SLIX
|
2f19382c650267d0a76456c796cd3e0afe04d880
|
[
"MIT"
] | null | null | null |
SLIX/toolbox.py
|
oliviaguest/SLIX
|
2f19382c650267d0a76456c796cd3e0afe04d880
|
[
"MIT"
] | null | null | null |
import multiprocessing
import nibabel
import numpy
import pymp
import tifffile
import tqdm
import time
from scipy.signal import peak_widths, savgol_filter, find_peaks, peak_prominences
pymp.config.nested = True
# DEFAULT PARAMETERS
BACKGROUND_COLOR = -1
CPU_COUNT = min(16, multiprocessing.cpu_count())
MAX_DISTANCE_FOR_CENTROID_ESTIMATION = 2
NUMBER_OF_SAMPLES = 100
TARGET_PEAK_HEIGHT = 0.94
TARGET_PROMINENCE = 0.08
def all_peaks(line_profile, cut_edges=True):
"""
Detect all peaks from a given line profile in an SLI measurement. Peaks will not be filtered in any way.
To detect only significant peaks, use the 'peak_positions' method and apply thresholds.
Parameters
----------
line_profile: 1D-NumPy array with all intensity values of a single image pixel in the stack.
cut_edges: If True, only consider peaks within the second third of all detected peaks.
Returns
-------
List with the positions of all detected peaks.
"""
number_of_measurements = line_profile.shape[0] // 2
# Generate peaks
maxima, _ = find_peaks(line_profile)
# Only consider peaks which are in bounds
if cut_edges:
maxima = maxima[(maxima >= number_of_measurements // 2) & (maxima <= len(line_profile) -
number_of_measurements // 2)]
# Filter double peak
if numpy.all(numpy.isin([number_of_measurements // 2,
len(line_profile) - number_of_measurements // 2], maxima)):
maxima = maxima[1:]
return maxima
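# Usage sketch (added for illustration, not part of the original toolbox): for
# a line profile taken from a roi set prepared with create_roiset(image) below,
#     profile = roiset[pixel_index]
#     raw_peaks = all_peaks(profile)                       # every local maximum
#     peaks = accurate_peak_positions(raw_peaks, profile)  # prominence-filtered
# the returned indices refer to positions inside the extended line profile.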
def num_peaks_image(roiset, low_prominence=TARGET_PROMINENCE, high_prominence=numpy.inf, cut_edges=True):
"""
Calculate the number of peaks from each line profile in an SLI image series by detecting all peaks and applying thresholds to
remove unwanted peaks.
Parameters
----------
roiset: Full SLI measurement (series of images) which is prepared for the pipeline using the SLIX toolbox methods.
low_prominence: Lower prominence bound for detecting a peak.
high_prominence: Higher prominence bound for detecting a peak.
cut_edges: If True, only consider peaks within the second third of all detected peaks.
Returns
-------
NumPy array where each entry corresponds to the number of detected peaks within the first dimension of the SLI image series.
"""
return_value = pymp.shared.array((roiset.shape[0], 1), dtype=numpy.int32)
pbar = tqdm.tqdm(total=len(roiset), desc='Number of peaks')
number_of_finished_pixels = pymp.shared.array(CPU_COUNT, dtype=numpy.long)
last_sum_of_finished_pixels = 0
active_cores = pymp.shared.array(CPU_COUNT, dtype=numpy.bool)
active_cores[:] = True
with pymp.Parallel(CPU_COUNT) as p:
number_of_finished_pixels[p.thread_num] = 0
for i in p.range(0, len(roiset)):
roi = roiset[i]
peaks = all_peaks(roi, cut_edges)
return_value[i] = len(accurate_peak_positions(peaks, roi, low_prominence, high_prominence, False))
number_of_finished_pixels[p.thread_num] += 1
if p.thread_num == 0 and number_of_finished_pixels[p.thread_num] % 1000 == 0:
sum_of_finished_pixels = numpy.sum(number_of_finished_pixels)
pbar.update(sum_of_finished_pixels - last_sum_of_finished_pixels)
last_sum_of_finished_pixels = sum_of_finished_pixels
# When one core has finished, mark it. As long as not all threads are finished continue to update the
# progress bar.
active_cores[p.thread_num] = False
if p.thread_num == 0:
while numpy.any(active_cores == True):
time.sleep(0.5)
sum_of_finished_pixels = numpy.sum(number_of_finished_pixels)
pbar.update(sum_of_finished_pixels - last_sum_of_finished_pixels)
last_sum_of_finished_pixels = sum_of_finished_pixels
pbar.close()
return return_value
def accurate_peak_positions(peak_positions, line_profile, low_prominence=TARGET_PROMINENCE, high_prominence=numpy.inf,
centroid_calculation=True):
"""
    Post-processing method after peaks have been calculated using the 'all_peaks' method. The peaks are filtered based
on their peak prominence. Additionally, peak positions can be corrected by applying centroid corrections based on the
line profile.
Parameters
----------
peak_positions: Detected peak positions of the 'all_peaks' method.
line_profile: Original line profile used to detect all peaks. This array will be further
analyzed to better determine the peak positions.
low_prominence: Lower prominence bound for detecting a peak.
high_prominence: Higher prominence bound for detecting a peak.
centroid_calculation: Use centroid calculation to better determine the peak position regardless of the number of
measurements / illumination angles used.
Returns
-------
NumPy array with the positions of all detected peaks.
"""
n_roi = normalize(line_profile)
peak_prominence = numpy.array(peak_prominences(n_roi, peak_positions)[0])
selected_peaks = peak_positions[(peak_prominence > low_prominence) & (peak_prominence < high_prominence)]
if centroid_calculation:
return centroid_correction(n_roi, selected_peaks, low_prominence, high_prominence)
return selected_peaks
def peakdistance(peak_positions, number_of_measurements):
"""
Calculate the mean peak distance in degrees between two corresponding peaks within a line profile.
Parameters
----------
peak_positions: Detected peak positions of the 'all_peaks' method.
number_of_measurements: Number of images in the SLI image stack, i.e. the number of points in one
line profile.
Returns
-------
Floating point value containing the mean peak distance of the line profile in degrees.
"""
# Scale peaks correctly for direction
peak_positions = (peak_positions - number_of_measurements // 2) * (360.0 / number_of_measurements)
num_peaks = len(peak_positions)
# Compute peak distance for curves with 1-2 detected peaks
if num_peaks == 1: # distance for one peak = 0
return 0
if num_peaks >= 2 and num_peaks % 2 == 0:
distances = numpy.abs(peak_positions[::2] - peak_positions[1::2])
dist = distances.mean()
if dist > 180:
dist = 360 - dist
return dist
else:
return BACKGROUND_COLOR
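# Worked example (added for illustration): with number_of_measurements = 24 the
# scaling is 360 / 24 = 15 degrees per step, so peaks at extended-profile
# positions 14 and 26 become (14 - 12) * 15 = 30 and (26 - 12) * 15 = 210
# degrees; their distance is 180 degrees, which is returned unchanged because
# it does not exceed 180.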
def peakdistance_image(roiset, low_prominence=TARGET_PROMINENCE, high_prominence=numpy.inf, cut_edges=True,
centroid_calculation=True):
"""
Calculate the mean peak distance in degrees between two corresponding peaks for each line profile in an SLI image
series.
Note: Please do not use this method when evaluating many line profiles while generating most if not all of the
parameter maps. In this case, it is faster to write a simple pipeline as seen in 'SLIXParameterGenerator'.
Parameters
----------
roiset: Full SLI measurement (series of images) which is prepared for the pipeline using the SLIX toolbox methods.
low_prominence: Lower prominence bound for detecting a peak.
high_prominence: Higher prominence bound for detecting a peak.
cut_edges: If True, only consider peaks within the second third of all detected peaks.
centroid_calculation: Use centroid calculation to better determine the peak position regardless of the number of
measurements / illumination angles used.
Returns
-------
NumPy array of floating point values containing the mean peak distance of the line profiles in degrees.
"""
return_value = pymp.shared.array((roiset.shape[0], 1), dtype=numpy.float)
pbar = tqdm.tqdm(total=len(roiset), desc='Peak distance')
number_of_finished_pixels = pymp.shared.array(CPU_COUNT, dtype=numpy.long)
last_sum_of_finished_pixels = 0
active_cores = pymp.shared.array(CPU_COUNT, dtype=numpy.bool)
active_cores[:] = True
with pymp.Parallel(CPU_COUNT) as p:
number_of_finished_pixels[p.thread_num] = 0
for i in p.range(0, len(roiset)):
roi = roiset[i]
peaks = all_peaks(roi, cut_edges)
peaks = accurate_peak_positions(peaks, roi, low_prominence, high_prominence, centroid_calculation)
            return_value[i] = peakdistance(peaks, len(roi) // 2)
number_of_finished_pixels[p.thread_num] += 1
if p.thread_num == 0 and number_of_finished_pixels[p.thread_num] % 1000 == 0:
sum_of_finished_pixels = numpy.sum(number_of_finished_pixels)
pbar.update(sum_of_finished_pixels - last_sum_of_finished_pixels)
last_sum_of_finished_pixels = sum_of_finished_pixels
# When one core has finished, mark it. As long as not all threads are finished continue to update the
# progress bar.
active_cores[p.thread_num] = False
if p.thread_num == 0:
while numpy.any(active_cores == True):
time.sleep(0.5)
sum_of_finished_pixels = numpy.sum(number_of_finished_pixels)
pbar.update(sum_of_finished_pixels - last_sum_of_finished_pixels)
last_sum_of_finished_pixels = sum_of_finished_pixels
pbar.close()
return return_value
def prominence(peak_positions, line_profile):
"""
Calculate the mean peak prominence of all given peak positions within a line profile. The line profile will be
normalized by dividing the line profile through its mean value. Therefore, values above 1 are possible.
Parameters
----------
peak_positions: Detected peak positions of the 'all_peaks' method.
line_profile: Original line profile used to detect all peaks. This array will be further
analyzed to better determine the peak positions.
Returns
-------
Floating point value containing the mean peak prominence of the line profile in degrees.
"""
num_peaks = len(peak_positions)
prominence_roi = normalize(line_profile, kind_of_normalization=1)
return 0 if num_peaks == 0 else numpy.mean(peak_prominences(prominence_roi, peak_positions)[0])
def prominence_image(roiset, low_prominence=TARGET_PROMINENCE, high_prominence=numpy.inf, cut_edges=True):
"""
Calculate the mean peak prominence of all given peak positions for each line profile in an SLI image series. Each
line profile will be normalized by dividing the line profile through its mean value. Therefore, values above 1 are
possible.
Note: Please do not use this method when evaluating many line profiles while generating most if not all of the
parameter maps. In this case, it is faster to write a simple pipeline as seen in 'SLIXParameterGenerator'.
Parameters
----------
roiset: Full SLI measurement (series of images) which is prepared for the pipeline using the SLIX toolbox methods.
low_prominence: Lower prominence bound for detecting a peak.
high_prominence: Higher prominence bound for detecting a peak.
cut_edges: If True, only consider peaks within the second third of all detected peaks.
Returns
-------
NumPy array where each entry corresponds to the mean peak prominence of the line profile.
"""
return_value = pymp.shared.array((roiset.shape[0], 1), dtype=numpy.float)
pbar = tqdm.tqdm(total=len(roiset), desc='Peak prominence')
number_of_finished_pixels = pymp.shared.array(CPU_COUNT, dtype=numpy.long)
last_sum_of_finished_pixels = 0
active_cores = pymp.shared.array(CPU_COUNT, dtype=numpy.bool)
active_cores[:] = True
with pymp.Parallel(CPU_COUNT) as p:
number_of_finished_pixels[p.thread_num] = 0
for i in p.range(0, len(roiset)):
roi = roiset[i]
peaks = all_peaks(roi, cut_edges)
peaks = accurate_peak_positions(peaks, roi, low_prominence, high_prominence, False)
return_value[i] = prominence(peaks, roi)
number_of_finished_pixels[p.thread_num] += 1
if p.thread_num == 0 and number_of_finished_pixels[p.thread_num] % 1000 == 0:
sum_of_finished_pixels = numpy.sum(number_of_finished_pixels)
pbar.update(sum_of_finished_pixels - last_sum_of_finished_pixels)
last_sum_of_finished_pixels = sum_of_finished_pixels
# When one core has finished, mark it. As long as not all threads are finished continue to update the
# progress bar.
active_cores[p.thread_num] = False
if p.thread_num == 0:
while numpy.any(active_cores == True):
time.sleep(0.5)
sum_of_finished_pixels = numpy.sum(number_of_finished_pixels)
pbar.update(sum_of_finished_pixels - last_sum_of_finished_pixels)
last_sum_of_finished_pixels = sum_of_finished_pixels
pbar.close()
return return_value
def peakwidth(peak_positions, line_profile, number_of_measurements):
"""
Parameters
----------
peak_positions: Detected peak positions of the 'all_peaks' method.
line_profile: Original line profile used to detect all peaks. This array will be further
analyzed to better determine the peak positions.
number_of_measurements: Number of measurements during a full SLI measurement, i.e. the number of points in one line
profile.
Returns
-------
Floating point value containing the mean peak width of the line profile in degrees.
"""
num_peaks = len(peak_positions)
if num_peaks > 0:
widths = peak_widths(line_profile, peak_positions, rel_height=0.5)
return numpy.mean(widths[0]) * (360.0 / number_of_measurements)
else:
return 0
def peakwidth_image(roiset, low_prominence=TARGET_PROMINENCE, high_prominence=numpy.inf, cut_edges=True):
"""
Note: Please do not use this method when evaluating many line profiles while generating most if not all of the
parameter maps. In this case, it is faster to write a simple pipeline as seen in 'SLIXParameterGenerator'.
Parameters
----------
roiset: Full SLI measurement (series of images) which is prepared for the pipeline using the SLIX toolbox methods.
low_prominence: Lower prominence bound for detecting a peak.
high_prominence: Higher prominence bound for detecting a peak.
cut_edges: If True, only consider peaks within the second third of all detected peaks.
Returns
-------
NumPy array where each entry corresponds to the mean peak width of the line profile.
"""
return_value = pymp.shared.array((roiset.shape[0], 1), dtype=numpy.float)
pbar = tqdm.tqdm(total=len(roiset), desc='Peak width')
number_of_finished_pixels = pymp.shared.array(CPU_COUNT, dtype=numpy.long)
last_sum_of_finished_pixels = 0
active_cores = pymp.shared.array(CPU_COUNT, dtype=numpy.bool)
active_cores[:] = True
with pymp.Parallel(CPU_COUNT) as p:
number_of_finished_pixels[p.thread_num] = 0
for i in p.range(0, len(roiset)):
roi = roiset[i]
peaks = all_peaks(roi, cut_edges)
peaks = accurate_peak_positions(peaks, roi, low_prominence, high_prominence, False)
return_value[i] = peakwidth(peaks, roi, len(roi) // 2)
number_of_finished_pixels[p.thread_num] += 1
if p.thread_num == 0 and number_of_finished_pixels[p.thread_num] % 1000 == 0:
sum_of_finished_pixels = numpy.sum(number_of_finished_pixels)
pbar.update(sum_of_finished_pixels - last_sum_of_finished_pixels)
last_sum_of_finished_pixels = sum_of_finished_pixels
# When one core has finished, mark it. As long as not all threads are finished continue to update the
# progress bar.
active_cores[p.thread_num] = False
if p.thread_num == 0:
while numpy.any(active_cores == True):
time.sleep(0.5)
sum_of_finished_pixels = numpy.sum(number_of_finished_pixels)
pbar.update(sum_of_finished_pixels - last_sum_of_finished_pixels)
last_sum_of_finished_pixels = sum_of_finished_pixels
pbar.close()
return return_value
def crossing_direction(peak_positions, number_of_measurements):
"""
Calculate up to three direction angles based on the given peak positions. If more than six peaks are present, no
direction angle will be calculated to avoid errors. This will result in a direction angle of BACKGROUND_COLOR.
The peak positions are determined by the position of the corresponding peak pairs (i.e. 6 peaks: 1+4, 2+5, 3+6).
If two peaks are too far away or too near (outside of 180°±35°), the direction angle will be considered as invalid,
resulting in a direction angle of BACKGROUND_COLOR.
Parameters
----------
peak_positions: Detected peak positions of the 'all_peaks' method.
number_of_measurements: Number of measurements during a full SLI measurement, i.e. the number of points in the line
profile.
Returns
-------
NumPy array with the shape (3,) containing up to three direction angles. If a direction angle is invalid or missing,
the array entry will be BACKGROUND_COLOR instead.
"""
num_peaks = len(peak_positions)
# Scale peaks correctly for direction
peak_positions = (peak_positions - number_of_measurements // 2) * (360.0 / number_of_measurements)
# Change behaviour based on amount of peaks (steep, crossing, ...)
ret_val = numpy.full(3, BACKGROUND_COLOR, dtype=numpy.float)
if num_peaks == 1:
ret_val[0] = (270.0 - peak_positions[0]) % 180
elif num_peaks % 2 == 0 and num_peaks <= 6:
ret_val[:num_peaks // 2] = (270.0 - ((peak_positions[num_peaks // 2:] +
peak_positions[:num_peaks // 2]) / 2.0)) % 180
if num_peaks > 2:
distances = peak_positions[num_peaks // 2:] - peak_positions[:num_peaks // 2]
ret_val[:len(distances)][numpy.abs(distances - 180) > 35] = BACKGROUND_COLOR
return ret_val
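# Worked example (added for illustration): with number_of_measurements = 24
# (15 degrees per step) and peaks at extended positions [13, 17, 25, 29], the
# scaled angles are [15, 75, 195, 255] degrees. Both pairs (15, 195) and
# (75, 255) are exactly 180 degrees apart, so the returned directions are
# (270 - 105) % 180 = 165 and (270 - 165) % 180 = 105, while the unused third
# entry stays at BACKGROUND_COLOR.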
def crossing_direction_image(roiset, low_prominence=TARGET_PROMINENCE, high_prominence=numpy.inf, cut_edges=True):
"""
Calculate up to three direction angles based on the given peak positions. If more than six peaks are present, no
direction angle will be calculated to avoid errors. This will result in a direction angle of BACKGROUND_COLOR.
The peak positions are determined by the position of the corresponding peak pairs (i.e. 6 peaks: 1+4, 2+5, 3+6).
If two peaks are too far away or too near (outside of 180°±35°), the direction angle will be considered as invalid,
resulting in a direction angle of BACKGROUND_COLOR.
Note: Please do not use this method when evaluating many line profiles while generating most if not all of the
parameter maps. In this case, it is faster to write a simple pipeline as seen in 'SLIXParameterGenerator'.
Parameters
----------
roiset: Full SLI measurement (image series) which is prepared for the pipeline using the SLIX toolbox methods.
low_prominence: Lower prominence bound for detecting a peak.
high_prominence: Higher prominence bound for detecting a peak.
cut_edges: If True, only consider peaks within the second third of all detected peaks.
Returns
-------
NumPy array with the shape (x, 3) containing up to three direction angles.
x equals the number of pixels of the SLI image series. If a direction angle is invalid or missing, the array entry
will be BACKGROUND_COLOR instead.
"""
return_value = pymp.shared.array((roiset.shape[0], 3), dtype=numpy.float)
pbar = tqdm.tqdm(total=len(roiset), desc='Direction')
number_of_finished_pixels = pymp.shared.array(CPU_COUNT, dtype=numpy.long)
last_sum_of_finished_pixels = 0
active_cores = pymp.shared.array(CPU_COUNT, dtype=numpy.bool)
active_cores[:] = True
with pymp.Parallel(CPU_COUNT) as p:
number_of_finished_pixels[p.thread_num] = 0
for i in p.range(0, len(roiset)):
roi = roiset[i]
peaks = all_peaks(roi, cut_edges)
peaks = accurate_peak_positions(peaks, roi, low_prominence, high_prominence)
return_value[i, :] = crossing_direction(peaks, len(roi) // 2)
number_of_finished_pixels[p.thread_num] += 1
if p.thread_num == 0 and number_of_finished_pixels[p.thread_num] % 1000 == 0:
sum_of_finished_pixels = numpy.sum(number_of_finished_pixels)
pbar.update(sum_of_finished_pixels - last_sum_of_finished_pixels)
last_sum_of_finished_pixels = sum_of_finished_pixels
# When one core has finished, mark it. As long as not all threads are finished continue to update the
# progress bar.
active_cores[p.thread_num] = False
if p.thread_num == 0:
while numpy.any(active_cores == True):
time.sleep(0.5)
sum_of_finished_pixels = numpy.sum(number_of_finished_pixels)
pbar.update(sum_of_finished_pixels - last_sum_of_finished_pixels)
last_sum_of_finished_pixels = sum_of_finished_pixels
pbar.close()
return return_value
def non_crossing_direction(peak_positions, number_of_measurements):
"""
Calculate one direction angle based on the given peak positions. If more than two peaks are present, no
direction angle will be calculated to avoid errors. This will result in a direction angle of BACKGROUND_COLOR.
The direction angle is determined by the mid position between two peaks.
Parameters
----------
peak_positions: Detected peak positions of the 'all_peaks' method.
number_of_measurements: Number of images in an SLI image stack, i.e. the number of points in the line
profile.
Returns
-------
Floating point value containing the direction angle in degrees.
If a direction angle is invalid or missing, the returned value will be BACKGROUND_COLOR instead.
"""
num_peaks = len(peak_positions)
# Scale peaks correctly for direction
peak_positions = (peak_positions - number_of_measurements // 2) * (360.0 / number_of_measurements)
# Change behaviour based on amount of peaks (steep, crossing, ...)
if num_peaks == 1:
return (270 - peak_positions[0]) % 180
elif num_peaks == 2:
return (270 - ((peak_positions[1] + peak_positions[0]) / 2.0)) % 180
else:
return BACKGROUND_COLOR
def non_crossing_direction_image(roiset, low_prominence=TARGET_PROMINENCE, high_prominence=numpy.inf, cut_edges=True):
"""
Calculate one direction angle based on the given peak positions. If more than two peaks are present, no
direction angle will be calculated to avoid errors. This will result in a direction angle of BACKGROUND_COLOR.
The direction angle is determined by the mid position between two peaks.
Note: Please do not use this method when evaluating many line profiles while generating most if not all of the
parameter maps. In this case, it is faster to write a simple pipeline as seen in SLIXParameterGenerator.
Parameters
----------
roiset: Full SLI measurement (image series) which is prepared for the pipeline using the SLIX toolbox methods.
low_prominence: Lower prominence bound for detecting a peak.
high_prominence: Higher prominence bound for detecting a peak.
cut_edges: If True, only consider peaks within the second third of all detected peaks.
Returns
-------
NumPy array of floating point values containing the direction angle in degree.
If a direction angle is invalid or missing, the returned value will be BACKGROUND_COLOR instead.
"""
return_value = pymp.shared.array((roiset.shape[0], 1), dtype=numpy.float)
pbar = tqdm.tqdm(total=len(roiset), desc='Non crossing direction')
number_of_finished_pixels = pymp.shared.array(CPU_COUNT, dtype=numpy.long)
last_sum_of_finished_pixels = 0
active_cores = pymp.shared.array(CPU_COUNT, dtype=numpy.bool)
active_cores[:] = True
with pymp.Parallel(CPU_COUNT) as p:
number_of_finished_pixels[p.thread_num] = 0
for i in p.range(0, len(roiset)):
roi = roiset[i]
peaks = all_peaks(roi, cut_edges)
peaks = accurate_peak_positions(peaks, roi, low_prominence, high_prominence)
return_value[i] = non_crossing_direction(peaks, len(roi) // 2)
number_of_finished_pixels[p.thread_num] += 1
if p.thread_num == 0 and number_of_finished_pixels[p.thread_num] % 1000 == 0:
sum_of_finished_pixels = numpy.sum(number_of_finished_pixels)
pbar.update(sum_of_finished_pixels - last_sum_of_finished_pixels)
last_sum_of_finished_pixels = sum_of_finished_pixels
# When one core has finished, mark it. As long as not all threads are finished continue to update the
# progress bar.
active_cores[p.thread_num] = False
if p.thread_num == 0:
while numpy.any(active_cores == True):
time.sleep(0.5)
sum_of_finished_pixels = numpy.sum(number_of_finished_pixels)
pbar.update(sum_of_finished_pixels - last_sum_of_finished_pixels)
last_sum_of_finished_pixels = sum_of_finished_pixels
pbar.close()
return return_value
def create_sampling(line_profile, peak_positions, left_bound, right_bound, target_peak_height,
number_of_samples=NUMBER_OF_SAMPLES):
"""
Parameters
----------
line_profile: Original line profile used to detect all peaks. This array will be further
analyzed to better determine the peak positions.
peak_positions: Detected peak positions of the 'all_peaks' method.
left_bound: Left bound for linear interpolation.
right_bound: Right bound for linear interpolation.
target_peak_height: Targeted peak height for centroid calculation.
number_of_samples: Number of samples used for linear interpolation.
Returns
-------
Linear interpolated array, new left bound, new right bound for centroid calculation.
"""
    sampling = numpy.interp(numpy.arange(left_bound - 1, right_bound + 1, 1 / number_of_samples),
                            numpy.arange(left_bound - 1, right_bound + 1), line_profile[left_bound - 1:right_bound + 1])
if line_profile[left_bound] > target_peak_height:
_left_bound = number_of_samples
else:
choices = numpy.argwhere(sampling[:(peak_positions - left_bound + 1) * number_of_samples] < target_peak_height)
if len(choices) > 0:
_left_bound = choices.max()
else:
_left_bound = number_of_samples
if line_profile[right_bound] > target_peak_height:
_right_bound = len(sampling) - number_of_samples
else:
choices = numpy.argwhere(sampling[(peak_positions - left_bound + 1) * number_of_samples:] < target_peak_height)
if len(choices) > 0:
_right_bound = (peak_positions - left_bound + 1) * number_of_samples + choices.min()
else:
_right_bound = len(sampling) - number_of_samples
return sampling, _left_bound, _right_bound
def centroid_correction(line_profile, peak_positions, low_prominence=TARGET_PROMINENCE, high_prominence=numpy.inf):
"""
Correct peak positions from a line profile by looking at only the peak with a given threshold using a centroid
calculation. If a minimum is found in the considered interval, this minimum will be used as the limit instead.
The range for the peak correction is limited by MAX_DISTANCE_FOR_CENTROID_ESTIMATION.
Parameters
----------
line_profile: Original line profile used to detect all peaks. This array will be further
analyzed to better determine the peak positions.
peak_positions: Detected peak positions of the 'all_peaks' method.
low_prominence: Lower prominence bound for detecting a peak.
high_prominence: Higher prominence bound for detecting a peak.
Returns
-------
NumPy array with the positions of all detected peak positions corrected with the centroid calculation.
"""
reverse_roi = -1 * line_profile
minima, _ = find_peaks(reverse_roi, prominence=(low_prominence, high_prominence))
centroid_maxima = peak_positions.copy().astype('float32')
for i in range(peak_positions.shape[0]):
peak = peak_positions[i]
target_peak_height = line_profile[peak_positions[i]] - line_profile[peak_positions].max() * \
(1 - TARGET_PEAK_HEIGHT)
minima_distances = peak - minima
left_position = right_position = peak
# Check for minima in left and set left position accordingly
target_distances = (minima_distances <= MAX_DISTANCE_FOR_CENTROID_ESTIMATION) & (minima_distances > 0)
if target_distances.any():
left_position = peak - minima_distances[target_distances].min()
# Look for peak height
below_target_peak_height = numpy.argwhere(
line_profile[peak - MAX_DISTANCE_FOR_CENTROID_ESTIMATION: peak] < target_peak_height)
if len(below_target_peak_height) > 0:
below_target_peak_height = below_target_peak_height.max()
temp_left_position = peak - MAX_DISTANCE_FOR_CENTROID_ESTIMATION + below_target_peak_height
if temp_left_position < left_position:
left_position = temp_left_position
else:
temp_left_position = peak - MAX_DISTANCE_FOR_CENTROID_ESTIMATION
if temp_left_position < left_position:
left_position = temp_left_position
# Repeat for right bound
target_distances = (minima_distances >= -MAX_DISTANCE_FOR_CENTROID_ESTIMATION) & (minima_distances < 0)
if target_distances.any():
right_position = peak - minima_distances[target_distances].min()
        # Look for the target peak height on the right side
below_target_peak_height = numpy.argwhere(
line_profile[peak: peak + MAX_DISTANCE_FOR_CENTROID_ESTIMATION] < target_peak_height)
if len(below_target_peak_height) > 0:
below_target_peak_height = below_target_peak_height.min()
temp_right_position = peak + MAX_DISTANCE_FOR_CENTROID_ESTIMATION - below_target_peak_height
if temp_right_position > right_position:
right_position = temp_right_position
else:
temp_right_position = peak + MAX_DISTANCE_FOR_CENTROID_ESTIMATION
if temp_right_position > right_position:
right_position = temp_right_position
sampling, left_bound, right_bound = create_sampling(line_profile, peak, left_position, right_position,
target_peak_height)
integer_left_pos = (left_position - 1) + 1 / NUMBER_OF_SAMPLES * left_bound
integer_right_pos = (left_position - 1) + 1 / NUMBER_OF_SAMPLES * right_bound
# Move at max one step size on the x-coordinate axis to the left or right to prevent too much movement
centroid = numpy.sum(numpy.arange(integer_left_pos, integer_right_pos - 1e-10, 0.01) *
sampling[left_bound:right_bound]) / numpy.sum(sampling[left_bound:right_bound])
if numpy.abs(centroid - peak) > 1:
centroid = peak + numpy.sign(centroid - peak)
centroid_maxima[i] = centroid
return centroid_maxima
def read_image(FILEPATH):
"""
Reads image file and returns it.
Supported file formats: NIfTI, Tiff.
Arguments:
FILEPATH: Path to image
Returns:
numpy.array: Image with shape [x, y, z] where [x, y] is the size of a single image and z specifies the number
of measurements
"""
# Load NIfTI dataset
if FILEPATH.endswith('.nii'):
data = nibabel.load(FILEPATH).get_fdata()
data = numpy.squeeze(numpy.swapaxes(data, 0, 1))
elif FILEPATH.endswith('.tif') or FILEPATH.endswith('.tiff'):
data = tifffile.imread(FILEPATH)
data = numpy.squeeze(numpy.moveaxis(data, 0, -1))
else:
raise ValueError('Datatype not supported. Expected .nii or .tiff/.tif file with three dimensions.')
if len(data.shape) < 3:
raise ValueError('Datatype not supported. Expected .nii or .tiff/.tif file with three dimensions.')
return data
def create_background_mask(IMAGE, threshold=10):
"""
    Creates a background mask by flagging every image pixel that falls below the given threshold. As background
    pixels are near zero in all images of the SLI stack, this mask removes most of the background and allows for
    better approximations using the available features. It is advised to use this function.
Arguments:
IMAGE: 2D/3D-image containing the z-axis in the last dimension
Keyword Arguments:
        threshold: Threshold for mask creation (default: {10})
Returns:
numpy.array: 1D/2D-image which masks the background as True and foreground as False
"""
mask = numpy.max(IMAGE < threshold, axis=-1)
return mask
def create_roiset(IMAGE, ROISIZE=1, extend=True):
"""
Create roi set of the given image by creating an image containing the average value of pixels within the
    specified ROISIZE. The returned image will have twice the size in the third axis because half of the measurement
    is appended to the front and to the back of each line profile for the peak detection.
Arguments:
IMAGE: Image containing multiple images in a 3D-stack
ROISIZE: Size in pixels which are used to create the region of interest image
Returns:
numpy.array: Image with shape [x/ROISIZE, y/ROISIZE, 2*'number of measurements'] containing the average value
of the given roi for each image in z-axis.
"""
# Get image dimensions
x = IMAGE.shape[0]
y = IMAGE.shape[1]
number_of_measurements = IMAGE.shape[2]
nx = numpy.ceil(x / ROISIZE).astype('int')
ny = numpy.ceil(y / ROISIZE).astype('int')
if extend:
roi_set = pymp.shared.array((nx * ny, 2 * number_of_measurements), dtype='float32')
else:
roi_set = pymp.shared.array((nx * ny, number_of_measurements), dtype='float32')
# ROISIZE == 1 is exactly the same as the original image
if ROISIZE > 1:
with pymp.Parallel(CPU_COUNT) as p:
for i in p.range(0, nx):
for j in range(0, ny):
# Create average of selected ROI and append two halfs to the front and back
roi = IMAGE[ROISIZE * i:ROISIZE * i + ROISIZE, ROISIZE * j:ROISIZE * j + ROISIZE, :]
average_per_dimension = numpy.average(numpy.average(roi, axis=1), axis=0).flatten()
if extend:
average_per_dimension = numpy.concatenate(
(average_per_dimension[-number_of_measurements // 2:], average_per_dimension,
average_per_dimension[:number_of_measurements // 2]))
roi_set[i * ny + j] = average_per_dimension
else:
with pymp.Parallel(CPU_COUNT) as p:
for i in p.range(0, nx):
for j in range(0, ny):
roi = IMAGE[i, j, :]
if extend:
roi = numpy.concatenate((roi[-number_of_measurements // 2:], roi,
roi[:number_of_measurements // 2]))
roi_set[i * ny + j] = roi
return roi_set
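# Illustrative usage sketch (the ROISIZE value is an example only): with the default
# extend=True, half a period is appended to the front and back of each averaged line
# profile, so the last axis holds twice the number of measurements.
#   roiset = create_roiset(data, ROISIZE=2)   # shape: [ceil(x/2) * ceil(y/2), 2 * z]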
def smooth_roiset(roiset, range=45, polynom_order=2):
"""
    Applies a Savitzky-Golay filter to the given roiset and returns the smoothed line profiles.
Args:
roiset: Flattened image with the dimensions [x*y, z] where z equals the number of measurements
range: Used window length for the Savitzky-Golay filter
polynom_order: Used polynomial order for the Savitzky-Golay filter
Returns: Line profiles with applied Savitzky-Golay filter and the same shape as the original roi set.
"""
roiset_rolled = pymp.shared.array(roiset.shape, dtype='float32')
with pymp.Parallel(CPU_COUNT) as p:
for i in p.range(len(roiset)):
roi = roiset[i]
# Extension of the range to include circularity.
roi_c = numpy.concatenate((roi, roi, roi))
roi_rolled = savgol_filter(roi_c, range, polynom_order)
            # Shrink array back down to its original size
roi_rolled = roi_rolled[len(roi):-len(roi)]
roiset_rolled[i] = roi_rolled
return roiset_rolled
def normalize(roi, kind_of_normalization=0):
"""
Normalize given line profile by using a normalization technique based on the kind_of_normalization parameter.
0 : Scale line profile to be between 0 and 1
    1 : Divide line profile by its mean value
Arguments:
roi: Line profile of a single pixel / region of interest
kind_of_normalization: Normalization technique which will be used for the calculation
Returns:
numpy.array -- Normalized line profile of the given roi parameter
"""
roi = roi.copy().astype('float32')
if not numpy.all(roi == 0):
if roi.max() == roi.min():
normalized_roi = numpy.ones(roi.shape)
else:
if kind_of_normalization == 0:
normalized_roi = (roi - roi.min()) / (roi.max() - roi.min())
elif kind_of_normalization == 1:
normalized_roi = roi / numpy.mean(roi)
return normalized_roi
return roi
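# Small worked example of the two normalization modes (numbers are illustrative only):
#   normalize(numpy.array([2., 4., 6.]), 0)  ->  [0.0, 0.5, 1.0]   (scaled between 0 and 1)
#   normalize(numpy.array([2., 4., 6.]), 1)  ->  [0.5, 1.0, 1.5]   (divided by the mean)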
def reshape_array_to_image(image, x, ROISIZE):
"""
Convert array back to image keeping the lower resolution based on the ROISIZE.
Arguments:
image: Array created by other methods with lower resolution based on ROISIZE
x: Size of original image in x-dimension
ROISIZE: Size of the ROI used for evaluating the roiset
Returns:
numpy.array -- Reshaped image based on the input array
"""
if image.shape[-1] == 1 or len(image.shape) == 1:
image_reshaped = image.reshape(
(numpy.ceil(x / ROISIZE).astype('int'), image.shape[0] // numpy.ceil(x / ROISIZE).astype('int')))
else:
image_reshaped = image.reshape(
(
numpy.ceil(x / ROISIZE).astype('int'),
image.shape[0] // numpy.ceil(x / ROISIZE).astype('int'),
image.shape[-1]
)
)
return image_reshaped
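# Illustrative end-to-end sketch (file name and ROISIZE are example values only):
#   data = read_image('measurement_stack.tiff')
#   flat = create_roiset(data, ROISIZE=2)
#   first_measurement = reshape_array_to_image(flat[:, 0], data.shape[0], ROISIZE=2)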
| 47.065614 | 266 | 0.685865 |
322c3cadec788710b2de57173c1bc8e648bd6fc7
| 3,731 |
py
|
Python
|
IdeaProjects/PandasProj/PandasCourse2.py
|
sinomiko/project
|
00fadb0033645f103692f5b06c861939a9d4aa0e
|
[
"BSD-3-Clause"
] | 1 |
2018-12-30T14:07:42.000Z
|
2018-12-30T14:07:42.000Z
|
IdeaProjects/PandasProj/PandasCourse2.py
|
sinomiko/project
|
00fadb0033645f103692f5b06c861939a9d4aa0e
|
[
"BSD-3-Clause"
] | null | null | null |
IdeaProjects/PandasProj/PandasCourse2.py
|
sinomiko/project
|
00fadb0033645f103692f5b06c861939a9d4aa0e
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding: utf-8
import pandas as pd
import numpy as np
# Part 2: Common basic Pandas methods
#
# 2.1 Reading and writing data
#
# Pandas supports reading and writing most common data file formats. In general, methods that read files start with pd.read_, while methods that write files start with pd.to_. A detailed table follows.
#
# (figure placeholder from the original tutorial)
#
# Let's use the data file downloaded earlier as an example; if you have not downloaded it yet, see section 1.5.
df = pd.read_csv("los_census.csv")  # read the csv file
print df
# As you can see, the file has been read. Because there are too many columns, the output is displayed in segments. A count of rows and columns is shown at the bottom of the output; here it is 319 rows x 7 columns.
# Notice that a file read by pandas is already a DataFrame. The above demonstrates reading a csv file; the other formats are very similar.
#
# However, the data we get is often laid out like the los_census.txt file, as shown below.
df2 = pd.read_table("los_census.txt")  # read the txt file
print df2
# In fact, los_census.txt is the same as the los_census.csv file: a csv file is a comma-separated values file, with the data separated by commas.
#
# So how do we turn this kind of file into a DataFrame? This is where the parameters of the read methods come in, for example the sep separator parameter.
df3 = pd.read_table("los_census.txt", sep=',')  # read the txt file
print df3
# Besides sep, other commonly used parameters when reading files are:
#
# header=, which selects the row to use as the column index names.
# names=[], which sets custom column index names.
df4 = pd.read_csv("los_census.csv", header=1)  # use the second row as the column index names
print df4
df5 = pd.read_csv("los_census.csv", names=['A', 'B', 'C', 'D', 'E', 'F', 'G'])  # custom column index names
print df5
# Now that reading files is covered, let's talk about writing them. Saving a file is just as simple. For example, let's store the los_census.csv file in json format.
df6 = pd.read_csv("los_census.csv")  # read the csv file
df6.to_json("1.json")  # store it as a json file
# Of course, you can also use to_excel("1.xlsx") to store the data in the .xlsx format supported by Excel. Just note that this raises an error in the online environment; in that case, install the openpyxl package as well:
#
# sudo pip install openpyxl
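# A minimal sketch of the Excel export mentioned above (assumes openpyxl is installed):
# df6.to_excel("1.xlsx")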
# 2.2 Head & Tail
#
# Sometimes the files we read are very large. Printing all of them for a preview is neither pretty nor fast. Fortunately, Pandas provides the head() and tail() methods, which let us preview just a small slice of the data.
#
# As the name suggests, head() previews the dataset from the beginning; without arguments it shows the first 5 rows by default, and you can also choose how many rows to display.
df21 = pd.read_csv("los_census.csv")  # read the csv file
print df21.head()  # shows the first 5 rows by default
print df21.head(7)  # show the first 7 rows
# The tail() method displays rows from the end of the dataset, likewise 5 by default and customizable.
print df21.tail()  # shows the last 5 rows by default
print df21.tail(7)  # show the last 7 rows
# 2.3 Statistical methods
#
# Pandas provides several statistical and descriptive methods that make it easy to understand a dataset from a high-level view.
#
# 1. describe()
#
# describe() gives an overview of the dataset, printing its count, maximum, minimum and so on.
print df21.describe()
# As shown above, for a DataFrame each column is summarized separately.
# 2. idxmin() & idxmax()
#
# idxmin() and idxmax() compute the index labels corresponding to the minimum and maximum values.
print df21.idxmin()
print df21.idxmax()
# 3. count()
#
# count() counts the number of non-null values.
print df21.count()
# 4. value_counts()
#
# value_counts() only applies to a Series; it counts how many times each value occurs.
s = pd.Series(np.random.randint(0, 9, size=100))  # create a Series with 100 random values between 0 and 9
print s
print s.value_counts()
# 2.4 Computation methods
#
# Besides the statistical methods, Pandas also provides many computation methods.
#
# 1. sum()
#
# sum() computes the sum of numeric data.
print df21.sum()
# 2. mean()
#
# mean() computes the mean of numeric data.
print df21.mean()
# 3. median()
#
# median() computes the arithmetic median of numeric data.
print df21.median()
# 2.5 Label alignment
#
# Index labels are a very important feature of Pandas. Sometimes labels end up misaligned because of missing data or other factors, or you simply want to match a new set of labels. For this, Pandas provides the index label alignment method reindex().
#
# reindex() serves three main purposes:
#
# Reorder the existing data to match a new set of labels.
# Insert missing value (NaN) markers at positions where a label has no corresponding data.
# In special cases, fill data for missing labels using logic (highly relevant for time series data).
s25 = pd.Series(data=[1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
print s25
print s25.reindex(['e', 'b', 'f', 'd'])
# We can see that in the reindexed data, values whose labels already existed are matched automatically, while data missing for new labels is filled in with NaN.
#
# Of course, the same works for DataFrame data.
df26 = pd.DataFrame(data={'one': [1, 2, 3], 'two': [4, 5, 6], 'three': [7, 8, 9]}, index=['a', 'b', 'c'])
print df26
print df26.reindex(index=['b', 'c', 'a'], columns=['three', 'two', 'one'])
# You can even align the Series above to the index sequence of the DataFrame below.
print s25.reindex(df26.index)
# 2.6 Sorting
#
# No data processing is complete without sorting, a very common operation. Pandas offers several ways to sort; let's go through them.
#
# 1. Sort by index
#
# The first way is sorting by index, using Series.sort_index() or DataFrame.sort_index().
df261 = pd.DataFrame(data={'one': [1, 2, 3], 'two': [4, 5, 6], 'three': [7, 8, 9], 'four': [10, 11, 12]}, index=['a', 'c', 'b'])
print df261
# Reorder the rows by index:
print df261.sort_index()
# Or add a parameter to sort in descending order:
print df261.sort_index(ascending=False)
# 2. Sort by values
#
# The second way is sorting by values, using Series.sort_values() or DataFrame.sort_values(). For example:
# Sort by the 'three' column in ascending order:
print df261.sort_values(by='three')
# You can also sort by two columns at the same time:
print df261[['one', 'two', 'three', 'four']].sort_values(by=['one','two'])
| 22.612121 | 128 | 0.691236 |
0889fa2aa3e3f7524a17275a5e401ed854c8c3f3
| 592 |
py
|
Python
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 82/82.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 101 |
2021-12-20T11:57:11.000Z
|
2022-03-23T09:49:13.000Z
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 82/82.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 4 |
2022-01-12T11:55:56.000Z
|
2022-02-12T04:53:33.000Z
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 82/82.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 38 |
2022-01-12T11:56:16.000Z
|
2022-03-23T10:07:52.000Z
|
#Use Python to calculate the distance (in AU units) between Jupiter and Sun on January 1, 1230.
#A: I didn't know this so I did some internet research and reveal that ephem is used for astronomical clculations.
#The ephem library was installed with pip, and for Windows a precompiled library from http://www.lfd.uci.edu/~gohlke/pythonlibs/#pyephem was installed with pip
#Try to find your own version from that page
import ephem
jupiter = ephem.Jupiter()
jupiter.compute('1230/1/1')
distance_au_units = jupiter.sun_distance
distance_km = distance_au_units * 149597870.691
print(distance_km)
| 49.333333 | 159 | 0.793919 |
08f612cde2c2fa73d6466afe8e1bd9db483c0d2f
| 8,139 |
py
|
Python
|
hihope_neptune-oh_hid/00_src/v0.1/third_party/LVM2/test/dbus/testlib.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | 1 |
2022-02-15T08:51:55.000Z
|
2022-02-15T08:51:55.000Z
|
hihope_neptune-oh_hid/00_src/v0.3/third_party/LVM2/test/dbus/testlib.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | null | null | null |
hihope_neptune-oh_hid/00_src/v0.3/third_party/LVM2/test/dbus/testlib.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import string
import random
import functools
import xml.etree.ElementTree as Et
from collections import OrderedDict
import dbus
import os
import sys
import time
BUS_NAME = os.getenv('LVM_DBUS_NAME', 'com.redhat.lvmdbus1')
BASE_INTERFACE = 'com.redhat.lvmdbus1'
MANAGER_INT = BASE_INTERFACE + '.Manager'
MANAGER_OBJ = '/' + BASE_INTERFACE.replace('.', '/') + '/Manager'
PV_INT = BASE_INTERFACE + ".Pv"
VG_INT = BASE_INTERFACE + ".Vg"
LV_INT = BASE_INTERFACE + ".Lv"
THINPOOL_INT = BASE_INTERFACE + ".ThinPool"
SNAPSHOT_INT = BASE_INTERFACE + ".Snapshot"
LV_COMMON_INT = BASE_INTERFACE + ".LvCommon"
JOB_INT = BASE_INTERFACE + ".Job"
CACHE_POOL_INT = BASE_INTERFACE + ".CachePool"
CACHE_LV_INT = BASE_INTERFACE + ".CachedLv"
THINPOOL_LV_PATH = '/' + THINPOOL_INT.replace('.', '/')
validate_introspection = True
def rs(length, suffix, character_set=string.ascii_lowercase):
return ''.join(random.choice(character_set) for _ in range(length)) + suffix
def mib(s):
return 1024 * 1024 * s
def std_err_print(*args):
sys.stderr.write(' '.join(map(str, args)) + '\n')
sys.stderr.flush()
class DbusIntrospection(object):
@staticmethod
def introspect(xml_representation):
interfaces = {}
root = Et.fromstring(xml_representation)
for c in root:
if c.tag == "interface":
in_f = c.attrib['name']
interfaces[in_f] = dict(methods=OrderedDict(), properties={})
for nested in c:
if nested.tag == "method":
mn = nested.attrib['name']
interfaces[in_f]['methods'][mn] = OrderedDict()
for arg in nested:
if arg.tag == 'arg':
arg_dir = arg.attrib['direction']
if arg_dir == 'in':
n = arg.attrib['name']
else:
n = 'RETURN_VALUE'
arg_type = arg.attrib['type']
if n:
v = dict(
name=mn,
a_dir=arg_dir,
a_type=arg_type
)
interfaces[in_f]['methods'][mn][n] = v
elif nested.tag == 'property':
pn = nested.attrib['name']
p_access = nested.attrib['access']
p_type = nested.attrib['type']
interfaces[in_f]['properties'][pn] = \
dict(p_access=p_access, p_type=p_type)
else:
pass
# print('Interfaces...')
# for k, v in list(interfaces.items()):
# print('Interface %s' % k)
# if v['methods']:
# for m, args in list(v['methods'].items()):
# print(' method: %s' % m)
# for a, aa in args.items():
# print(' method arg: %s type %s' %
# (a, aa['a_type']))
# if v['properties']:
# for p, d in list(v['properties'].items()):
# print(' Property: %s type= %s' % (p, d['p_type']))
# print('End interfaces')
return interfaces
def btsr(value):
t = type(value)
if t == dbus.Boolean:
return 'b'
elif t == dbus.ObjectPath:
return 'o'
elif t == dbus.String:
return 's'
elif t == dbus.Byte:
return 'y'
elif t == dbus.Int16:
return 'n'
elif t == dbus.Int32:
return 'i'
elif t == dbus.Int64:
return 'x'
elif t == dbus.UInt16:
return 'q'
elif t == dbus.UInt32:
return 'u'
elif t == dbus.UInt64:
return 't'
elif t == dbus.Double:
return 'd'
elif t == dbus.Struct:
rc = '('
for vt in value:
rc += btsr(vt)
rc += ')'
return rc
elif t == dbus.Array:
rc = "a"
for i in value:
rc += btsr(i)
break
return rc
else:
raise RuntimeError("Unhandled type %s" % str(t))
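# Illustrative examples of the type-to-signature mapping (values are made up):
#   btsr(dbus.UInt32(7)) -> 'u'
#   btsr(dbus.Array([dbus.String('vg0')])) -> 'as'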
def verify_type(value, dbus_str_rep):
actual_str_rep = btsr(value)
if dbus_str_rep != actual_str_rep:
# print("%s ~= %s" % (dbus_str_rep, actual_str_rep))
# Unless we have a full filled out type we won't match exactly
if not dbus_str_rep.startswith(actual_str_rep):
raise RuntimeError(
"Incorrect type, expected= %s actual = %s object= %s" %
(dbus_str_rep, actual_str_rep, str(type(value))))
class RemoteInterface(object):
def _set_props(self, props=None):
if not props:
for _ in range(0, 3):
try:
prop_interface = dbus.Interface(self.dbus_object,
'org.freedesktop.DBus.Properties')
props = prop_interface.GetAll(self.interface)
break
except dbus.exceptions.DBusException as dbe:
if "GetAll" not in str(dbe):
raise dbe
if props:
for kl, vl in list(props.items()):
# Verify type is correct!
if self.introspect:
verify_type(vl, self.introspect[self.interface]
['properties'][kl]['p_type'])
setattr(self, kl, vl)
@property
def object_path(self):
return self.dbus_object.object_path
def __init__(
self, dbus_object, interface, introspect,
properties=None, timelimit=-1):
self.dbus_object = dbus_object
self.interface = interface
self.introspect = introspect
self.tmo = 0
if timelimit >= 0:
self.tmo = float(timelimit)
self.tmo *= 1.10
self.dbus_interface = dbus.Interface(self.dbus_object, self.interface)
self._set_props(properties)
def __getattr__(self, item):
if hasattr(self.dbus_interface, item):
return functools.partial(self._wrapper, item)
else:
return functools.partial(self, item)
def _wrapper(self, _method_name, *args, **kwargs):
# Lets see how long a method takes to execute, in call cases we should
# return something when the time limit has been reached.
start = time.time()
result = getattr(self.dbus_interface, _method_name)(*args, **kwargs)
end = time.time()
diff = end - start
if self.tmo > 0.0:
if diff > self.tmo:
std_err_print("\n Time exceeded: %f > %f %s" %
(diff, self.tmo, _method_name))
if self.introspect:
if 'RETURN_VALUE' in self.introspect[
self.interface]['methods'][_method_name]:
r_type = self.introspect[
self.interface]['methods'][
_method_name]['RETURN_VALUE']['a_type']
verify_type(result, r_type)
return result
def update(self):
self._set_props()
class ClientProxy(object):
@staticmethod
def _intf_short_name(nm):
return nm.split('.')[-1:][0]
def get_introspect(self):
i = dbus.Interface(
self.dbus_object,
'org.freedesktop.DBus.Introspectable')
return DbusIntrospection.introspect(i.Introspect())
def _common(self, interface, introspect, properties):
short_name = ClientProxy._intf_short_name(interface)
self.short_interface_names.append(short_name)
ro = RemoteInterface(self.dbus_object, interface, introspect,
properties, timelimit=self.tmo)
setattr(self, short_name, ro)
def __init__(self, bus, object_path, interface_prop_hash=None,
interfaces=None, timelimit=-1):
self.object_path = object_path
self.short_interface_names = []
self.tmo = timelimit
self.dbus_object = bus.get_object(
BUS_NAME, self.object_path, introspect=False)
if interface_prop_hash:
assert interfaces is None
if interfaces:
assert interface_prop_hash is None
if interface_prop_hash and not validate_introspection:
# We have everything including the values of the properties
for i, props in interface_prop_hash.items():
self._common(i, None, props)
elif interfaces and not validate_introspection:
# We are retrieving the values of the properties
for i in interfaces:
self._common(i, None, None)
else:
# We need to query the interfaces and gather all the properties
# for each interface, as we have the introspection data we
# will also utilize it to verify what we get back verifies
introspect = self.get_introspect()
if interface_prop_hash:
introspect_interfaces = list(introspect.keys())
for object_manager_key in interface_prop_hash.keys():
assert object_manager_key in introspect_interfaces
for i in list(introspect.keys()):
self._common(i, introspect, None)
def update(self):
# Go through all interfaces and update them
for sn in self.short_interface_names:
getattr(self, sn).update()
| 27.220736 | 77 | 0.675513 |
eb014668e291f4c33e5d7b62d8f19a18367dba12
| 921 |
py
|
Python
|
Algorithms/2_Implementation/62.py
|
abphilip-codes/Hackerrank_DSA
|
bb9e233d9d45c5b14c138830602695ad4113fba4
|
[
"MIT"
] | 1 |
2021-11-25T13:39:30.000Z
|
2021-11-25T13:39:30.000Z
|
Algorithms/2_Implementation/62.py
|
abphilip-codes/Hackerrank_DSA
|
bb9e233d9d45c5b14c138830602695ad4113fba4
|
[
"MIT"
] | null | null | null |
Algorithms/2_Implementation/62.py
|
abphilip-codes/Hackerrank_DSA
|
bb9e233d9d45c5b14c138830602695ad4113fba4
|
[
"MIT"
] | null | null | null |
# https://www.hackerrank.com/challenges/3d-surface-area/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'surfaceArea' function below.
#
# The function is expected to return an INTEGER.
# The function accepts 2D_INTEGER_ARRAY A as parameter.
#
def surfaceArea(A):
x = [[0]*(len(A[0])+2)]+[[0]+z+[0] for z in A]+[[0]*(len(A[0])+2)]
ans = [abs(x[z][y]-x[z-1][y])+abs(x[z][y]-x[z][y-1]) for z in range(1,len(x)) for y in range(1,len(x[z]))]
return sum(ans)+(len(A)*len(A[0])*2)
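# Quick sanity check (not part of the original submission): a single 1x1x1 tower has
# surface area 6, i.e. surfaceArea([[1]]) == 6.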
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
first_multiple_input = input().rstrip().split()
H = int(first_multiple_input[0])
W = int(first_multiple_input[1])
A = []
for _ in range(H):
A.append(list(map(int, input().rstrip().split())))
result = surfaceArea(A)
fptr.write(str(result) + '\n')
fptr.close()
| 22.463415 | 110 | 0.618893 |
debfc54ae8cddc984ade037ca457a6cafe80a638
| 2,648 |
py
|
Python
|
test/test_npu/test_network_ops/test_confusion_transpose_backward.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_network_ops/test_confusion_transpose_backward.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_network_ops/test_confusion_transpose_backward.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020 Huawei Technologies Co., Ltd
# Copyright (c) 2019, Facebook CORPORATION.
# All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from common_utils import TestCase, run_tests
from common_device_type import instantiate_device_type_tests
from util_test import create_common_tensor
class TestConfusionTransposeDBackward(TestCase):
def npu_op_exec(self, input1, shape, perm, transpose_first):
input1.requires_grad_()
output = torch.npu_confusion_transpose(input1, perm, shape, transpose_first)
output.backward(torch.ones_like(output))
output1 = output.detach().cpu().numpy()
output2 = input1.grad.cpu().numpy()
return output1, output2
def cpu_op_exec(self, input1, shape, perm, transpose_first):
input1.requires_grad_()
if transpose_first:
output = input1.permute(*perm).contiguous().view(shape)
else:
output = input1.view(shape).permute(*perm)
output.backward(torch.ones_like(output))
output1 = output.detach().numpy()
output2 = input1.grad.numpy()
return output1, output2
def test_confusion_transpose_backward(self, device):
shape_format = [
[[np.float32, 0, [1, 576, 2560]],[1, 576, 32, 80], (0, 2, 1, 3), False],
[[np.float32, 0, [1, 32, 576, 80]],[1, 576, 2560], (0, 2, 1, 3), True],
[[np.float16, 0, [1, 576, 2560]], [1, 576, 32, 80], (0, 2, 1, 3), False],
[[np.float16, 0, [1, 32, 576, 80]], [1, 576, 2560], (0, 2, 1, 3), True],
]
for item in shape_format:
cpu_input, npu_input = create_common_tensor(item[0], 0, 100)
cpu_output1, cpu_output2 = self.cpu_op_exec(cpu_input, item[1], item[2], item[3])
npu_output1, npu_output2 = self.npu_op_exec(npu_input, item[1], item[2], item[3])
self.assertRtolEqual(cpu_output1, npu_output1)
self.assertRtolEqual(cpu_output2, npu_output2)
instantiate_device_type_tests(TestConfusionTransposeDBackward, globals(), except_for='cpu')
if __name__ == "__main__":
run_tests()
| 44.133333 | 93 | 0.672961 |
def32a8fceaa7d1439b301e7160c0245a2e3ec89
| 1,786 |
py
|
Python
|
x/fetch/main.py
|
miku/lc-extra
|
afeb3cb5f532069be17c54d6508aa7c0c8373c2a
|
[
"MIT"
] | null | null | null |
x/fetch/main.py
|
miku/lc-extra
|
afeb3cb5f532069be17c54d6508aa7c0c8373c2a
|
[
"MIT"
] | null | null | null |
x/fetch/main.py
|
miku/lc-extra
|
afeb3cb5f532069be17c54d6508aa7c0c8373c2a
|
[
"MIT"
] | 1 |
2020-02-04T08:09:01.000Z
|
2020-02-04T08:09:01.000Z
|
#!/usr/bin/env python
#
# https://sigel.staatsbibliothek-berlin.de/api/hydra/?q=*
#
# {
# "@context": "https://sigel.staatsbibliothek-berlin.de/typo3conf/ext/zdb_json_api/Resources/Public/context.jsonld",
# "id": "https://sigel.staatsbibliothek-berlin.de/api/hydra/?q=%2A",
# "type": "Collection",
# "freetextQuery": "*",
# "totalItems": 17735,
#
# "view": {
# "type": "PartialCollectionView",
# "id": "https://sigel.staatsbibliothek-berlin.de/api/hydra/?q=%2A&page=1",
# "totalItems": 10,
# "pageIndex": 1,
# "numberOfPages": 1774,
# "offset": 1,
# "limit": 10,
# "first": "https://sigel.staatsbibliothek-berlin.de/api/hydra/?q=%2A&page=1",
# "last": "https://sigel.staatsbibliothek-berlin.de/api/hydra/?q=%2A&page=1774",
# "next": "https://sigel.staatsbibliothek-berlin.de/api/hydra/?q=%2A&page=2"
# },
#
import requests
import sys
OUTFILE = "data.json"
with open(OUTFILE, "w") as output:
    # Variable assignment
page = 1
    # Loop (endless)
while True:
        # Assignment
url = "https://sigel.staatsbibliothek-berlin.de/api/hydra/?page={}&q=*".format(page)
# Print
print(url, file=sys.stderr)
        # Variable assignment
resp = requests.get(url)
if resp.status_code >= 400:
            raise RuntimeError('got %s on %s' % (resp.status_code, url))
        # Variable assignment
doc = resp.json()
        # Function call
output.write(resp.text)
output.write("\n")
        # Variable assignment
total = doc["view"]["numberOfPages"]
# Print
print("{} / {}".format(page, total), file=sys.stderr)
        # Condition / comparison
if page == total:
break
        # Variable assignment
page += 1
| 25.15493 | 122 | 0.583987 |
9d5efb9dc24f0c0de4631e494d44ffebde7322d1
| 2,423 |
py
|
Python
|
jumeaux/commands/server/main.py
|
ihatov08/jumeaux
|
7d983474df4b6dcfa57ea1a66901fbc99ebababa
|
[
"MIT"
] | 11 |
2017-10-02T01:29:12.000Z
|
2022-03-31T08:37:22.000Z
|
jumeaux/commands/server/main.py
|
ihatov08/jumeaux
|
7d983474df4b6dcfa57ea1a66901fbc99ebababa
|
[
"MIT"
] | 79 |
2017-07-16T14:47:17.000Z
|
2022-03-31T08:49:14.000Z
|
jumeaux/commands/server/main.py
|
ihatov08/jumeaux
|
7d983474df4b6dcfa57ea1a66901fbc99ebababa
|
[
"MIT"
] | 2 |
2019-01-28T06:11:58.000Z
|
2021-01-25T07:21:21.000Z
|
"""Boot mock API server
Usage:
{cli} [--port <port>] [-v|-vv|-vvv]
{cli} (-h | --help)
Options:
--port <port> Running port [default: 8000]
-v Logger level (`-v` or `-vv` or `-vvv`)
-h --help Show this screen.
"""
import json
import socketserver
import urllib
from http.server import SimpleHTTPRequestHandler
from owlmixin import OwlMixin
from jumeaux.logger import Logger, init_logger
logger: Logger = Logger(__name__)
class MyServerHandler(SimpleHTTPRequestHandler):
def do_GET(self):
logger.info_lv2("*" * 80)
logger.info_lv2("<<< Request headers >>>")
logger.info_lv2(self.headers)
SimpleHTTPRequestHandler.do_GET(self)
def do_POST(self):
logger.info_lv2("*" * 80)
logger.info_lv2("<<< Request headers >>>")
logger.info_lv2(self.headers)
content_type = self.headers.get_content_type()
content_charset = self.headers.get_content_charset() or "utf-8"
if content_type == "application/x-www-form-urlencoded":
logger.info_lv2("<<< Parse as x-www-form-urlencoded.. >>>")
logger.info_lv2(
urllib.parse.parse_qs(
self.rfile.read(int(self.headers.get("content-length"))).decode(
content_charset
),
keep_blank_values=1,
)
)
elif content_type == "application/json":
logger.info_lv2("<<< Parse as json.. >>>")
logger.info_lv2(
json.loads(
self.rfile.read(int(self.headers.get("content-length"))).decode(content_charset),
)
)
else:
logger.info_lv2("<<< Parse as plain string.. >>>")
logger.info_lv2(
self.rfile.read(int(self.headers.get("content-length"))).decode(
content_charset
),
)
SimpleHTTPRequestHandler.do_GET(self)
class ReuseAddressTCPServer(socketserver.TCPServer):
allow_reuse_address = True
class Args(OwlMixin):
port: int
v: int
def run(args: Args):
init_logger(args.v)
with ReuseAddressTCPServer(("", args.port), MyServerHandler) as httpd:
logger.info_lv1(f"Serving HTTP on 0.0.0.0 port {args.port} (http://0.0.0.0:{args.port}/)")
httpd.serve_forever()
| 30.2875 | 101 | 0.57078 |
261d2c174d707e9450cd0b7fd3995fbb4fec6e28
| 112 |
py
|
Python
|
scripts/hello_world_Oliver.py
|
breezage/Hacktoberfest-1
|
6f6d52248c79c0e72fd13b599500318fce3f9ab0
|
[
"MIT"
] | null | null | null |
scripts/hello_world_Oliver.py
|
breezage/Hacktoberfest-1
|
6f6d52248c79c0e72fd13b599500318fce3f9ab0
|
[
"MIT"
] | null | null | null |
scripts/hello_world_Oliver.py
|
breezage/Hacktoberfest-1
|
6f6d52248c79c0e72fd13b599500318fce3f9ab0
|
[
"MIT"
] | 1 |
2019-10-24T06:45:21.000Z
|
2019-10-24T06:45:21.000Z
|
# LANGUAGE: Python
# AUTHOR: Justin Oliver
# GITHUB: https://github.com/justinoliver
print('Hello, World!')
| 18.666667 | 42 | 0.705357 |
13ffeb5f50c02edce4a3a5eacd3e988666c5e7f9
| 1,457 |
py
|
Python
|
tag_generator.py
|
5nizza/5nizza.github.io
|
234cb066164648715f5440be29ff55507b8d2999
|
[
"CC-BY-3.0"
] | null | null | null |
tag_generator.py
|
5nizza/5nizza.github.io
|
234cb066164648715f5440be29ff55507b8d2999
|
[
"CC-BY-3.0"
] | null | null | null |
tag_generator.py
|
5nizza/5nizza.github.io
|
234cb066164648715f5440be29ff55507b8d2999
|
[
"CC-BY-3.0"
] | null | null | null |
#!/usr/bin/env python
"""
This script creates tags for jekyll blog.
Source:
Inspired by http://longqian.me/2017/02/09/github-jekyll-tag/
"""
import glob
import os
post_dir = '_posts/'
tag_dir = 'tag/'
file_names = glob.glob(post_dir + '**/*.md', recursive=True)
tags = set()
for file in file_names:
f = open(file, 'r')
inside_header = False
for line in f:
line = line.strip()
if line == '---':
if inside_header:
break # continue to the next file
inside_header = True
if line.startswith('tags:'):
tags_token = line[5:].strip()
if tags_token.startswith('['):
tags_token = tags_token.strip('[]')
new_tags = [l.strip().strip(" "+"'"+'"')
for l in tags_token.split(',')]
else:
new_tags = tags_token.split()
tags.update(new_tags)
f.close()
old_tags = glob.glob(tag_dir + '*.md')
for tag in old_tags:
os.remove(tag)
if not os.path.exists(tag_dir):
os.makedirs(tag_dir)
for tag in tags:
tag_filename = tag_dir + tag + '.md'
f = open(tag_filename, 'a')
write_str = '---\nlayout: tagpage\ntitle: \"Tag: ' + tag + '\"\ntag: ' + tag + '\nrobots: noindex\n---\n'
f.write(write_str)
f.close()
print("Tags generated ({count}): {tags}".format(count=len(tags),
tags=', '.join(tags)))
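# Each generated tag page contains front matter like the following
# (example output for a hypothetical tag named "python"):
#   ---
#   layout: tagpage
#   title: "Tag: python"
#   tag: python
#   robots: noindex
#   ---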
| 26.981481 | 109 | 0.540151 |
26e766c55c5afa429c8e1e08831eac89a2455913
| 958 |
py
|
Python
|
Interview Preparation Kits/Interview Preparation Kit/String Manipulation/Special String Again/special_string_count.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | 1 |
2021-02-22T17:37:45.000Z
|
2021-02-22T17:37:45.000Z
|
Interview Preparation Kits/Interview Preparation Kit/String Manipulation/Special String Again/special_string_count.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | null | null | null |
Interview Preparation Kits/Interview Preparation Kit/String Manipulation/Special String Again/special_string_count.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the substrCount function below.
def substrCount(n, s):
counts = []
current = None
count = 0
ans = 0
for i in range(n):
if s[i] == current:
count += 1
else:
if current is not None:
counts.append((current, count))
current = s[i]
count = 1
counts.append((current, count))
length = len(counts)
for j in range(length):
ans += (counts[j][1] + 1) * counts[j][1] // 2
for k in range(1, length-1):
if counts[k-1][0] == counts[k+1][0] and counts[k][1] == 1:
ans += min(counts[k-1][1], counts[k+1][1])
return ans
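# Quick sanity check (not part of the original submission): substrCount(3, "aba") == 4,
# counting the special substrings "a", "b", "a" and "aba".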
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
s = input()
result = substrCount(n, s)
fptr.write(str(result) + '\n')
fptr.close()
| 19.16 | 66 | 0.516701 |
f87081fefdd7a7c218bb1c70ed69788ca2e01c52
| 3,942 |
py
|
Python
|
train.py
|
PMingEli/FDSR
|
73563b5e148647fff2712602b1fc8720b8739589
|
[
"MIT"
] | null | null | null |
train.py
|
PMingEli/FDSR
|
73563b5e148647fff2712602b1fc8720b8739589
|
[
"MIT"
] | null | null | null |
train.py
|
PMingEli/FDSR
|
73563b5e148647fff2712602b1fc8720b8739589
|
[
"MIT"
] | null | null | null |
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import torch
import numpy as np
import cv2
import argparse
from models import *
from nyu_dataloader import *
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from torchvision import transforms, utils
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from tqdm import tqdm
import logging
from datetime import datetime
import os
parser = argparse.ArgumentParser()
parser.add_argument('--scale', type=int, default=4, help='scale factor')
parser.add_argument('--parameter', default='./data/parameter/', help='name of parameter file')
parser.add_argument('--model', default='FDSR', help='choose model')
parser.add_argument('--lr', default='0.0005', type=float, help='learning rate')
parser.add_argument('--result', default='./data/result/', help='result path')
parser.add_argument('--epoch', default=1000, type=int, help='max epoch')
opt = parser.parse_args()
print(opt)
s = datetime.now().strftime('%Y%m%d%H%M%S')
result_root = '%s/%s-lr_%s-s_%s'%(opt.result, s, opt.lr, opt.scale)
if not os.path.exists(result_root): os.mkdir(result_root)
logging.basicConfig(filename='%s/train.log'%result_root,format='%(asctime)s %(message)s', level=logging.INFO)
net = Net(num_feats=32, depth_chanels=1, color_channel=3, kernel_size=3).cuda()
net = nn.DataParallel(net)
# net.load_state_dict(torch.load(opt.parameter))
criterion = nn.L1Loss()
optimizer = optim.Adam(net.parameters(), lr=opt.lr)
scheduler = lr_scheduler.StepLR(optimizer, step_size=80000, gamma=0.5)
net.train()
data_transform = transforms.Compose([transforms.ToTensor()])
nyu_dataset = NYU_v2_datset(root_dir='./data/npy', transform=data_transform)
dataloader = torch.utils.data.DataLoader(nyu_dataset, batch_size=1, shuffle=True)
def calc_rmse(a, b,minmax):
a = a[6:-6, 6:-6]
b = b[6:-6, 6:-6]
a = a*(minmax[1]-minmax[0]) + minmax[1]
b = b*(minmax[1]-minmax[0]) + minmax[1]
return np.sqrt(np.mean(np.power(a-b,2)))
@torch.no_grad()
def validate(net, root_dir='./data/npy'):
data_transform = transforms.Compose([
transforms.ToTensor()
])
test_dataset = NYU_v2_datset(root_dir=root_dir, transform=data_transform, train=False)
dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False)
net.eval()
rmse = np.zeros(654)
test_minmax = np.load('%s/test_minmax.npy'%root_dir)
t = tqdm(iter(dataloader), leave=True, total=len(dataloader))
for idx, data in enumerate(t):
# minmax = test_minmax[:,idx]
minmax = test_minmax[idx]
guidance, target, gt = data['guidance'].cuda(), data['target'].cuda(), data['gt'].cuda()
out = net((guidance, target))
rmse[idx] = calc_rmse(gt[0,0].cpu().numpy(), out[0,0].cpu().numpy(),minmax)
t.set_description('[validate] rmse: %f' %rmse[:idx+1].mean())
t.refresh()
return rmse
max_epoch = opt.epoch
for epoch in range(max_epoch):
net.train()
running_loss = 0.0
t = tqdm(iter(dataloader), leave=True, total=len(dataloader))
for idx, data in enumerate(t):
optimizer.zero_grad()
guidance, target, gt = data['guidance'].cuda(), data['target'].cuda(), data['gt'].cuda()
out = net((guidance, target))
loss = criterion(out, gt)
loss.backward()
optimizer.step()
scheduler.step()
running_loss += loss.data.item()
if idx % 50 == 0:
running_loss /= 50
t.set_description('[train epoch(L1):%d] loss: %.10f' % (epoch+1, running_loss))
t.refresh()
logging.info('epoch:%d running_loss:%.10f' % (epoch + 1, running_loss))
rmse = validate(net)
logging.info('epoch:%d --------mean_rmse:%.10f '%(epoch+1, rmse.mean()))
torch.save(net.state_dict(), "%s/parameter%d"%(result_root, epoch+1))
| 32.578512 | 109 | 0.659056 |
3e19cc404d78a16c21bc9cdc7262fa7acc665fbb
| 2,269 |
py
|
Python
|
3_DeepLearning-CNNs/03_CNN_MNIST_Classification/1-CNN_MNIST_Model.py
|
felixdittrich92/DeepLearning-tensorflow-keras
|
2880d8ed28ba87f28851affa92b6fa99d2e47be9
|
[
"Apache-2.0"
] | null | null | null |
3_DeepLearning-CNNs/03_CNN_MNIST_Classification/1-CNN_MNIST_Model.py
|
felixdittrich92/DeepLearning-tensorflow-keras
|
2880d8ed28ba87f28851affa92b6fa99d2e47be9
|
[
"Apache-2.0"
] | null | null | null |
3_DeepLearning-CNNs/03_CNN_MNIST_Classification/1-CNN_MNIST_Model.py
|
felixdittrich92/DeepLearning-tensorflow-keras
|
2880d8ed28ba87f28851affa92b6fa99d2e47be9
|
[
"Apache-2.0"
] | null | null | null |
import os
import numpy as np
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import *
from tensorflow.keras.activations import *
from tensorflow.keras.models import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.initializers import *
from tensorflow.keras.callbacks import *
# Dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Cast to np.float32
x_train = x_train.astype(np.float32)
y_train = y_train.astype(np.float32)
x_test = x_test.astype(np.float32)
y_test = y_test.astype(np.float32)
# Reshape the images to a depth dimension
x_train = np.expand_dims(x_train, axis=-1)
print(x_train)
x_test = np.expand_dims(x_test, axis=-1)
print(x_test)
# Dataset variables
train_size = x_train.shape[0]
test_size = x_test.shape[0]
width, height, depth = x_train.shape[1:] # 28, 28, 1
num_features = width * height * depth # 28x28x1
num_classes = 10
# Convert the labels to one-hot (categorical) vectors
y_train = to_categorical(y_train, num_classes=num_classes)
y_test = to_categorical(y_test, num_classes=num_classes)
# Model params
lr = 0.001
optimizer = Adam(lr=lr)
epochs = 10
batch_size = 256
# Define the CNN
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=3, padding='same', input_shape=x_train.shape[1:]))
model.add(Activation("relu"))
model.add(Conv2D(filters=32, kernel_size=3, padding='same'))
model.add(Activation("relu"))
model.add(MaxPool2D())
model.add(Conv2D(filters=64, kernel_size=5, padding='same'))
model.add(Activation("relu"))
model.add(Conv2D(filters=64, kernel_size=5, padding='same'))
model.add(Activation("relu"))
model.add(MaxPool2D())
model.add(Flatten())
model.add(Dense(units=128))
model.add(Activation("relu"))
model.add(Dense(units=num_classes))
model.add(Activation("softmax"))  # for classification with more than 2 classes
# Compile and train (fit) the model, afterwards evaluate the model
model.summary()
model.compile(
loss="categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
model.fit(
x=x_train,
y=y_train,
epochs=epochs,
batch_size=batch_size,
validation_data=[x_test, y_test])
score = model.evaluate(
x_test,
y_test,
verbose=0)
print("Score: ", score)
| 25.494382 | 91 | 0.749229 |
3985796a0bec60a952235752d6d89eea89e17ace
| 11,917 |
py
|
Python
|
src/onegov/directory/migration.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/directory/migration.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/directory/migration.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.directory.models.directory_entry import DirectoryEntry
from onegov.form import as_internal_id
from onegov.form import flatten_fieldsets
from onegov.form import parse_form
from onegov.form import parse_formcode
from sqlalchemy.orm import object_session, joinedload, undefer
from sqlalchemy.orm.attributes import get_history
class DirectoryMigration(object):
""" Takes a directory and the structure/configuration it should have in
the future.
It then migrates the existing directory entries, if possible.
"""
def __init__(self, directory, new_structure=None, new_configuration=None,
old_structure=None):
self.directory = directory
self.old_structure = old_structure or self.old_directory_structure
self.new_structure = new_structure or directory.structure
self.new_configuration = new_configuration or directory.configuration
self.new_form_class = parse_form(self.new_structure)
self.fieldtype_migrations = FieldTypeMigrations()
self.changes = StructuralChanges(
self.old_structure,
self.new_structure
)
@property
def old_directory_structure(self):
history = get_history(self.directory, 'structure')
if history.deleted:
return history.deleted[0]
else:
return self.directory.structure
@property
def possible(self):
if not self.directory.entries:
return True
if not self.changes:
return True
if not self.changes.changed_fields:
return True
for changed in self.changes.changed_fields:
old = self.changes.old[changed]
new = self.changes.new[changed]
# we can turn required into optional fields and vice versa
# (the form validation takes care of validating the requirements)
if old.required != new.required and old.type == new.type:
continue
# we can only convert certain types
if old.required == new.required and old.type != new.type:
if not self.fieldtype_migrations.possible(old.type, new.type):
break
else:
return True
return False
@property
def entries(self):
session = object_session(self.directory)
if not session:
return self.directory.entries
e = session.query(DirectoryEntry)
e = e.filter_by(directory_id=self.directory.id)
e = e.options(joinedload(DirectoryEntry.files))
e = e.options(undefer(DirectoryEntry.content))
return e
def execute(self):
""" To run the migration, run this method. The other methods below
should only be used if you know what you are doing.
"""
assert self.possible
self.migrate_directory()
# Triggers the observer to func::structure_configuration_observer()
# and executing this very function because of an autoflush event
# in a new instance.
for entry in self.entries:
self.migrate_entry(entry)
def migrate_directory(self):
self.directory.structure = self.new_structure
self.directory.configuration = self.new_configuration
def migrate_entry(self, entry):
"""
This function is called after an update to the directory structure.
During execution of self.execute(), the directory is migrated.
        When we start looping through the entries, an autoflush occurs, calling
        the migration observer for the directory, which instantiates yet
        another instance of the migration. After that, inside execute(),
        the session is no longer flushing, and we have to skip,
        since the values are already migrated and the migration would
        fail when removing fieldsets.
"""
update = self.changes and True or False
session = object_session(entry)
if not session._flushing:
return
self.migrate_values(entry.values)
self.directory.update(entry, entry.values, force_update=update)
def migrate_values(self, values):
self.add_new_fields(values)
self.remove_old_fields(values)
self.rename_fields(values)
self.convert_fields(values)
def add_new_fields(self, values):
for added in self.changes.added_fields:
added = as_internal_id(added)
values[added] = None
def remove_old_fields(self, values):
for removed in self.changes.removed_fields:
removed = as_internal_id(removed)
del values[removed]
def rename_fields(self, values):
for old, new in self.changes.renamed_fields.items():
old, new = as_internal_id(old), as_internal_id(new)
values[new] = values[old]
del values[old]
def convert_fields(self, values):
for changed in self.changes.changed_fields:
convert = self.fieldtype_migrations.get_converter(
self.changes.old[changed].type,
self.changes.new[changed].type
)
changed = as_internal_id(changed)
values[changed] = convert(values[changed])
class FieldTypeMigrations(object):
""" Contains methods to migrate fields from one type to another. """
def possible(self, old_type, new_type):
return self.get_converter(old_type, new_type) is not None
def get_converter(self, old_type, new_type):
if old_type == 'password':
return # disabled to avoid accidental leaks
if old_type == new_type:
return lambda v: v
explicit = '{}_to_{}'.format(old_type, new_type)
generic = 'any_to_{}'.format(new_type)
if hasattr(self, explicit):
return getattr(self, explicit)
if hasattr(self, generic):
return getattr(self, generic)
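    # For example (illustrative only): get_converter('textarea', 'text') resolves to the
    # explicit textarea_to_text converter, while get_converter('radio', 'text') falls back
    # to the generic any_to_text handler.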
def any_to_text(self, value):
return str(value if value is not None else '').strip()
def any_to_textarea(self, value):
return self.any_to_text(value)
def textarea_to_text(self, value):
return value.replace('\n', ' ').strip()
def textarea_to_code(self, value):
return value
def text_to_code(self, value):
return value
def date_to_text(self, value):
return '{:%d.%m.%Y}'.format(value)
def datetime_to_text(self, value):
return '{:%d.%m.%Y %H:%M}'.format(value)
def time_to_text(self, value):
return '{:%H:%M}'.format(value)
def radio_to_checkbox(self, value):
return [value]
def text_to_url(self, value):
return value
class StructuralChanges(object):
""" Tries to detect structural changes between two formcode blocks.
Can only be trusted if the ``detection_successful`` property is True. If it
is not, the detection failed because the changes were too great.
"""
def __init__(self, old_structure, new_structure):
old_fieldsets = parse_formcode(old_structure)
new_fieldsets = parse_formcode(new_structure)
self.old = {
f.human_id: f for f in flatten_fieldsets(old_fieldsets)
}
self.new = {
f.human_id: f for f in flatten_fieldsets(new_fieldsets)
}
self.old_fieldsets = old_fieldsets
self.new_fieldsets = new_fieldsets
self.detect_added_fieldsets()
self.detect_removed_fieldsets()
self.detect_added_fields()
self.detect_removed_fields()
self.detect_renamed_fields() # modifies added/removed fields
self.detect_changed_fields()
def __bool__(self):
return bool(
self.added_fields
or self.removed_fields
or self.renamed_fields
or self.changed_fields
)
def detect_removed_fieldsets(self):
new_ids = tuple(f.human_id for f in self.new_fieldsets if f.human_id)
self.removed_fieldsets = [
f.human_id for f in self.old_fieldsets
if f.human_id and f.human_id not in new_ids
]
def detect_added_fieldsets(self):
old_ids = tuple(f.human_id for f in self.old_fieldsets if f.human_id)
self.added_fieldsets = [
f.human_id for f in self.new_fieldsets
if f.human_id and f.human_id not in old_ids
]
def detect_added_fields(self):
self.added_fields = [
f.human_id for f in self.new.values()
if f.human_id not in {f.human_id for f in self.old.values()}
]
def detect_removed_fields(self):
self.removed_fields = [
f.human_id for f in self.old.values()
if f.human_id not in {f.human_id for f in self.new.values()}
]
def do_rename(self, removed, added):
if removed in self.renamed_fields:
return False
if added in set(self.renamed_fields.values()):
return False
same_type = self.old[removed].type == self.new[added].type
if not same_type:
return False
added_fs = "/".join(added.split('/')[:-1])
removed_fs = "/".join(removed.split('/')[:-1])
# has no fieldset
if not added_fs and not removed_fs:
return same_type
# case fieldset/Oldname --> Oldname
if removed_fs and not added_fs:
if f'{removed_fs}/{added}' == removed:
return True
# case Oldname --> fieldset/Name
if added_fs and not removed_fs:
if f'{added_fs}/{removed}' == added:
return True
# case fieldset rename and field rename
in_removed = any(s == removed_fs for s in self.removed_fieldsets)
in_added = any(s == added_fs for s in self.added_fieldsets)
# Fieldset rename
expected = f'{added_fs}/{removed.split("/")[-1]}'
if in_added and in_removed:
if expected == added:
return True
if expected in self.added_fields:
return False
if added not in self.renamed_fields.values():
# Prevent assigning same new field twice
return True
# Fieldset has been deleted
if (in_removed and not in_added) or (in_added and not in_removed):
if expected == added:
# It matches exactly
return True
if expected in self.added_fields:
# there is another field that matches better
return False
# if len(self.added_fields) == len(self.removed_fields) == 1:
# return True
return True
def detect_renamed_fields(self):
# renames are detected aggressively - we rather have an incorrect
# rename than an add/remove combo. Renames lead to no data loss, while
# a add/remove combo does.
self.renamed_fields = {}
for r in self.removed_fields:
for a in self.added_fields:
if self.do_rename(r, a):
self.renamed_fields[r] = a
self.added_fields = [
f for f in self.added_fields
if f not in set(self.renamed_fields.values())
]
self.removed_fields = [
f for f in self.removed_fields
if f not in self.renamed_fields
]
def detect_changed_fields(self):
self.changed_fields = []
for old in self.old:
if old in self.renamed_fields:
new = self.renamed_fields[old]
elif old in self.new:
new = old
else:
continue
if self.old[old].required != self.new[new].required:
self.changed_fields.append(new)
elif self.old[old].type != self.new[new].type:
self.changed_fields.append(new)
| 32.829201 | 79 | 0.619619 |
39a67bb7385d264d8d2a6627d7b47e4e5048a380
| 859 |
py
|
Python
|
Backtracking/GenerateParanthesis.py
|
dileeppandey/hello-interview
|
78f6cf4e2da4106fd07f4bd86247026396075c69
|
[
"MIT"
] | null | null | null |
Backtracking/GenerateParanthesis.py
|
dileeppandey/hello-interview
|
78f6cf4e2da4106fd07f4bd86247026396075c69
|
[
"MIT"
] | null | null | null |
Backtracking/GenerateParanthesis.py
|
dileeppandey/hello-interview
|
78f6cf4e2da4106fd07f4bd86247026396075c69
|
[
"MIT"
] | 1 |
2020-02-12T16:57:46.000Z
|
2020-02-12T16:57:46.000Z
|
"""
Given n pairs of parentheses, write a function to generate all combinations of
well-formed parentheses.
"""
class Solution(object):
def generateParanthesisHelper(self, left, right, current, result, n):
if right == 0:
result.append(current)
if left > 0:
self.generateParanthesisHelper(
left-1, right, current+'(', result, n)
if right > left:
self.generateParanthesisHelper(
left, right-1, current+')', result, n)
def generateParenthesis(self, n):
"""
:type n: int
:rtype: List[str]
"""
result = []
if n:
left = right = n
self.generateParanthesisHelper(left, right, "", result, n)
return result
s = Solution()
print(s.generateParenthesis(3))
print(s.generateParenthesis(1))
| 28.633333 | 79 | 0.576251 |
8a2afa8b1aef55b0683647413167ec45f9dfffd7
| 806 |
py
|
Python
|
setup.py
|
FrieAT/MD_CompressedWavelet
|
82bd10edd611485cd5f0b81da744e07a3b7c98eb
|
[
"MIT"
] | 2 |
2020-03-28T11:50:45.000Z
|
2020-12-08T13:36:26.000Z
|
setup.py
|
FrieAT/MD_CompressedWavelet
|
82bd10edd611485cd5f0b81da744e07a3b7c98eb
|
[
"MIT"
] | 2 |
2020-04-20T11:12:59.000Z
|
2020-05-11T05:37:36.000Z
|
setup.py
|
FrieAT/MD_CompressedWavelet
|
82bd10edd611485cd5f0b81da744e07a3b7c98eb
|
[
"MIT"
] | null | null | null |
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "IP_WaveletFV",
version = "0.0.1",
author = "Andreas, Stefanie, Friedrich, Mostafa",
author_email = "no-one",
description = ("TODO."),
license = "MIT",
keywords = "TODO",
url = "TODO",
packages=['.', 'notebooks'],
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
)
| 29.851852 | 79 | 0.630273 |
8a6114b61e2c65ee0b7fd099bedb0a958a8541ac
| 8,478 |
py
|
Python
|
Packs/UnifiVideoNVR/Integrations/UnifiVideo/UnifiVideo.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/UnifiVideoNVR/Integrations/UnifiVideo/UnifiVideo.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/UnifiVideoNVR/Integrations/UnifiVideo/UnifiVideo.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import cv2
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
from unifi_video import UnifiVideoAPI
import dateparser
import json
demisto_format = '%Y-%m-%dT%H:%M:%SZ'
params = demisto.params()
args = demisto.args()
api_key = params.get('api_key')
address = params.get('addr')
port = params.get('port')
schema = params.get('schema')
fetch_limit = params.get('fetch_limit')
verify_cert = params.get('verify_cert')
FETCH_TIME = params.get('fetch_time')
if demisto.command() == 'test-module':
# This is the call made when pressing the integration test button.
uva = UnifiVideoAPI(api_key=api_key, addr=address, port=port, schema=schema, verify_cert=verify_cert)
demisto.results('ok')
if demisto.command() == 'unifivideo-get-camera-list':
uva = UnifiVideoAPI(api_key=api_key, addr=address, port=port, schema=schema, verify_cert=verify_cert)
context_output = []
for camera in uva.cameras:
context_output.append(camera.name)
results = [
CommandResults(
outputs_prefix='UnifiVideo.Cameras',
readable_output=tableToMarkdown("Camera list", context_output, headers=["Camera name"], removeNull=False),
outputs=context_output
)]
return_results(results)
if demisto.command() == 'unifivideo-get-snapshot':
camera_name = args.get('camera_name')
output = bytes()
uva = UnifiVideoAPI(api_key=api_key, addr=address, port=port, schema=schema, verify_cert=verify_cert)
uva.get_camera(camera_name).snapshot("/tmp/snapshot.png")
f = open("/tmp/snapshot.png", "rb")
output = f.read()
filename = "snapshot.png"
file = fileResult(filename=filename, data=output)
file['Type'] = entryTypes['image']
demisto.results(file)
if demisto.command() == 'unifivideo-set-recording-settings':
camera_name = args.get('camera_name')
rec_set = args.get('rec_set')
uva = UnifiVideoAPI(api_key=api_key, addr=address, port=port, schema=schema, verify_cert=verify_cert)
uva.get_camera(camera_name).set_recording_settings(rec_set)
demisto.results(camera_name + ": " + rec_set)
if demisto.command() == 'unifivideo-ir-leds':
camera_name = args.get('camera_name')
ir_leds = args.get('ir_leds')
uva = UnifiVideoAPI(api_key=api_key, addr=address, port=port, schema=schema, verify_cert=verify_cert)
uva.get_camera(camera_name).ir_leds(ir_leds)
demisto.results(camera_name + ": " + ir_leds)
if demisto.command() == 'unifivideo-get-recording':
recording_id = args.get('recording_id')
recording_file_name = 'recording-' + recording_id + '.mp4'
uva = UnifiVideoAPI(api_key=api_key, addr=address, port=port, schema=schema, verify_cert=verify_cert)
uva.refresh_recordings(0)
uva.recordings[recording_id].download('/tmp/recording.mp4')
f = open("/tmp/recording.mp4", "rb")
output = f.read()
filename = recording_file_name
file = fileResult(filename=filename, data=output, file_type=EntryType.ENTRY_INFO_FILE)
demisto.results(file)
if demisto.command() == 'unifivideo-get-recording-motion-snapshot':
recording_id = args.get('recording_id')
snapshot_file_name = 'snapshot-motion-' + recording_id + '.jpg'
uva = UnifiVideoAPI(api_key=api_key, addr=address, port=port, schema=schema, verify_cert=verify_cert)
uva.refresh_recordings(0)
uva.recordings[recording_id].motion('/tmp/snapshot.png')
f = open("/tmp/snapshot.png", "rb")
output = f.read()
filename = snapshot_file_name
file = fileResult(filename=filename, data=output)
file['Type'] = entryTypes['image']
demisto.results(file)
if demisto.command() == 'unifivideo-get-recording-snapshot':
recording_id = args.get('recording_id')
snapshot_file_name = 'snapshot-' + recording_id + '-' + args.get('frame') + '.jpg'
uva = UnifiVideoAPI(api_key=api_key, addr=address, port=port, schema=schema, verify_cert=verify_cert)
uva.refresh_recordings(0)
uva.recordings[recording_id].download('/tmp/recording.mp4')
if "frame" in args:
vc = cv2.VideoCapture('/tmp/recording.mp4') # pylint: disable=E1101
c = 1
if vc.isOpened():
rval, frame = vc.read()
else:
rval = False
while rval:
rval, frame = vc.read()
c = c + 1
if c == int(args.get('frame')):
cv2.imwrite("/tmp/" + snapshot_file_name, frame) # pylint: disable=E1101
break
vc.release()
f = open("/tmp/" + snapshot_file_name, "rb")
output = f.read()
filename = snapshot_file_name
file = fileResult(filename=filename, data=output)
file['Type'] = entryTypes['image']
demisto.results(file)
if demisto.command() == 'unifivideo-get-recording-list':
uva = UnifiVideoAPI(api_key=api_key, addr=address, port=port, schema=schema, verify_cert=verify_cert)
recordings = []
for rec in uva.get_recordings():
rec_tmp = {}
rec_tmp['id'] = rec._id
rec_tmp['rec_type'] = rec.rec_type
rec_tmp['start_time'] = rec.start_time.strftime('%Y-%m-%dT%H:%M:%SZ')
rec_tmp['end_time'] = rec.start_time.strftime('%Y-%m-%dT%H:%M:%SZ')
recordings.append(rec_tmp)
results = [
CommandResults(
outputs_prefix='UnifiVideo.Recordings',
readable_output=tableToMarkdown("Recording list", recordings, headers=["id", "rec_type", "start_time", "end_time"]),
outputs_key_field=['id'],
outputs=recordings
)]
return_results(results)
if demisto.command() == 'unifivideo-get-snapshot-at-frame':
entry_id = demisto.args().get('entryid')
snapshot_file_name = 'snapshot-' + entry_id + '-' + args.get('frame') + '.jpg'
try:
file_result = demisto.getFilePath(entry_id)
except Exception as ex:
return_error("Failed to load file entry with entryid: {}. Error: {}".format(entry_id, ex))
video_path = file_result.get("path") # pylint: disable=E1101
vc = cv2.VideoCapture(video_path) # pylint: disable=E1101
c = 1
if vc.isOpened():
rval, frame = vc.read()
else:
rval = False
while rval:
rval, frame = vc.read()
c = c + 1
if c == int(args.get('frame')):
cv2.imwrite("/tmp/" + snapshot_file_name, frame) # pylint: disable=E1101
break
vc.release()
f = open("/tmp/" + snapshot_file_name, "rb")
output = f.read()
filename = snapshot_file_name
file = fileResult(filename=filename, data=output)
file['Type'] = entryTypes['image']
demisto.results(file)
if demisto.command() == 'fetch-incidents':
start_time_of_int = str(datetime.now())
uva = UnifiVideoAPI(api_key=api_key, addr=address, port=port, schema=schema, verify_cert=verify_cert)
# And retrieve it for use later:
last_run = demisto.getLastRun()
# lastRun is a dictionary, with value "now" for key "time".
# JSON of the incident type created by this integration
inc = []
start_time = dateparser.parse(FETCH_TIME)
if last_run:
start_time = last_run.get('start_time')
if not isinstance(start_time, datetime):
start_time = datetime.strptime(str(start_time), '%Y-%m-%d %H:%M:%S.%f')
uva.refresh_recordings()
for rec in uva.get_recordings(limit=fetch_limit, start_time=start_time, order='desc'):
incident = {}
datetime_object = datetime.strptime(str(rec.start_time), '%Y-%m-%d %H:%M:%S')
for camera in uva.cameras:
cam_id = uva.get_camera(camera.name)
if cam_id._id in rec.cameras:
camera_name = camera.name
try:
if datetime_object > start_time:
incident = {
'name': rec.rec_type,
'occurred': datetime_object.strftime('%Y-%m-%dT%H:%M:%SZ'),
'rawJSON': json.dumps({"event": rec.rec_type, "ubnt_id": rec._id, "camera_name": camera_name,
"integration_lastrun": str(start_time), "start_time": str(rec.start_time),
"stop_time": str(rec.end_time)})
}
inc.append(incident)
except Exception as e:
raise Exception("Problem comparing: " + str(datetime_object) + ' ' + str(start_time) + " Exception: " + str(e))
demisto.incidents(inc)
demisto.setLastRun({'start_time': start_time_of_int})
| 41.558824 | 128 | 0.647087 |
0a5f60eefe99ceb4ed28a5c98f6f2dedba8d805f
| 455 |
py
|
Python
|
118-pascals-triangle/118-pascals-triangle.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | 2 |
2021-12-05T14:29:06.000Z
|
2022-01-01T05:46:13.000Z
|
pascals-triangle/pascals-triangle.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
pascals-triangle/pascals-triangle.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def generate(self, numRows: int) -> List[List[int]]:
if numRows==1:
return [[1]]
else:
output=[[1], [1,1]]
for i in range(numRows-2):
dp=[1]
for idx in range(len(output[-1])-1):
dp.append(output[-1][idx]+output[-1][idx+1])
dp.append(1)
output.append(dp)
return output
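# Illustrative example (not part of the original solution):
#   Solution().generate(5)
#   -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]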
| 35 | 64 | 0.415385 |
6a61ccfc3516cfd051605ca670e7b43162efd089
| 4,111 |
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/ipa_config.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/ipa_config.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/ipa_config.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Fran Fitzpatrick <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ipa_config
author: Fran Fitzpatrick (@fxfitz)
short_description: Manage Global FreeIPA Configuration Settings
description:
- Modify global configuration settings of a FreeIPA Server.
options:
ipadefaultloginshell:
description: Default shell for new users.
aliases: ["loginshell"]
type: str
ipadefaultemaildomain:
description: Default e-mail domain for new users.
aliases: ["emaildomain"]
type: str
extends_documentation_fragment:
- community.general.ipa.documentation
'''
EXAMPLES = r'''
- name: Ensure the default login shell is bash.
ipa_config:
ipadefaultloginshell: /bin/bash
ipa_host: localhost
ipa_user: admin
ipa_pass: supersecret
- name: Ensure the default e-mail domain is ansible.com.
ipa_config:
ipadefaultemaildomain: ansible.com
ipa_host: localhost
ipa_user: admin
ipa_pass: supersecret
'''
RETURN = r'''
config:
description: Configuration as returned by IPA API.
returned: always
type: dict
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
from ansible.module_utils._text import to_native
class ConfigIPAClient(IPAClient):
def __init__(self, module, host, port, protocol):
super(ConfigIPAClient, self).__init__(module, host, port, protocol)
def config_show(self):
return self._post_json(method='config_show', name=None)
def config_mod(self, name, item):
return self._post_json(method='config_mod', name=name, item=item)
def get_config_dict(ipadefaultloginshell=None, ipadefaultemaildomain=None):
config = {}
if ipadefaultloginshell is not None:
config['ipadefaultloginshell'] = ipadefaultloginshell
if ipadefaultemaildomain is not None:
config['ipadefaultemaildomain'] = ipadefaultemaildomain
return config
def get_config_diff(client, ipa_config, module_config):
return client.get_diff(ipa_data=ipa_config, module_data=module_config)
def ensure(module, client):
module_config = get_config_dict(
ipadefaultloginshell=module.params.get('ipadefaultloginshell'),
ipadefaultemaildomain=module.params.get('ipadefaultemaildomain'),
)
ipa_config = client.config_show()
diff = get_config_diff(client, ipa_config, module_config)
changed = False
new_config = {}
for module_key in diff:
if module_config.get(module_key) != ipa_config.get(module_key, None):
changed = True
new_config.update({module_key: module_config.get(module_key)})
if changed and not module.check_mode:
client.config_mod(name=None, item=new_config)
return changed, client.config_show()
def main():
argument_spec = ipa_argument_spec()
argument_spec.update(
ipadefaultloginshell=dict(type='str', aliases=['loginshell']),
ipadefaultemaildomain=dict(type='str', aliases=['emaildomain']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
client = ConfigIPAClient(
module=module,
host=module.params['ipa_host'],
port=module.params['ipa_port'],
protocol=module.params['ipa_prot']
)
try:
client.login(
username=module.params['ipa_user'],
password=module.params['ipa_pass']
)
changed, user = ensure(module, client)
module.exit_json(changed=changed, user=user)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| 28.748252 | 103 | 0.705911 |
7c19d60f2d7db293ba8e9b27b5522bb04175668d
| 324 |
py
|
Python
|
Python/Courses/Python-Tutorials.Telusko/01.Object-Oriented-Programming/15.03-Constructor-Inheritance.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Telusko/01.Object-Oriented-Programming/15.03-Constructor-Inheritance.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Telusko/01.Object-Oriented-Programming/15.03-Constructor-Inheritance.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
class A:
def __init__(self):
print("At A init")
def feature1(self):
print("Feature 1 working")
def feature2(self):
print("Feature 2 working")
class B(A):
def feature3(self):
print("Feature 3 working")
def feature4(self):
print("Feature 4 working")
b1 = B()
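# Note: B defines no __init__ of its own, so A's constructor is inherited and
# "At A init" is printed when B() is instantiated.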
| 15.428571 | 34 | 0.564815 |
7c3b09cd1be045ceff75c8e2d57e18eea020fc59
| 872 |
py
|
Python
|
python/oneflow/compatible/single_client/unittest/__init__.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | 3,285 |
2020-07-31T05:51:22.000Z
|
2022-03-31T15:20:16.000Z
|
python/oneflow/compatible/single_client/unittest/__init__.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | 2,417 |
2020-07-31T06:28:58.000Z
|
2022-03-31T23:04:14.000Z
|
python/oneflow/compatible/single_client/unittest/__init__.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | 520 |
2020-07-31T05:52:42.000Z
|
2022-03-29T02:38:11.000Z
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from oneflow.compatible.single_client.framework.unittest import (
TestCase,
num_nodes_required,
register_test_cases,
skip_unless_1n1d,
skip_unless_1n2d,
skip_unless_1n4d,
skip_unless_2n1d,
skip_unless_2n2d,
skip_unless_2n4d,
)
from . import env
| 30.068966 | 72 | 0.772936 |
7c58ba4aebffcc1fa56edd7c43d697cbbf88678c
| 930 |
py
|
Python
|
python/en/_matplotlib/gallery/text_labels_and_annotations/composing_custom_legends.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/_matplotlib/gallery/text_labels_and_annotations/composing_custom_legends.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/_matplotlib/gallery/text_labels_and_annotations/composing_custom_legends.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
text_labels_and_annotations/composing_custom_legends.py
Matplotlib > Gallery > Text, labels and annotations> Composing Custom Legends
https://matplotlib.org/3.1.1/gallery/text_labels_and_annotations/custom_legends.html#sphx-glr-gallery-text-labels-and-annotations-custom-legends-py
"""
# sphinx_gallery_thumbnail_number = 2
from matplotlib import rcParams, cycler
import matplotlib.pyplot as plt
import numpy as np
# Fixing random state for reproducibility
np.random.seed(19680801)
N = 10
# Each series is a logspace curve shifted by an integer offset (0 to 9) plus random noise
data = [np.logspace(0, 1, 100) + np.random.randn(100) + ii for ii in range(N)]
# data is a 100x10 numpy array of dtype float64
data = np.array(data).T
cmap = plt.cm.coolwarm
rcParams['axes.prop_cycle'] = cycler(color=cmap(np.linspace(0, 1, N)))
fig, ax = plt.subplots()
lines = ax.plot(data)
ax.legend(lines)
| 34.444444 | 147 | 0.762366 |
6b05a198af42a360d93fe96744515315528adc10
| 3,803 |
py
|
Python
|
Python/Buch_ATBS/Teil_1/Kapitel_06_Stringbearbeitung/10_molare_masse.py
|
Apop85/Scripts
|
1d8dad316c55e1f1343526eac9e4b3d0909e4873
|
[
"MIT"
] | null | null | null |
Python/Buch_ATBS/Teil_1/Kapitel_06_Stringbearbeitung/10_molare_masse.py
|
Apop85/Scripts
|
1d8dad316c55e1f1343526eac9e4b3d0909e4873
|
[
"MIT"
] | 6 |
2020-12-24T15:15:09.000Z
|
2022-01-13T01:58:35.000Z
|
Python/Buch_ATBS/Teil_1/Kapitel_06_Stringbearbeitung/10_molare_masse.py
|
Apop85/Scripts
|
1d8dad316c55e1f1343526eac9e4b3d0909e4873
|
[
"MIT"
] | null | null | null |
# Calculate the molar mass from a chemical formula
periodensystem={ 'H' : 1.0079, 'He' : 4.0026,
'Li' : 6.941, 'Be' : 9.0122, 'B' : 10.811, 'C' : 12.011, 'N' : 14.007, 'O' : 15.999, 'F' : 18.988, 'Ne' : 20.180,
'Na' : 22.990, 'Mg' : 24.305, 'Al' : 26.982, 'Si' : 28.086, 'P' : 30.974, 'S' : 32.065, 'Cl' : 35.453, 'Ar' : 39.948,
'K' : 39.098, 'Ca' : 40.078, 'Sc' : 44.956, 'Ti' : 47.867, 'V' : 50.942, 'Cr' : 51.996, 'Mn' : 54.938, 'Fe' : 55.845, 'Co' : 58.933, 'Ni' : 58.693, 'Cu' : 63.546, 'Zn' : 65.38, 'Ga' : 69.723, 'Ge' : 72.64, 'As' : 74.922, 'Se' : 78.96, 'Br' : 79.904, 'Kr' : 83.798,
'Rb' : 85.468, 'Sr' : 87.62, 'Y' : 88.906, 'Zr' : 91.224, 'Nb' : 92.906, 'Mo' : 95.96, 'Tc' : 98.91, 'Ru' : 101.07, 'Rh' : 102.91, 'Pd' : 106.42, 'Ag' : 107.87, 'Cd' : 112.41, 'In' : 114.82, 'Sn' : 118.71, 'Sb' : 121.76, 'Te' : 127.60, 'I' : 126.90, 'Xe' : 131.29,
'Cs' : 132.91, 'Ba' : 137.33, 'Hf' : 178.49, 'Ta' : 180.95, 'W' : 183.84, 'Re' : 186.21, 'Os' : 190.23, 'Ir' : 192.22, 'Pt' : 195.08, 'Au' : 196.97, 'Hg' : 200.59, 'Tl' : 204.38, 'Pb' : 207.2, 'Bi' : 208.98, 'Po' : 209.98, 'At' : 210, 'Rn' : 222,
'Fr' : 223, 'Ra' : 226.03, 'Rf' : 261, 'Db' : 262, 'Sg' : 263, 'Bh' : 262, 'Hs' : 265, 'Mt' : 266, 'Ds' : 296, 'Rg' : 272, 'Cn' : 277, 'Nh' : 287, 'Fl' : 289, 'Mc' : 288, 'Lv' : 289, 'Ts' : 293, 'Og' : 294,
'La' : 138.91, 'Ce' : 140.12, 'Pr' : 140.91, 'Nd' : 144.24, 'Pm' : 146.90, 'Sm' : 146.90, 'Eu' : 151.96, 'Gd' : 157.25, 'Tb' : 158.93, 'Dy' : 162.50, 'Ho' : 164.93, 'Er' : 167.26, 'Tm' : 168.93, 'Yb' : 173.05, 'Lu' : 174.97,
'Ac' : 227, 'Th' : 232.04, 'Pa' : 231.04, 'U' : 238.03, 'Np' : 237.05, 'Pu' : 244.10, 'Am' : 243.10, 'Cm' : 247.10, 'Bk' : 247.10, 'Cf' : 251.10, 'Es' : 254.10, 'Fm' : 257.10, 'Md' : 258, 'No' : 259, 'Lr' : 260}
# Determine the number that follows the element symbol
def checknum(i, nummer):
zahl=str(nummer)
global z
z=0
    # Check for additional digits.
    for n in range(i+1, len(sformel)):
        # If a digit is found, append it to the digits collected so far
        if sformel[n].isdecimal():
            zahl+=str(sformel[n])
            z+=1
        else:
            # Stop the loop once a non-digit follows
if not sformel[n].isdecimal():
break
return zahl
# Check whether the element symbol is complete
def checkelement(i, element, gew):
    # (last character of the formula) or (next character is uppercase)
if i == len(sformel)-1 or i < len(sformel)-1 and sformel[i+1].isupper():
gew+=float(periodensystem[element])
return gew
while True:
    print('Enter a chemical formula:')
    sformel=input('Chemical formula: ')
    if not sformel.isalnum():
        print('Element symbols do not contain special characters!')
continue
break
sformel=list(sformel)
i=-1
gew=0
try:
while i != len(sformel)-1:
i+=1
        # Uppercase letter: start a new element symbol
if sformel[i].isupper():
element=sformel[i]
gew=checkelement(i, element, gew)
        # Lowercase letter: append it to the previous element symbol
elif sformel[i].islower():
if element[0].isupper():
element+=sformel[i]
gew=checkelement(i, element, gew)
else:
                print('Error')
        # Digit: multiply the current element's mass accordingly
elif sformel[i].isdecimal():
num=str(checknum(i, sformel[i]))
i+=z
gew+=float(periodensystem[element])*float(num)
element=''
else:
print('Error')
break
    print('The molar mass of', ''.join(sformel), 'is', str(round(gew, 4)) + ' g/mol')
except NameError:
    print('Malformed chemical formula. Examples: CO2, H2O, C9H8O4, CaHPO4')
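# Worked example (illustrative): for the input C9H8O4 (aspirin) the script sums
# 9*12.011 + 8*1.0079 + 4*15.999 = 180.1582 g/mol.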
| 50.039474 | 281 | 0.506179 |
868d1e021d5b45d47108767f8f7aa79bc8b8371d
| 1,791 |
py
|
Python
|
apps/multivers/migrations/0010_conceptorder_conceptorderdrink_conceptorderdrinkline.py
|
LvanArkel/sbzwebsite
|
a26efbb050585312c53010f14f86c23616a8071f
|
[
"BSD-3-Clause"
] | 1 |
2017-01-08T13:21:43.000Z
|
2017-01-08T13:21:43.000Z
|
apps/multivers/migrations/0010_conceptorder_conceptorderdrink_conceptorderdrinkline.py
|
LvanArkel/sbzwebsite
|
a26efbb050585312c53010f14f86c23616a8071f
|
[
"BSD-3-Clause"
] | 17 |
2018-12-03T14:22:14.000Z
|
2021-07-14T15:15:12.000Z
|
apps/multivers/migrations/0010_conceptorder_conceptorderdrink_conceptorderdrinkline.py
|
LvanArkel/sbzwebsite
|
a26efbb050585312c53010f14f86c23616a8071f
|
[
"BSD-3-Clause"
] | 2 |
2018-12-03T14:58:49.000Z
|
2019-12-01T13:24:42.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-04 14:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('multivers', '0009_auto_20180704_1144'),
]
operations = [
migrations.CreateModel(
name='ConceptOrder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='multivers.Customer')),
],
),
migrations.CreateModel(
name='ConceptOrderDrink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('name', models.CharField(max_length=255)),
('locations', models.ManyToManyField(to='multivers.Location')),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='multivers.ConceptOrder')),
],
),
migrations.CreateModel(
name='ConceptOrderDrinkLine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.FloatField()),
('drink', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='multivers.ConceptOrderDrink')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='multivers.Product')),
],
),
]
| 40.704545 | 124 | 0.598548 |
07d57dbd387d64d3f3800858f9a51e88000932d4
| 8,040 |
py
|
Python
|
src/onegov/file/upgrade.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/file/upgrade.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/file/upgrade.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
""" Contains upgrade tasks that are executed when the application is being
upgraded on the server. See :class:`onegov.core.upgrade.upgrade_task`.
"""
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
from copy import copy
from onegov.core.orm import as_selectable
from onegov.core.orm.types import UTCDateTime, JSON
from onegov.core.upgrade import upgrade_task
from onegov.core.utils import normalize_for_url
from onegov.file import File, FileCollection
from onegov.file.attachments import get_svg_size_or_default
from onegov.file.filters import WithPDFThumbnailFilter
from onegov.file.integration import DepotApp
from onegov.file.utils import content_type_from_fileobj
from onegov.file.utils import get_image_size
from onegov.file.utils import word_count
from onegov.pdf.utils import extract_pdf_info
from PIL import Image
from sqlalchemy import Boolean, Column, Integer, Text, text, select
from sqlalchemy.orm import load_only
from sqlalchemy.orm.attributes import flag_modified
@upgrade_task('Add checksum column')
def add_checksum_column(context):
context.operations.add_column(
'files', Column('checksum', Text, nullable=True, index=True)
)
@upgrade_task('Add image size 3')
def add_image_size(context):
images = FileCollection(context.session, type='image')
for image in images.query():
if not hasattr(image.reference, 'size'):
# potentially dangerous and might not work with other storage
# providers, so don't reuse unless you are sure about the
# consequences
image.reference._thaw()
if image.reference.content_type == 'image/svg+xml':
image.reference.size = get_svg_size_or_default(
image.reference.file
)
else:
image.reference.size = get_image_size(
Image.open(image.reference.file)
)
thumbnail_metadata = copy(image.reference.thumbnail_small)
thumbnail_metadata['size'] = get_image_size(
Image.open(
context.app.bound_depot.get(
image.get_thumbnail_id(size='small')
)
)
)
image.reference.thumbnail_small = thumbnail_metadata
flag_modified(image, 'reference')
@upgrade_task('Add files by type and name index')
def add_files_by_type_and_name_index(context):
context.operations.create_index(
'files_by_type_and_name', 'files', ['type', 'name'])
@upgrade_task('Migrate file metadata to JSONB')
def migrate_file_metadata_to_jsonb(context):
context.session.execute("""
ALTER TABLE files
ALTER COLUMN reference
TYPE JSONB USING reference::jsonb
""")
context.operations.drop_index('files_by_type_and_name')
context.add_column_with_defaults(
table='files',
column=Column('order', Text, nullable=False),
default=lambda r: normalize_for_url(r.name))
context.operations.create_index(
'files_by_type_and_order', 'files', ['type', 'order'])
@upgrade_task('Add thumbnails to PDFs')
def add_thumbnails_to_pdfs(context):
if not isinstance(context.app, DepotApp):
return False
depot = context.request.app.bound_depot
files = FileCollection(context.session).query()
files = iter(files.filter(text(
"files.reference->>'content_type' = 'application/pdf'"
)))
pdf_filter = WithPDFThumbnailFilter(
'medium', size=(512, 512), format='png'
)
# make sure that all cores are used for ghostscript
# each thread will keep one ghostscript process busy
max_workers = multiprocessing.cpu_count()
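    # Yield (pdf, raw file content) pairs in chunks sized to the thread pool,
    # so each batch keeps every Ghostscript worker busy.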
def chunks(size=max_workers):
while True:
chunk = []
for n in range(size):
pdf = next(files, None)
if not pdf:
return
chunk.append((pdf, depot.get(pdf.reference.file_id)))
yield chunk
for chunk in chunks():
pdfs, contents = zip(*(chunk))
with ThreadPoolExecutor(max_workers=max_workers) as e:
results = zip(
pdfs,
e.map(pdf_filter.generate_thumbnail, contents)
)
for pdf, thumbnail in results:
# potentially dangerous and might not work with other storage
# providers, so don't reuse unless you are sure about the
# consequences
pdf.reference._thaw()
pdf_filter.store_thumbnail(pdf.reference, thumbnail)
flag_modified(pdf, 'reference')
@upgrade_task('Add publication dates')
def add_publication_dates(context):
context.operations.add_column(
'files', Column('publish_date', UTCDateTime, nullable=True))
context.add_column_with_defaults(
table='files',
column=Column('published', Boolean, nullable=False),
default=True)
@upgrade_task('Add signed property')
def add_signed_property(context):
context.add_column_with_defaults(
table='files',
column=Column('signed', Boolean, nullable=False),
default=False)
@upgrade_task('Reclassify office documents')
def reclassify_office_documents(context):
if not isinstance(context.app, DepotApp):
return False
files = FileCollection(context.session).query()\
.options(load_only('reference'))
with context.stop_search_updates():
for f in files.filter(File.name.op('~*')(r'^.*\.(docx|xlsx|pptx)$')):
content_type = content_type_from_fileobj(f.reference.file)
f._update_metadata(content_type=content_type)
context.session.flush()
@upgrade_task('Add extract and pages column')
def add_extract_and_pages_column(context):
context.operations.add_column(
'files', Column('extract', Text, nullable=True))
context.operations.add_column(
'files', Column('pages', Integer, nullable=True))
@upgrade_task('Extract pdf text of existing files')
def extract_pdf_text_of_existing_files(context):
pdfs = FileCollection(context.session).by_content_type('application/pdf')
for pdf in pdfs:
pdf.pages, pdf.extract = extract_pdf_info(pdf.reference.file)
# potentially dangerous and might not work with other storage
# providers, so don't reuse unless you are sure about the
# consequences
pdf.reference._thaw()
pdf.reference['temporary-pages-count'] = pdf.pages
flag_modified(pdf, 'reference')
@upgrade_task('Add signature_metadata column')
def add_signature_metadata_column(context):
context.operations.add_column(
'files', Column('signature_metadata', JSON, nullable=True))
@upgrade_task('Add stats column')
def add_stats_column(context):
context.operations.add_column(
'files', Column('stats', JSON, nullable=True))
selectable = as_selectable("""
SELECT
id, -- Text
pages -- Integer
FROM files
WHERE reference->>'content_type' = 'application/pdf'
""")
pages = {
f.id: f.pages for f in context.session.execute(
select(selectable.c)
)
}
pdfs = FileCollection(context.session).by_content_type('application/pdf')
for pdf in pdfs:
pdf.stats = {
'pages': pdf.reference.pop('temporary-pages-count', pages[pdf.id]),
'words': word_count(pdf.extract)
}
context.session.flush()
context.operations.drop_column('files', 'pages')
@upgrade_task('Add publication column')
def add_publication_column(context):
if not context.has_column('files', 'publication'):
context.operations.add_column(
'files',
Column(
'publication',
Boolean,
nullable=False,
default=False,
server_default='FALSE'
)
)
| 31.042471 | 79 | 0.651493 |
07f36396019c9f26deab72fab94dbd2d8ca01c06
| 547 |
py
|
Python
|
crawlab/worker.py
|
anhilo/crawlab
|
363f4bf7a4ccc192a99850998c1bd0fc363832a1
|
[
"BSD-3-Clause"
] | 1 |
2019-08-20T14:26:39.000Z
|
2019-08-20T14:26:39.000Z
|
crawlab/worker.py
|
anhilo/crawlab
|
363f4bf7a4ccc192a99850998c1bd0fc363832a1
|
[
"BSD-3-Clause"
] | null | null | null |
crawlab/worker.py
|
anhilo/crawlab
|
363f4bf7a4ccc192a99850998c1bd0fc363832a1
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import os
# make sure the working directory is in system path
file_dir = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.abspath(os.path.join(file_dir, '..'))
sys.path.append(root_path)
from tasks.celery import celery_app
# import necessary tasks
import tasks.spider
import tasks.deploy
if __name__ == '__main__':
if 'win32' in sys.platform:
celery_app.start(argv=['tasks', 'worker', '-P', 'eventlet', '-E', '-l', 'INFO'])
else:
celery_app.start(argv=['tasks', 'worker', '-E', '-l', 'INFO'])
| 27.35 | 88 | 0.678245 |
ed42b186ba3ca9f281d95a201823fcbdc0bbf605
| 2,376 |
py
|
Python
|
research/nlp/seq2seq/src/utils/loss_monitor.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/nlp/seq2seq/src/utils/loss_monitor.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/nlp/seq2seq/src/utils/loss_monitor.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Loss monitor."""
import time
from mindspore.train.callback import Callback
from config import Seq2seqConfig
class LossCallBack(Callback):
"""
    Monitor the loss during training.
    If the loss is NAN or INF, training should be terminated.
    Note:
        If per_print_times is 0, the loss is not printed.
    Args:
        per_print_times (int): Print the loss every `per_print_times` steps. Default: 1.
"""
time_stamp_init = False
time_stamp_first = 0
def __init__(self, config: Seq2seqConfig, per_print_times: int = 1):
super(LossCallBack, self).__init__()
if not isinstance(per_print_times, int) or per_print_times < 0:
raise ValueError("print_step must be int and >= 0.")
self.config = config
self._per_print_times = per_print_times
if not self.time_stamp_init:
self.time_stamp_first = self._get_ms_timestamp()
self.time_stamp_init = True
def step_end(self, run_context):
"""step end."""
cb_params = run_context.original_args()
file_name = "./loss.log"
with open(file_name, "a+") as f:
time_stamp_current = self._get_ms_timestamp()
f.write("time: {}, epoch: {}, step: {}, outputs: [loss: {}, overflow: {}, loss scale value: {} ].\n".format(
time_stamp_current - self.time_stamp_first,
cb_params.cur_epoch_num,
cb_params.cur_step_num,
str(cb_params.net_outputs[0].asnumpy()),
str(cb_params.net_outputs[1].asnumpy()),
str(cb_params.net_outputs[2].asnumpy())
))
@staticmethod
def _get_ms_timestamp():
t = time.time()
return int(round(t * 1000))
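# Usage sketch (illustrative, not part of this module): the callback is intended to
# be passed to MindSpore's Model.train(), e.g.
#   model.train(epoch_size, train_dataset, callbacks=[LossCallBack(config)])
# so that loss, overflow flag and loss scale value are appended to ./loss.log each step.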
| 35.462687 | 120 | 0.630471 |
ed885b8c33209e84bb25fde1cdadb83524732f1d
| 6,663 |
py
|
Python
|
notebooks_and_scripts/graph_miner/repositories/kg_obo_graph_repository.py
|
LucaCappelletti94/EnsmallenGraph
|
572532b6d3f4352bf58f9ccca955376acd95fd89
|
[
"MIT"
] | null | null | null |
notebooks_and_scripts/graph_miner/repositories/kg_obo_graph_repository.py
|
LucaCappelletti94/EnsmallenGraph
|
572532b6d3f4352bf58f9ccca955376acd95fd89
|
[
"MIT"
] | null | null | null |
notebooks_and_scripts/graph_miner/repositories/kg_obo_graph_repository.py
|
LucaCappelletti94/EnsmallenGraph
|
572532b6d3f4352bf58f9ccca955376acd95fd89
|
[
"MIT"
] | null | null | null |
"""Sub-module handling the retrieval and building of graphs from KG-OBO."""
from typing import List, Dict
import os
import requests
from bs4 import BeautifulSoup
import yaml
from .graph_repository import GraphRepository
from urllib.parse import urljoin
class KGOBOGraphRepository(GraphRepository):
def __init__(self):
"""Create new KG-OBO Graph Repository object."""
super().__init__()
self._data = self.get_data()
def get_data(self) -> Dict:
"""Returns metadata mined from the KGHub repository."""
mined_data = {}
root_url = "https://kg-hub.berkeleybop.io/kg-obo/"
yaml_url = urljoin(root_url, "tracking.yaml")
graph_url_placeholder = urljoin(
root_url,
"{graph_name}/{version}/{graph_name}_kgx_tsv.tar.gz"
)
graph_data = {
graph: data
for graph, data in yaml.safe_load(
requests.get(yaml_url).content.decode('utf-8')
)["ontologies"].items()
if data["current_version"] != "NA"
}
for graph_name, data in graph_data.items():
versions = [
data["current_version"],
*(
[e["version"] for e in data["archive"]] if "archive" in data else []
)
]
callable_graph_name = graph_name.upper()
mined_data[callable_graph_name] = {}
for version in versions:
if "\n" in version:
continue
graph_url = graph_url_placeholder.format(
graph_name=graph_name,
version=version
)
mined_data[callable_graph_name][version] = {
"urls": [graph_url],
"arguments": {
"edge_path": "{graph_name}_kgx_tsv/{graph_name}_kgx_tsv_edges.tsv".format(graph_name=graph_name),
"node_path": "{graph_name}_kgx_tsv/{graph_name}_kgx_tsv_nodes.tsv".format(graph_name=graph_name),
"name": callable_graph_name,
"sources_column": "subject",
"destinations_column": "object",
"edge_list_edge_types_column": "predicate",
"nodes_column": "id",
"node_list_node_types_column": "category",
"node_types_separator": "|",
"node_list_is_correct": True,
"edge_list_is_correct": True,
}
}
if len(mined_data[callable_graph_name]) == 0:
mined_data.pop(callable_graph_name)
return mined_data
def build_stored_graph_name(self, partial_graph_name: str) -> str:
"""Return built graph name.
Parameters
-----------------------
partial_graph_name: str,
Partial graph name to be built.
Returns
-----------------------
Complete name of the graph.
"""
return partial_graph_name
def get_formatted_repository_name(self) -> str:
"""Return formatted repository name."""
return "KGOBO"
def get_graph_arguments(
self,
graph_name: str,
version: str
) -> List[str]:
"""Return arguments for the given graph and version.
Parameters
-----------------------
graph_name: str,
Name of graph to retrievel arguments for.
version: str,
Version to retrieve this information for.
Returns
-----------------------
The arguments list to use to build the graph.
"""
return self._data[graph_name][version]["arguments"]
def get_graph_versions(
self,
graph_name: str,
) -> List[str]:
"""Return list of versions of the given graph.
Parameters
-----------------------
graph_name: str,
Name of graph to retrieve versions for.
Returns
-----------------------
List of versions for the given graph.
"""
return list(self._data[graph_name].keys())
def get_graph_urls(
self,
graph_name: str,
version: str
) -> List[str]:
"""Return urls for the given graph and version.
Parameters
-----------------------
graph_name: str,
Name of graph to retrievel URLs for.
version: str,
Version to retrieve this information for.
Returns
-----------------------
The urls list from where to download the graph data.
"""
return self._data[graph_name][version]["urls"]
def get_graph_references(self, graph_name: str, version: str) -> List[str]:
"""Return url for the given graph.
Parameters
-----------------------
graph_name: str,
Name of graph to retrievel URLs for.
version: str,
Version to retrieve this information for.
Returns
-----------------------
Citations relative to the Kg graphs.
"""
return [
open(
"{}/models/kgobo.bib".format(
os.path.dirname(os.path.abspath(__file__)),
graph_name
),
"r"
).read()
]
def get_graph_paths(
self,
graph_name: str,
version: str
) -> List[str]:
"""Return paths for the given graph and version.
Parameters
-----------------------
graph_name: str,
Name of graph to retrievel paths for.
version: str,
Version to retrieve this information for.
Returns
-----------------------
The paths list from where to download the graph data.
"""
return None
def get_graph_list(self) -> List[str]:
"""Return list of graph names."""
return list(self._data.keys())
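# Usage sketch (illustrative; the graph name below is a hypothetical example):
#   repo = KGOBOGraphRepository()
#   versions = repo.get_graph_versions("GO")
#   urls = repo.get_graph_urls("GO", versions[0])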
| 30.705069 | 121 | 0.512382 |
71ffefff6d77a99b588f62a319efabc2857435d2
| 4,461 |
py
|
Python
|
Project3/test_main.py
|
veronikadim99/Wissenschaftliches-Rechnen
|
3b7c86e9488bf434f3ad1d590f5b9bb9b4cdf218
|
[
"Apache-2.0"
] | null | null | null |
Project3/test_main.py
|
veronikadim99/Wissenschaftliches-Rechnen
|
3b7c86e9488bf434f3ad1d590f5b9bb9b4cdf218
|
[
"Apache-2.0"
] | null | null | null |
Project3/test_main.py
|
veronikadim99/Wissenschaftliches-Rechnen
|
3b7c86e9488bf434f3ad1d590f5b9bb9b4cdf218
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import unittest
from main import power_iteration, load_images, setup_data_matrix, calculate_pca, accumulated_energy, project_faces, \
identify_faces
from lib import visualize_eigenfaces, plot_singular_values_and_energy, plot_identified_faces
class Tests(unittest.TestCase):
def test_0_power_iteration(self):
T = np.array([[0.5, 0.25, 0.25], [0.5, 0.0, 0.5], [0.25, 0.25, 0.5]])
evector, residuals = power_iteration(T.transpose())
evalues_ref, evectors_ref = np.linalg.eig(T.transpose())
self.assertTrue(np.abs(evalues_ref[0] - 1.0) < 10.0 * np.finfo(T.dtype).eps)
product = np.dot(evector, evectors_ref[:, 0])
self.assertTrue(np.isclose(np.abs(product), 1.0)) # Check for right direction
self.assertTrue(np.isclose(np.linalg.norm(evector), 1.0)) # Check for unit length
# plot convergence behavior
plt.plot(residuals, '-rx')
plt.yscale('log')
plt.xscale('log')
plt.xlabel('Number of Iteration')
plt.ylabel('Estimated Error')
plt.legend(['T'])
plt.show()
def setup_tests(self, stage, cutoff_threshold=0.8):
# Read training data set
self.imgs_train, self.dim_x, self.dim_y = load_images("./data/train/")
if stage == "load_images":
return
# compute data matrix
self.D = setup_data_matrix(self.imgs_train)
if stage == "setup_data_matrix":
return
# Perform principal component analysis
self.pcs, self.sv, self.mean_data = calculate_pca(self.D)
if stage == "calculate_pca":
return
        # compute number of components needed to reach the given share of spectral energy (default 80%)
self.k = accumulated_energy(self.sv, cutoff_threshold)
if stage == "accumulated_energy":
return
# cut off number of pcs if desired
self.pcs = self.pcs[0:self.k, :]
# compute coefficients of input in eigenbasis
self.coeffs_train = project_faces(self.pcs, self.imgs_train, self.mean_data)
if stage == "project_faces":
return
# perform classical face recognition
self.scores, self.imgs_test, self.coeffs_test = identify_faces(self.coeffs_train, self.pcs, self.mean_data,
'./data/test/')
def test_1_load_images(self):
self.setup_tests("load_images")
# check if images are loaded properly
self.assertTrue(len(self.imgs_train) == 60)
self.assertTrue(isinstance(self.imgs_train[0], np.ndarray))
self.assertTrue(
self.dim_x == self.imgs_train[0].shape[1] == 98 and self.dim_y == self.imgs_train[0].shape[0] == 116)
def test_2_setup_data_matrix(self):
self.setup_tests("setup_data_matrix")
self.assertTrue(isinstance(self.D, np.ndarray))
self.assertTrue(self.D.shape[0] == len(self.imgs_train) and self.D.shape[1] == self.dim_x * self.dim_y == 11368)
def test_3_calculate_pca(self):
self.setup_tests("calculate_pca")
# Test certain properties of the pca
self.assertTrue(
isinstance(self.pcs, np.ndarray) and isinstance(self.sv, np.ndarray) and isinstance(self.mean_data,
np.ndarray))
self.assertTrue(self.pcs.shape[0] == len(self.imgs_train) == 60)
self.assertTrue(self.pcs.shape[1] == self.dim_x * self.dim_y == 11368)
self.assertTrue(self.mean_data.shape[0] == 11368)
# Visualize the eigenfaces/principal components
visualize_eigenfaces(10, self.pcs, self.sv, self.dim_x, self.dim_y)
def test_4_accumulated_energy(self):
self.setup_tests("accumulated_energy")
self.assertTrue(self.k > 0)
plot_singular_values_and_energy(self.sv, self.k)
def test_5_project_faces(self):
self.setup_tests("project_faces")
self.assertTrue(self.coeffs_train.shape == (len(self.imgs_train), self.pcs.shape[0]))
def test_6_identify_faces(self):
self.setup_tests("identify_faces")
self.assertTrue(self.scores.shape == (self.coeffs_train.shape[0], self.coeffs_test.shape[0]) != (1, 1))
plot_identified_faces(self.scores, self.imgs_train, self.imgs_test, self.pcs, self.coeffs_test, self.mean_data)
if __name__ == '__main__':
unittest.main()
| 41.691589 | 120 | 0.635956 |
92bb9babe840d176a211d0b6f3653a9fa6fad339
| 11,633 |
py
|
Python
|
Packs/MailListener_-_POP3/Integrations/MailListener_POP3/MailListener_POP3.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/MailListener_-_POP3/Integrations/MailListener_POP3/MailListener_POP3.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/MailListener_-_POP3/Integrations/MailListener_POP3/MailListener_POP3.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import poplib
import base64
import quopri
from email.parser import Parser
from htmlentitydefs import name2codepoint
from HTMLParser import HTMLParser, HTMLParseError
''' GLOBALS/PARAMS '''
SERVER = demisto.params().get('server', '')
EMAIL = demisto.params().get('email', '')
PASSWORD = demisto.params().get('password', '')
PORT = int(demisto.params().get('port', '995'))
SSL = demisto.params().get('ssl')
FETCH_TIME = demisto.params().get('fetch_time', '7 days')
# pop3 server connection object.
pop3_server_conn = None # type: ignore
TIME_REGEX = re.compile(r'^([\w,\d: ]*) (([+-]{1})(\d{2}):?(\d{2}))?[\s\w\(\)]*$')
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
def connect_pop3_server():
global pop3_server_conn
if pop3_server_conn is None:
if SSL:
pop3_server_conn = poplib.POP3_SSL(SERVER, PORT) # type: ignore
else:
pop3_server_conn = poplib.POP3(SERVER, PORT) # type: ignore
pop3_server_conn.getwelcome() # type: ignore
pop3_server_conn.user(EMAIL) # type: ignore
pop3_server_conn.pass_(PASSWORD) # type: ignore
def close_pop3_server_connection():
global pop3_server_conn
if pop3_server_conn is not None:
pop3_server_conn.quit()
pop3_server_conn = None
def get_user_emails():
_, mails_list, _ = pop3_server_conn.list() # type: ignore
mails = []
index = ''
for mail in mails_list:
try:
index = mail.split(' ')[0]
(resp_message, lines, octets) = pop3_server_conn.retr(index) # type: ignore
msg_content = unicode(b'\r\n'.join(lines), errors='ignore').encode("utf-8")
msg = Parser().parsestr(msg_content)
msg['index'] = index
mails.append(msg)
except Exception:
demisto.error("Failed to get email with index " + index + 'from the server.')
raise
return mails
def get_attachment_name(headers):
name = headers.get('content-description', '')
if re.match(r'^.+\..{3,5}$', name):
return name
content_disposition = headers.get('content-disposition', '')
if content_disposition:
m = re.search('filename="(.*?)"', content_disposition)
if m:
name = m.group(1)
if re.match('^.+\..{3,5}$', name):
return name
extension = re.match(r'.*[\\/]([\d\w]{2,4}).*', headers.get('content-type', 'txt')).group(1) # type: ignore
return name + '.' + extension
def parse_base64(text):
if re.match("^=?.*?=$", text):
res = re.search('=\?.*?\?[A-Z]{1}\?(.*?)\?=', text, re.IGNORECASE)
if res:
res = res.group(1)
return base64.b64decode(res) # type: ignore
return text
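# Illustrative example: a base64 MIME encoded-word such as '=?UTF-8?B?aGVsbG8=?='
# is decoded to 'hello'; otherwise the string is returned unchanged.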
class TextExtractHtmlParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self._texts = [] # type: list
self._ignore = False
def handle_starttag(self, tag, _):
if tag in ('p', 'br') and not self._ignore:
self._texts.append('\n')
elif tag in ('script', 'style'):
self._ignore = True
def handle_startendtag(self, tag, _):
if tag in ('br', 'tr') and not self._ignore:
self._texts.append('\n')
def handle_endtag(self, tag):
if tag in ('p', 'tr'):
self._texts.append('\n')
elif tag in ('script', 'style'):
self._ignore = False
def handle_data(self, data):
if data and not self._ignore:
stripped = data.strip()
if stripped:
self._texts.append(re.sub(r'\s+', ' ', stripped))
def handle_entityref(self, name):
if not self._ignore and name in name2codepoint:
self._texts.append(unichr(name2codepoint[name]))
def handle_charref(self, name):
if not self._ignore:
if name.startswith('x'):
c = unichr(int(name[1:], 16))
else:
c = unichr(int(name))
self._texts.append(c)
def get_text(self):
return "".join(self._texts)
def html_to_text(html):
parser = TextExtractHtmlParser()
try:
parser.feed(html)
parser.close()
except HTMLParseError:
pass
return parser.get_text()
def get_email_context(email_data):
context_headers = email_data._headers
context_headers = [{'Name': v[0], 'Value': v[1]}
for v in context_headers]
headers = dict([(h['Name'].lower(), h['Value']) for h in context_headers])
context = {
'Mailbox': EMAIL,
'ID': email_data.get('Message-ID', 'None'),
'Labels': ', '.join(email_data.get('labelIds', '')),
'Headers': context_headers,
'Format': headers.get('content-type', '').split(';')[0],
'Subject': parse_base64(headers.get('subject')),
'Body': email_data._payload,
'From': headers.get('from'),
'To': headers.get('to'),
'Cc': headers.get('cc', []),
'Bcc': headers.get('bcc', []),
'Date': headers.get('date', ''),
'Html': None,
}
if 'text/html' in context['Format']:
context['Html'] = context['Body']
context['Body'] = html_to_text(context['Body'])
if 'multipart' in context['Format']:
context['Body'], context['Html'], context['Attachments'] = parse_mail_parts(email_data._payload)
context['Attachment Names'] = ', '.join(
[attachment['Name'] for attachment in context['Attachments']])
raw = dict(email_data)
raw['Body'] = context['Body']
context['RawData'] = json.dumps(raw)
return context, headers
def parse_mail_parts(parts):
body = unicode("", "utf-8")
html = unicode("", "utf-8")
attachments = [] # type: ignore
for part in parts:
context_headers = part._headers
context_headers = [{'Name': v[0], 'Value': v[1]}
for v in context_headers]
headers = dict([(h['Name'].lower(), h['Value']) for h in context_headers])
content_type = headers.get('content-type', 'text/plain')
is_attachment = headers.get('content-disposition', '').startswith('attachment')\
or headers.get('x-attachment-id') or "image" in content_type
if 'multipart' in content_type or isinstance(part._payload, list):
part_body, part_html, part_attachments = parse_mail_parts(part._payload)
body += part_body
html += part_html
attachments.extend(part_attachments)
elif not is_attachment:
if headers.get('content-transfer-encoding') == 'base64':
text = base64.b64decode(part._payload).decode('utf-8', 'replace')
elif headers.get('content-transfer-encoding') == 'quoted-printable':
str_utf8 = part._payload.decode('cp1252')
str_utf8 = str_utf8.encode('utf-8')
decoded_string = quopri.decodestring(str_utf8)
text = unicode(decoded_string, errors='ignore').encode("utf-8")
else:
str_utf8 = part._payload.decode('cp1252')
str_utf8 = str_utf8.encode('utf-8')
text = quopri.decodestring(str_utf8)
if not isinstance(text, unicode):
text = text.decode('unicode-escape')
if 'text/html' in content_type:
html += text
else:
body += text
else:
attachments.append({
'ID': headers.get('x-attachment-id', 'None'),
'Name': get_attachment_name(headers),
'Data': part._payload
})
return body, html, attachments
def parse_time(t):
base_time, _, _, _, _ = TIME_REGEX.findall(t)[0]
return datetime.strptime(base_time, '%a, %d %b %Y %H:%M:%S').isoformat() + 'Z'
def create_incident_labels(parsed_msg, headers):
labels = [
{'type': 'Email/ID', 'value': parsed_msg['ID']},
{'type': 'Email/subject', 'value': parsed_msg['Subject']},
{'type': 'Email/text', 'value': parsed_msg['Body']},
{'type': 'Email/from', 'value': parsed_msg['From']},
{'type': 'Email/html', 'value': parsed_msg['Html']},
]
labels.extend([{'type': 'Email/to', 'value': to}
for to in headers.get('To', '').split(',')])
labels.extend([{'type': 'Email/cc', 'value': cc}
for cc in headers.get('Cc', '').split(',')])
labels.extend([{'type': 'Email/bcc', 'value': bcc}
for bcc in headers.get('Bcc', '').split(',')])
for key, val in headers.items():
labels.append({'type': 'Email/Header/' + key, 'value': val})
return labels
@logger
def mail_to_incident(msg):
parsed_msg, headers = get_email_context(msg)
file_names = []
for attachment in parsed_msg.get('Attachments', []):
file_data = base64.urlsafe_b64decode(attachment['Data'].encode('ascii'))
# save the attachment
file_result = fileResult(attachment['Name'], file_data)
# check for error
if file_result['Type'] == entryTypes['error']:
demisto.error(file_result['Contents'])
raise Exception(file_result['Contents'])
file_names.append({
'path': file_result['FileID'],
'name': attachment['Name'],
})
return {
'name': parsed_msg['Subject'],
'details': parsed_msg['Body'],
'labels': create_incident_labels(parsed_msg, headers),
'occurred': parse_time(parsed_msg['Date']),
'attachment': file_names,
'rawJSON': parsed_msg['RawData']
}
def fetch_incidents():
last_run = demisto.getLastRun()
last_fetch = last_run.get('time')
# handle first time fetch
if last_fetch is None:
last_fetch, _ = parse_date_range(FETCH_TIME, date_format=DATE_FORMAT)
last_fetch = datetime.strptime(last_fetch, DATE_FORMAT)
current_fetch = last_fetch
incidents = []
messages = get_user_emails()
for msg in messages:
try:
incident = mail_to_incident(msg)
except Exception:
demisto.error("failed to create incident from email, index = {}, subject = {}, date = {}".format(
msg['index'], msg['subject'], msg['date']))
raise
temp_date = datetime.strptime(
incident['occurred'], DATE_FORMAT)
# update last run
if temp_date > last_fetch:
last_fetch = temp_date + timedelta(seconds=1)
# avoid duplication due to weak time query
if temp_date > current_fetch:
incidents.append(incident)
demisto.setLastRun({'time': last_fetch.isoformat().split('.')[0] + 'Z'})
return demisto.incidents(incidents)
def test_module():
resp_message, _, _ = pop3_server_conn.list() # type: ignore
if "OK" in resp_message:
demisto.results('ok')
''' COMMANDS MANAGER / SWITCH PANEL '''
def main():
try:
handle_proxy()
connect_pop3_server()
if demisto.command() == 'test-module':
# This is the call made when pressing the integration test button.
test_module()
if demisto.command() == 'fetch-incidents':
fetch_incidents()
sys.exit(0)
except Exception as e:
LOG(str(e))
LOG.print_log()
raise
finally:
close_pop3_server_connection()
# python2 uses __builtin__ python3 uses builtins
if __name__ == "__builtin__" or __name__ == "builtins":
main()
| 31.611413 | 112 | 0.581621 |
13a56c1a124274ac135fb4660130f866c08d3e0b
| 733 |
py
|
Python
|
tests/onegov/core/test_datamanager.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/core/test_datamanager.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/core/test_datamanager.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import os
import transaction
from onegov.core.datamanager import FileDataManager
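# FileDataManager defers the actual write: the file only appears on disk once the
# surrounding transaction commits, and is never written if the transaction aborts.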
def test_file_data_manager_commit(temporary_directory):
data = 'data'.encode('utf-8')
path = '{}/a.txt'.format(temporary_directory)
FileDataManager.write_file(data, path)
assert not os.path.exists(path)
transaction.commit()
assert os.path.exists(path)
with open(path) as file:
assert file.read() == 'data'
os.remove(path)
def test_file_data_manager_abort(temporary_directory):
data = 'data'.encode('utf-8')
path = '{}/b.txt'.format(temporary_directory)
FileDataManager.write_file(data, path)
assert not os.path.exists(path)
transaction.abort()
assert not os.path.exists(path)
| 22.212121 | 55 | 0.706685 |
b9abc0d330e08eb91d69a0e9a1e877248f5ddf1a
| 7,382 |
py
|
Python
|
applications/sentiment_analysis/pp_minilm/train.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
applications/sentiment_analysis/pp_minilm/train.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
applications/sentiment_analysis/pp_minilm/train.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append("../")
import os
import argparse
import warnings
from functools import partial
import paddle
import paddle.nn.functional as F
from paddlenlp.metrics.glue import AccuracyAndF1
from paddlenlp.datasets import load_dataset
from paddlenlp.data import Pad, Stack, Tuple
from paddlenlp.transformers import PPMiniLMForSequenceClassification, PPMiniLMTokenizer, LinearDecayWithWarmup
from evaluate import evaluate
from utils import set_seed
from data import read, load_dict, convert_example_to_feature
warnings.filterwarnings("ignore")
def train():
# set running envir
paddle.set_device(args.device)
set_seed(args.seed)
if not os.path.exists(args.checkpoints):
os.mkdir(args.checkpoints)
# load and process data
label2id, id2label = load_dict(args.label_path)
train_ds = load_dataset(read, data_path=args.train_path, lazy=False)
dev_ds = load_dataset(read, data_path=args.dev_path, lazy=False)
tokenizer = PPMiniLMTokenizer.from_pretrained(args.base_model_name)
trans_func = partial(convert_example_to_feature,
tokenizer=tokenizer,
label2id=label2id,
max_seq_len=args.max_seq_len)
train_ds = train_ds.map(trans_func, lazy=False)
dev_ds = dev_ds.map(trans_func, lazy=False)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id, dtype="int64"),
Pad(axis=0, pad_val=tokenizer.pad_token_type_id, dtype="int64"),
Stack(dtype="int64"), Stack(dtype="int64")): fn(samples)
train_batch_sampler = paddle.io.BatchSampler(train_ds,
batch_size=args.batch_size,
shuffle=True)
dev_batch_sampler = paddle.io.BatchSampler(dev_ds,
batch_size=args.batch_size,
shuffle=False)
train_loader = paddle.io.DataLoader(train_ds,
batch_sampler=train_batch_sampler,
collate_fn=batchify_fn)
dev_loader = paddle.io.DataLoader(dev_ds,
batch_sampler=dev_batch_sampler,
collate_fn=batchify_fn)
# configure model training
model = PPMiniLMForSequenceClassification.from_pretrained(
args.base_model_name, num_classes=len(label2id))
num_training_steps = len(train_loader) * args.num_epochs
lr_scheduler = LinearDecayWithWarmup(learning_rate=args.learning_rate,
total_steps=num_training_steps,
warmup=args.warmup_proportion)
decay_params = [
p.name for n, p in model.named_parameters()
if not any(nd in n for nd in ["bias", "norm"])
]
grad_clip = paddle.nn.ClipGradByGlobalNorm(args.max_grad_norm)
optimizer = paddle.optimizer.AdamW(
learning_rate=lr_scheduler,
parameters=model.parameters(),
weight_decay=args.weight_decay,
apply_decay_param_fun=lambda x: x in decay_params,
grad_clip=grad_clip)
metric = AccuracyAndF1()
# start to train model
global_step, best_f1 = 1, 0.
model.train()
for epoch in range(1, args.num_epochs + 1):
for batch_data in train_loader():
input_ids, token_type_ids, _, labels = batch_data
# logits: batch_size, seql_len, num_tags
logits = model(input_ids, token_type_ids=token_type_ids)
loss = F.cross_entropy(logits, labels)
loss.backward()
lr_scheduler.step()
optimizer.step()
optimizer.clear_grad()
if global_step > 0 and global_step % args.log_steps == 0:
print(
f"epoch: {epoch} - global_step: {global_step}/{num_training_steps} - loss:{loss.numpy().item():.6f}"
)
if (global_step > 0 and global_step % args.eval_steps
== 0) or global_step == num_training_steps:
accuracy, precision, recall, f1 = evaluate(
model, dev_loader, metric)
model.train()
if f1 > best_f1:
print(
f"best F1 performence has been updated: {best_f1:.5f} --> {f1:.5f}"
)
best_f1 = f1
paddle.save(model.state_dict(),
f"{args.checkpoints}/best.pdparams")
print(
f'evalution result: accuracy:{accuracy:.5f} precision: {precision:.5f}, recall: {recall:.5f}, F1: {f1:.5f}'
)
global_step += 1
paddle.save(model.state_dict(), f"{args.checkpoints}/final.pdparams")
if __name__ == "__main__":
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--base_model_name", type=str, default=None, help="The name of base model.")
parser.add_argument("--train_path", type=str, default=None, help="The path of train set.")
parser.add_argument("--dev_path", type=str, default=None, help="The path of dev set.")
parser.add_argument("--label_path", type=str, default=None, help="The path of label dict.")
parser.add_argument("--num_epochs", type=int, default=3, help="Number of epoches for fine-tuning.")
parser.add_argument("--batch_size", type=int, default=32, help="Batch size per GPU/CPU for training.")
parser.add_argument("--max_seq_len", type=int, default=512, help="The maximum total input sequence length after tokenization.")
parser.add_argument("--learning_rate", type=float, default=5e-5, help="The initial learning rate for optimizer.")
parser.add_argument("--weight_decay", type=float, default=0.01, help="Weight decay rate for L2 regularizer.")
parser.add_argument("--max_grad_norm", type=float, default=1.0, help="Max grad norm to clip gradient.")
parser.add_argument("--warmup_proportion", type=float, default=0.1, help="Warmup proportion params for warmup strategy")
parser.add_argument("--log_steps", type=int, default=50, help="Frequency of printing log.")
parser.add_argument("--eval_steps", type=int, default=500, help="Frequency of performing evaluation.")
parser.add_argument("--seed", type=int, default=1000, help="Random seed for initialization.")
parser.add_argument('--device', choices=['cpu', 'gpu'], default="gpu", help="Select which device to train model, defaults to gpu.")
parser.add_argument("--checkpoints", type=str, default=None, help="Directory to save checkpoint.")
args = parser.parse_args()
# yapf: enable
train()
| 45.850932 | 135 | 0.643999 |
bc7cfd5b8651bca24d23beb4c20091b29ec53fac
| 2,275 |
py
|
Python
|
pySchloss/schloss_config.py
|
ruum42/pySchloss
|
f1415b48187ef0966019051e7681ae59a274215b
|
[
"Apache-2.0"
] | 12 |
2015-02-14T15:15:40.000Z
|
2020-06-23T12:32:05.000Z
|
pySchloss/schloss_config.py
|
hassoon1986/pySchloss
|
f1415b48187ef0966019051e7681ae59a274215b
|
[
"Apache-2.0"
] | null | null | null |
pySchloss/schloss_config.py
|
hassoon1986/pySchloss
|
f1415b48187ef0966019051e7681ae59a274215b
|
[
"Apache-2.0"
] | 7 |
2015-07-29T18:54:37.000Z
|
2021-01-27T17:24:37.000Z
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
from itertools import combinations
__schloss_data_directory__ = '../data/'
schloss_pickle_file = 'pySchloss.pkl'
schloss_ini_file = 'config.ini'
ini_section_alias = 'Alias'
import os
import os.path
import pickle
import ConfigParser
import re
class ProjectPathNotFound(Exception):
"""Raised when we can't find the project directory."""
def load_config():
data_file = get_data_file(schloss_pickle_file)
if not os.path.isfile(data_file):
return {"alias":{}}
else:
with open(data_file,'rb') as f:
return pickle.load(f)
def save_config(config):
with open(get_data_file(schloss_pickle_file),'wb') as f:
pickle.dump(config, f)
f.close()
def load_ini():
comment = re.compile('^#')
entry = re.compile('(.*)=(.*)$')
ini_file = get_data_file(schloss_ini_file)
ini = {}
if os.path.isfile(ini_file):
with file(ini_file, 'r') as f:
ini = {}
for line in f:
if not comment.findall(line):
e = entry.findall(line)
if e:
kv = e[0]
ini[kv[0].strip()] = kv[1].strip()
return ini
def save_ini(ini):
keys = ini.keys()
keys.sort()
with open(get_data_file(schloss_ini_file), 'w') as configfile:
for k in keys:
configfile.write("{0} = {1}{2}".format(k, ini[k], os.linesep))
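# Illustrative config.ini format handled by load_ini/save_ini:
#   # lines starting with '#' are ignored
#   some_key = some_value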
def get_data_file(*path_segments):
"""Get the full path to a data file.
Returns the path to a file underneath the data directory (as defined by
`get_data_path`). Equivalent to os.path.join(get_data_path(),
*path_segments).
"""
return os.path.join(get_data_path(), *path_segments)
def get_data_path():
"""Retrieve pySchloss data path
This path is by default <hat_wrap_lib_path>/../data/ in trunk
and /usr/share/pySchloss in an installed version but this path
is specified at installation time.
"""
# Get pathname absolute or relative.
path = os.path.join(
os.path.dirname(__file__), __schloss_data_directory__)
abs_data_path = os.path.abspath(path)
if not os.path.exists(abs_data_path):
raise ProjectPathNotFound
return abs_data_path
| 27.743902 | 75 | 0.62989 |
bc9a6425655ae894a596fddd4da8d573705aaa16
| 3,729 |
py
|
Python
|
parser/Parser.py
|
weidler/tyrex
|
76a3d2c36405b1213f230a7a414c2741237933f8
|
[
"MIT"
] | 1 |
2015-12-08T15:11:15.000Z
|
2015-12-08T15:11:15.000Z
|
parser/Parser.py
|
weidler/tyrex
|
76a3d2c36405b1213f230a7a414c2741237933f8
|
[
"MIT"
] | null | null | null |
parser/Parser.py
|
weidler/tyrex
|
76a3d2c36405b1213f230a7a414c2741237933f8
|
[
"MIT"
] | null | null | null |
import re
from html2text import *
class Parser():
"""
Parser class that contains basic functionality for file reading and the main normalization method.
Can be used to parse a single file and get the result. To process multiple files, use MultiParser (subclass).
@parameters
filename string the name/path of the file that is supposed to be normalized
"""
def __init__(self, filename):
"""
@attributes
self.filename string the name/path of the file that is supposed to be normalized
self.text string file content, read into this variable automatically when the object is instantiated
"""
self.filename = filename
self.text = self.readFileAtPath(self.filename)
def readFileAtPath(self, posix_path):
"""
Reads a file at a given path. Looks for utf-8/latin-1 encoding. Converts HTML Markup to Text.
@parameters
posix_path string the concerned filepath at which the method should read
@returns string html-free content of filepath
bool FALSE if encoding unknown or file not found
"""
try:
with open(posix_path, encoding="utf-8") as f: # general encoding
return html2text(f.read())
except UnicodeDecodeError:
try:
with open(posix_path, encoding="latin-1") as f: # german language encoding
return html2text(f.read())
except:
print("DECODE ERROR")
return False
except IOError:
print("FILE NOT FOUND")
return False
except Exception as e:
print("UNKNOWN ERROR\n" + str(e))
return False
def convertToNormalized(self, unnormalized):
"""
Converts a text to its normalized version.
@parameters
unnormalized string the unnormalized text that will be converted
@variables
out string the output string, originally unnormalized text
@returns string normalized text
"""
#sentence bounds
#return unnormalized # skip
phrase = "<s>", "</s>"
#punctuations
punct = "<punct>" # .
question = "<question>" # ?
excl = "<exclamation>" # !
susp = "<suspension>" # ...
comma = "<comma>" # ,
colon = "<colon>" # :
semicolon = "<semicolon>" # ;
think = "<thinking>" # -
#apostroph
#direct = ("<speech>", "</speech>")
#apo = ("<apo>", "</apo>")
#regex
phrase_bound = punct + "|" + question + "|" + excl + "|" + "\n{2,}"
phrase_match = "(?=((" + phrase_bound + "|^)(((.|\s)+?)(" + phrase_bound + "))))"
#ANNOTATING...
#tags
out = re.sub("\.{3,}", susp, unnormalized)
out = re.sub("\.", punct, out)
out = re.sub("\?", question, out)
out = re.sub("\!", excl, out)
out = re.sub("\,", comma, out)
out = re.sub("\:", colon, out)
out = re.sub("\;", semicolon, out)
out = re.sub("\s- ", think, out)
out = re.sub("[\*\_]|\#{1,} ", "", out) # remove markdown
out = re.sub("\[(.*?|\s*?)\]|\||-{2,}|\t|\/", "", out) # remove unnecessary characters
out = re.sub("(\n|^)\s+\n", "\n\n", out) # remove lines only containing whitespaces
out = re.sub("\n +", "\n", out) # remove whitespaces preceding any lines
out = re.sub("^\s+", "", out) # remove initial whitespaces
out = re.sub(" {2,}", " ", out) # reduce multi space
out = out.replace("\\", "")
phrases = re.findall(phrase_match, out)
clean_phrases = [phrases[i][2] for i in range(len(phrases)) if phrases[i][3] != phrases[i-1][3]]
out = "".join([phrase[0] + match + phrase[1] for match in clean_phrases]) #sentence bounds
# order the linebreaks and sentence bounds
while re.search("[\n\r]\</s\>", out) or re.search("\<s\>[\n\r]", out):
out = re.sub("\n\<\/s\>", "</s>\n", out)
out = re.sub("\<s\>[ \t]*\n", "\n<s>", out)
out = re.sub("<s><\/s>", "", out)
#out = re.sub("[^\s]<", lambda match: match[0] + " " + match[1], out) #have all elements separated by space
return out
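
# Hedged usage sketch (not part of the original module): "corpus.html" is a hypothetical
# input file; readFileAtPath() returns False when the file is missing or undecodable,
# so self.text is checked before normalizing.
if __name__ == "__main__":
    parser = Parser("corpus.html")
    if parser.text:
        print(parser.convertToNormalized(parser.text))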
| 30.818182 | 110 | 0.621614 |
4c11e5baccb94977cda069e361af3664723d83d0
| 474 |
py
|
Python
|
documents/normal-distribution-z/generate_numbers.py
|
RalfGuder/LaTeX-examples
|
a1bf9fe422969be1ca4674394ebd2170c07f7693
|
[
"MIT"
] | 1,231 |
2015-01-07T04:04:25.000Z
|
2022-03-31T17:43:29.000Z
|
documents/normal-distribution-z/generate_numbers.py
|
DoubleL61/LaTeX-examples
|
cd0d97f85fadb59b7c6e9062b37a8bf7d725ba0c
|
[
"MIT"
] | 5 |
2015-05-10T13:10:47.000Z
|
2021-05-02T21:28:49.000Z
|
documents/normal-distribution-z/generate_numbers.py
|
DoubleL61/LaTeX-examples
|
cd0d97f85fadb59b7c6e9062b37a8bf7d725ba0c
|
[
"MIT"
] | 400 |
2015-01-05T06:22:18.000Z
|
2022-03-19T04:07:59.000Z
|
#!/usr/bin/env python
"""
Generate the LaTeX code for a table of the PPF of a normal distribution.
PPF stands for Percent point function (inverse of cdf - percentiles).
"""
from scipy.stats import norm
from numpy import arange
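
# Note on the loop below: norm.ppf(0.0) is -inf, so the first cell of the x = 0.0 row
# prints "-inf"; an interior row comes out roughly like
#   \textbf{0.5} & 0.0000 & 0.0251 & 0.0502 & ... \\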
for x in arange(0.0, 1.0, 0.1):
line = "\\textbf{%0.1f} & " % x
values = [norm.ppf(x + dx) for dx in arange(0.00, 0.09 + 0.01, 0.01)]
values = ["%0.4f" % el for el in values]
line += " & ".join(values)
print(line + "\\\\")
| 26.333333 | 73 | 0.618143 |
d5d931ddc81a15284e6bbab913da86a367341866
| 1,620 |
py
|
Python
|
sentinel/vpn/utils.py
|
allagog0x01/sentwg
|
52285ecf2b03c30a78901a29a7af96c8ab5764c8
|
[
"Apache-2.0"
] | null | null | null |
sentinel/vpn/utils.py
|
allagog0x01/sentwg
|
52285ecf2b03c30a78901a29a7af96c8ab5764c8
|
[
"Apache-2.0"
] | null | null | null |
sentinel/vpn/utils.py
|
allagog0x01/sentwg
|
52285ecf2b03c30a78901a29a7af96c8ab5764c8
|
[
"Apache-2.0"
] | null | null | null |
import re
import subprocess
def convert_to_seconds(time_in_words):
secs = 0
def to_secs(s):
mat = re.match(r"((?P<hours>\d+)\s?hour)?\s?((?P<minutes>\d+)\s?min)?\s?((?P<seconds>\d+)\s?sec)?", s)
secs = 0
secs += int(mat.group("hours")) * 3600 if mat.group("hours") else 0
secs += int(mat.group("minutes")) * 60 if mat.group("minutes") else 0
secs += int(mat.group("seconds")) if mat.group("seconds") else 0
return secs
for s in time_in_words.split(','):
secs = secs + to_secs(s)
return secs
def convert_bandwidth(bandwidth):
download, upload = 0.0, 0.0
def to_bytes(num, type):
try:
if 'KiB' in type:
return num * 1024.0
elif 'MiB' in type:
return num * 1024.0 * 1024
elif 'GiB' in type:
return num * 1024.0 * 1024 * 1024
else:
return num
except TypeError as e:
print("The following exception has occurred: {}".format(e))
return None
for s in bandwidth.split(','):
if 'received' in s:
a = s.replace('received', '').strip().split(' ')
upload = to_bytes(float(a[0]), str(a[1]))
if not upload:
return None,"Exception raised"
elif 'sent' in s:
a = s.replace('sent', '').strip().split(' ')
download = to_bytes(float(a[0]), str(a[1]))
if not download:
return None,"Exception raised"
return {
'download': download,
'upload': upload
},None
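
# Hedged usage sketch (not part of the original module): the sample strings follow the
# "N hour, N min, N sec" and "N received, N sent" formats these helpers parse.
if __name__ == '__main__':
    print(convert_to_seconds("1 hour, 2 min, 3 sec"))    # -> 3723
    data, err = convert_bandwidth("1.5 KiB received, 2.0 MiB sent")
    print(data, err)    # byte counts for download/upload, plus an error slot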
| 30.566038 | 110 | 0.511111 |
fc4905d6abd1cf0caa00831384e676c94e297162
| 2,109 |
py
|
Python
|
software/supervisor/views/HandyNbView.py
|
ghsecuritylab/project-powerline
|
6c0ec13bbfc11c3790c506f644db4fe45021440a
|
[
"MIT"
] | null | null | null |
software/supervisor/views/HandyNbView.py
|
ghsecuritylab/project-powerline
|
6c0ec13bbfc11c3790c506f644db4fe45021440a
|
[
"MIT"
] | null | null | null |
software/supervisor/views/HandyNbView.py
|
ghsecuritylab/project-powerline
|
6c0ec13bbfc11c3790c506f644db4fe45021440a
|
[
"MIT"
] | 1 |
2020-03-08T01:50:58.000Z
|
2020-03-08T01:50:58.000Z
|
"""
Numeric keypad view for entering, editing and saving a mobile phone number.
"""
from PyQt5.QtWidgets import QWidget, QPushButton, QGridLayout, QLineEdit
class HandyNbView(QWidget):
def __init__(self, parent):
super(HandyNbView, self).__init__(parent)
self.handy_nb = ''  # saved mobile phone number
self.grid = QGridLayout()
self.grid.setSpacing(2)
self.setLayout(self.grid)
self.nr_edit = QLineEdit(self)
if self.handy_nb == '':
self.nr_edit.setText('Handynummer eingeben')
else:
self.nr_edit.setText(self.handy_nb)
self.grid.addWidget(self.nr_edit, 0, 0, 1, 3)
self.clear_bt = QPushButton('\u232B')
self.clear_bt.clicked.connect(self.clear_nb)
self.clear_bt.setFixedSize(40, 40)
self.grid.addWidget(self.clear_bt, 0, 3)
names = ['7', '8', '9', '*',  # number pad buttons
'4', '5', '6', '0',
'1', '2', '3', '#']
positions = [(i + 1, j) for i in range(3) for j in range(4)]
self.buttons = {}
for position, name in zip(positions, names):
self.buttons[name] = QPushButton(name)
self.buttons[name].setFixedSize(40,40)
self.buttons[name].clicked.connect(self.enter_character)
self.grid.addWidget(self.buttons[name], *position)
self.cancel_bt = QPushButton("Zurück")
self.cancel_bt.clicked.connect(self.cancel_nb)
self.grid.addWidget(self.cancel_bt, 5, 0, 2, 2)
self.save_bt = QPushButton("Speichern")
self.save_bt.clicked.connect(self.save_nb)
self.grid.addWidget(self.save_bt, 5, 2, 2, 2)
self.nr_edit.selectAll()
self.nr_edit.setFocus()
def clear_nb(self):
self.nr_edit.backspace()
def enter_character(self):
character = self.sender().text()
self.nr_edit.insert(character)
def save_nb(self):
self.handy_nb = self.nr_edit.text()
#self.parent().parent().show_home_view()
def cancel_nb(self):
self.nr_edit.setText(self.handy_nb)
self.parent().parent().show_configuration_view()
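
# Hedged usage sketch (not part of the original file): a minimal harness that embeds the
# keypad in a bare QWidget. The real supervisor application provides the parent chain
# used by cancel_nb() (parent().parent().show_configuration_view()), which is not wired
# up here, so only entering and saving a number can be exercised.
if __name__ == '__main__':
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    container = QWidget()
    view = HandyNbView(container)
    container.show()
    sys.exit(app.exec_())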
| 31.477612 | 72 | 0.598388 |
53a3256e928332e13e9971b0e14d74510bc2fb51
| 3,786 |
py
|
Python
|
20-hs-redez-sem/groups/01-decentFS/misc/bacnet1/event.py
|
Kyrus1999/BACnet
|
5be8e1377252166041bcd0b066cce5b92b077d06
|
[
"MIT"
] | 8 |
2020-03-17T21:12:18.000Z
|
2021-12-12T15:55:54.000Z
|
20-hs-redez-sem/groups/01-decentFS/misc/bacnet1/event.py
|
Kyrus1999/BACnet
|
5be8e1377252166041bcd0b066cce5b92b077d06
|
[
"MIT"
] | 2 |
2021-07-19T06:18:43.000Z
|
2022-02-10T12:17:58.000Z
|
20-hs-redez-sem/groups/01-decentFS/misc/bacnet1/event.py
|
Kyrus1999/BACnet
|
5be8e1377252166041bcd0b066cce5b92b077d06
|
[
"MIT"
] | 25 |
2020-03-20T09:32:45.000Z
|
2021-07-18T18:12:59.000Z
|
#!/usr/bin/env python3
# lib/event.py
# Jan 2020 <[email protected]>
''' event data structure (="log entry")
+-event------------------------------------------------------------------+
| +-meta---------------------------------------+ |
| | feed_id, seq_no, h_prev, sign_info, h_cont |, signature, opt_content |
| +--------------------------------------------+ |
+------------------------------------------------------------------------+
event :== cbor( [ meta, signature, opt_content ] )
meta :== cbor( [ feed_id, seq_no, h_prev, sign_info, h_cont ] )
h_prev :== [hash_info, "hash value of prev event's meta field"]
signature :== "signature of meta"
h_cont :== [hash_info, "hash value of opt_content"]
sign_info: enum (0=ed25519)
hash_info: enum (0=sha256)
opt_content :== cbor( data ) # must be bytes so we can compute a hash
# how to start Wireshark with BACnet event parsing:
wireshark -X lua_script:bacnet.lua PCAPFILE
'''
import hashlib
import cbor2
import crypto
# hash info
HASHINFO_SHA256 = 0
HASHINFO_SHA512 = 1
HASHINFO_MD5 = 2
HASHINFO_SHA1 = 3
# ---------------------------------------------------------------------------
def serialize(ds):
return cbor2.dumps(ds)
def deserialize(s):
return cbor2.loads(s)
# ---------------------------------------------------------------------------
class EVENT:
def __init__(self, fid=None, seq=1, hprev=None, content=None,
digestmod='sha256'):
self.wire, self.metabits, self.sinfo = None, None, -1
self.fid, self.seq, self.hprev = fid, seq, hprev
self.contbits = serialize(content)
self.set_digestmod(digestmod)
def set_digestmod(self, digestmod):
self.digestmod = digestmod
self.get_hash = lambda buf: getattr(hashlib,digestmod)(buf).digest()
self.hinfo = {
'md5' : HASHINFO_MD5,
'sha1' : HASHINFO_SHA1,
'sha256' : HASHINFO_SHA256,
'sha512' : HASHINFO_SHA512
}[digestmod]
def from_wire(self, w):
self.wire = w
e = deserialize(w)
self.metabits, self.signature = e[:2]
self.contbits = None if len(e) < 2 else e[2]
self.fid, self.seq, self.hprev, self.sinfo, self.hcont = \
deserialize(self.metabits)[:5]
hval = self.hprev[1] if self.hprev != None else self.hcont[1]
dm = 'sha256'
if len(hval) == 16:
dm = 'md5'
elif len(hval) == 20:
dm = 'sha1'
self.set_digestmod(dm)
def get_ref(self):
return [self.hinfo, self.get_hash(self.metabits)]
def mk_metabits(self, sign_info):
self.sinfo = sign_info
meta = [self.fid, self.seq, self.hprev, self.sinfo,
[self.hinfo, self.get_hash(self.contbits)]]
self.metabits = serialize(meta)
return self.metabits
def to_wire(self, signature):
# must be called after having called mk_metabits()
if self.wire != None:
return self.wire
self.signature = signature
self.wire = serialize([ self.metabits, signature, self.contbits ])
return self.wire
def chk_content(self):
return self.hcont[1] == self.get_hash(self.contbits)  # hcont is [hash_info, hash value]
def content(self):
return None if self.contbits == None \
else deserialize(self.contbits)
def __str__(self):
e = deserialize(self.wire)
e[0] = deserialize(e[0])
e[2] = deserialize(e[2])
return "e - " + str(e)
pass
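
# Hedged usage sketch (not part of the original module): builds an event with a dummy
# signature (a real one would come from the local crypto module) and parses it back
# from its wire encoding.
if __name__ == '__main__':
    e = EVENT(fid=b'demo-feed', seq=1, hprev=None, content={'op': 'demo'})
    e.mk_metabits(sign_info=0)        # 0 = ed25519, per the header comment
    wire = e.to_wire(b'\x00' * 64)    # placeholder signature bytes
    e2 = EVENT()
    e2.from_wire(wire)
    print(e2.content(), e2.chk_content())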
# ----------------------------------------------------------------------
# eof
| 29.811024 | 80 | 0.506075 |
ab189e4fdea78e471d1598412dbe5435326aa0b3
| 3,138 |
py
|
Python
|
src/onegov/town6/views/resource.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/town6/views/resource.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/town6/views/resource.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.core.security import Public, Private
from onegov.org.views.resource import view_resources, get_room_form, \
get_daypass_form, handle_new_room, handle_new_daypass, \
get_resource_form, handle_edit_resource, view_resource, \
handle_cleanup_allocations, view_occupancy, \
view_resource_subscribe, view_export, get_item_form, \
handle_new_resource_item
from onegov.reservation import ResourceCollection, Resource
from onegov.town6 import TownApp
from onegov.org.forms import ResourceCleanupForm, ResourceExportForm
from onegov.town6.layout import ResourcesLayout, ResourceLayout
@TownApp.html(model=ResourceCollection, template='resources.pt',
permission=Public)
def town_view_resources(self, request):
return view_resources(self, request, ResourcesLayout(self, request))
@TownApp.form(model=ResourceCollection, name='new-room',
template='form.pt', permission=Private, form=get_room_form)
def town_handle_new_room(self, request, form):
return handle_new_room(self, request, form, ResourcesLayout(self, request))
@TownApp.form(model=ResourceCollection, name='new-daypass',
template='form.pt', permission=Private, form=get_daypass_form)
def town_handle_new_daypass(self, request, form):
return handle_new_daypass(
self, request, form, ResourcesLayout(self, request))
@TownApp.form(model=ResourceCollection, name='new-daily-item',
template='form.pt', permission=Private, form=get_item_form)
def town_handle_new_resource_item(self, request, form):
return handle_new_resource_item(
self, request, form, ResourcesLayout(self, request))
@TownApp.form(model=Resource, name='edit', template='form.pt',
permission=Private, form=get_resource_form)
def town_handle_edit_resource(self, request, form):
return handle_edit_resource(
self, request, form, ResourceLayout(self, request))
@TownApp.html(model=Resource, template='resource.pt', permission=Public)
def town_view_resource(self, request):
return view_resource(self, request, ResourceLayout(self, request))
@TownApp.form(model=Resource, permission=Private, name='cleanup',
form=ResourceCleanupForm, template='resource_cleanup.pt')
def town_handle_cleanup_allocations(self, request, form):
return handle_cleanup_allocations(
self, request, form, ResourceLayout(self, request))
@TownApp.html(model=Resource, permission=Private, name='occupancy',
template='resource_occupancy.pt')
def town_view_occupancy(self, request):
return view_occupancy(self, request, ResourceLayout(self, request))
@TownApp.html(model=Resource, template='resource-subscribe.pt',
permission=Private, name='subscribe')
def town_view_resource_subscribe(self, request):
return view_resource_subscribe(
self, request, ResourceLayout(self, request))
@TownApp.form(model=Resource, permission=Private, name='export',
template='export.pt', form=ResourceExportForm)
def town_view_export(self, request, form):
return view_export(self, request, form, ResourceLayout(self, request))
| 40.753247 | 79 | 0.758445 |
db61ecd69f978e84c7f3a5a9f70e95563d333e02
| 235 |
py
|
Python
|
7-assets/_SNIPPETS/bryan-guner-gists/pyenum2string/enum-2-string.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/_SNIPPETS/bryan-guner-gists/pyenum2string/enum-2-string.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/_SNIPPETS/bryan-guner-gists/pyenum2string/enum-2-string.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
# Converts an enumeration to a printable string.
#
def enumToString(constants, enum, elem):
all = constants.all_values(enum)
for e in all.keys():
if str(elem) == str(all[e]):
return e
return "<unknown>"
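
# Hedged usage sketch (not part of the original snippet): in the real code "constants"
# is supplied by the host API; FakeConstants is a hypothetical stand-in that just
# returns a name -> value mapping.
class FakeConstants:
    def all_values(self, enum):
        return {"RED": 0, "GREEN": 1, "BLUE": 2}

print(enumToString(FakeConstants(), "Color", 1))    # -> GREEN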
| 26.111111 | 48 | 0.621277 |
91e5ffdbcbeb8fd8095cb23f7f2b1bf4d5539098
| 5,806 |
py
|
Python
|
paddlenlp/datasets/wmt14ende.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
paddlenlp/datasets/wmt14ende.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
paddlenlp/datasets/wmt14ende.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
import collections
import os
import warnings
from paddle.io import Dataset
from paddle.dataset.common import md5file
from paddle.utils.download import get_path_from_url
from paddlenlp.utils.env import DATA_HOME
from . import DatasetBuilder
__all__ = ['WMT14ende']
class WMT14ende(DatasetBuilder):
'''
A dataset for the machine translation task. More specifically, this is the
WMT14 English-to-German translation dataset, which uses commoncrawl, europarl
and news-commentary as the training data and newstest2014 as the test set.
'''
URL = "https://bj.bcebos.com/paddlenlp/datasets/WMT14.en-de.tar.gz"
META_INFO = collections.namedtuple(
'META_INFO', ('src_file', 'tgt_file', 'src_md5', 'tgt_md5'))
SPLITS = {
'train':
META_INFO(
os.path.join("WMT14.en-de", "wmt14_ende_data_bpe",
"train.tok.clean.bpe.33708.en"),
os.path.join("WMT14.en-de", "wmt14_ende_data_bpe",
"train.tok.clean.bpe.33708.de"),
"c7c0b77e672fc69f20be182ae37ff62c",
"1865ece46948fda1209d3b7794770a0a"),
'dev':
META_INFO(
os.path.join("WMT14.en-de", "wmt14_ende_data_bpe",
"newstest2013.tok.bpe.33708.en"),
os.path.join("WMT14.en-de", "wmt14_ende_data_bpe",
"newstest2013.tok.bpe.33708.de"),
"aa4228a4bedb6c45d67525fbfbcee75e",
"9b1eeaff43a6d5e78a381a9b03170501"),
'test':
META_INFO(
os.path.join("WMT14.en-de", "wmt14_ende_data_bpe",
"newstest2014.tok.bpe.33708.en"),
os.path.join("WMT14.en-de", "wmt14_ende_data_bpe",
"newstest2014.tok.bpe.33708.de"),
"c9403eacf623c6e2d9e5a1155bdff0b5",
"0058855b55e37c4acfcb8cffecba1050"),
'dev-eval':
META_INFO(
os.path.join("WMT14.en-de", "wmt14_ende_data",
"newstest2013.tok.en"),
os.path.join("WMT14.en-de", "wmt14_ende_data",
"newstest2013.tok.de"),
"d74712eb35578aec022265c439831b0e",
"6ff76ced35b70e63a61ecec77a1c418f"),
'test-eval':
META_INFO(
os.path.join("WMT14.en-de", "wmt14_ende_data",
"newstest2014.tok.en"),
os.path.join("WMT14.en-de", "wmt14_ende_data",
"newstest2014.tok.de"),
"8cce2028e4ca3d4cc039dfd33adbfb43",
"a1b1f4c47f487253e1ac88947b68b3b8")
}
VOCAB_INFO = [(os.path.join("WMT14.en-de", "wmt14_ende_data_bpe",
"vocab_all.bpe.33708"),
"2fc775b7df37368e936a8e1f63846bb0"),
(os.path.join("WMT14.en-de", "wmt14_ende_data_bpe",
"vocab_all.bpe.33712"),
"de485e3c2e17e23acf4b4b70b54682dd")]
UNK_TOKEN = "<unk>"
BOS_TOKEN = "<s>"
EOS_TOKEN = "<e>"
MD5 = "a2b8410709ff760a3b40b84bd62dfbd8"
def _get_data(self, mode, **kwargs):
default_root = os.path.join(DATA_HOME, self.__class__.__name__)
src_filename, tgt_filename, src_data_hash, tgt_data_hash = self.SPLITS[
mode]
src_fullname = os.path.join(default_root, src_filename)
tgt_fullname = os.path.join(default_root, tgt_filename)
(bpe_vocab_filename, bpe_vocab_hash), (sub_vocab_filename,
sub_vocab_hash) = self.VOCAB_INFO
bpe_vocab_fullname = os.path.join(default_root, bpe_vocab_filename)
sub_vocab_fullname = os.path.join(default_root, sub_vocab_filename)
if (not os.path.exists(src_fullname) or
(src_data_hash and not md5file(src_fullname) == src_data_hash)) or (
not os.path.exists(tgt_fullname) or
(tgt_data_hash and not md5file(tgt_fullname) == tgt_data_hash)
) or (not os.path.exists(bpe_vocab_fullname) or
(bpe_vocab_hash
and not md5file(bpe_vocab_fullname) == bpe_vocab_hash)) or (
not os.path.exists(sub_vocab_fullname) or
(sub_vocab_hash
and not md5file(sub_vocab_fullname) == sub_vocab_hash)):
get_path_from_url(self.URL, default_root, self.MD5)
return src_fullname, tgt_fullname
def _read(self, filename, *args):
src_filename, tgt_filename = filename
with open(src_filename, 'r', encoding='utf-8') as src_f:
with open(tgt_filename, 'r', encoding='utf-8') as tgt_f:
for src_line, tgt_line in zip(src_f, tgt_f):
src_line = src_line.strip()
tgt_line = tgt_line.strip()
if not src_line and not tgt_line:
continue
yield {"en": src_line, "de": tgt_line}
def get_vocab(self):
bpe_vocab_fullname = os.path.join(DATA_HOME, self.__class__.__name__,
self.VOCAB_INFO[0][0])
sub_vocab_fullname = os.path.join(DATA_HOME, self.__class__.__name__,
self.VOCAB_INFO[1][0])
vocab_info = {
'bpe': {
'filepath': bpe_vocab_fullname,
'unk_token': self.UNK_TOKEN,
'bos_token': self.BOS_TOKEN,
'eos_token': self.EOS_TOKEN
},
'benchmark': {
'filepath': sub_vocab_fullname,
'unk_token': self.UNK_TOKEN,
'bos_token': self.BOS_TOKEN,
'eos_token': self.EOS_TOKEN
}
}
return vocab_info
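
# Hedged usage sketch (not part of the original module): loading the dataset through
# PaddleNLP's generic loader. This assumes the usual load_dataset(name, splits=...)
# entry point and triggers a large download on first use; each example is a dict with
# "en"/"de" keys, as yielded by _read() above.
if __name__ == '__main__':
    from paddlenlp.datasets import load_dataset
    train_ds, dev_ds = load_dataset('wmt14ende', splits=('train', 'dev'))
    print(train_ds[0]['en'], '->', train_ds[0]['de'])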
| 43.007407 | 80 | 0.573028 |
72bf9863fc14133026673011d872899fc96220c8
| 690 |
py
|
Python
|
_get_realized_.py
|
paulowiz/AiesecBot
|
ac77cc5426ed6382772603afa8015208020c0fba
|
[
"MIT"
] | 6 |
2019-10-18T17:47:30.000Z
|
2021-03-18T06:04:06.000Z
|
_get_realized_.py
|
paulowiz/AiesecBot
|
ac77cc5426ed6382772603afa8015208020c0fba
|
[
"MIT"
] | 1 |
2020-09-24T08:17:29.000Z
|
2020-09-28T08:16:39.000Z
|
_get_realized_.py
|
paulowiz/AiesecBot
|
ac77cc5426ed6382772603afa8015208020c0fba
|
[
"MIT"
] | 3 |
2019-10-20T18:40:20.000Z
|
2021-04-15T01:27:59.000Z
|
import psycopg2.extras
from controller import RobotRotine as rr
from api import graphqlconsume, querygraphql
import time
import datetime
import numpy as np
import schedule
def job():
robo2 = rr.RobotRotine()
dtfim = np.datetime64(datetime.datetime.now())
dtinit = np.datetime64(dtfim) - np.timedelta64(110, 'm')
print('Realized function')
print(dtinit)
print(dtfim)
print('-')
robo2.ExecutaRotina('date_realized', dtinit,
dtfim, 1)
print('Period executed successfully')
schedule.every(100).minutes.do(job)
print('Waiting for the next interval to run.......')
while True:
schedule.run_pending()
time.sleep(1)
| 25.555556 | 60 | 0.688406 |
72ced540713909e5a3fb6f18cdbe13858a40b871
| 28,056 |
py
|
Python
|
Packs/Workday/Integrations/WorkdayIAMEventsGenerator/WorkdayIAMEventsGenerator.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Workday/Integrations/WorkdayIAMEventsGenerator/WorkdayIAMEventsGenerator.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Workday/Integrations/WorkdayIAMEventsGenerator/WorkdayIAMEventsGenerator.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto  # noqa: F401
from flask import Flask, jsonify
from gevent.pywsgi import WSGIServer
from CommonServerPython import *
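
# Simulated Workday "full report": it seeds the integration context on the first run and
# is what the long-running HTTP endpoint below serves back to XSOAR.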
FIRST_RUN_REPORT = {
"Report_Entry": [
{
"Employee_Type": "Regular",
"Leadership": "Yes-HQ",
"Work_Country_Code": "840",
"Street_Address": "3000 Tannery Way",
"Employment_Status": "Active",
"VP_Flag": "N",
"Mgr_ID": "115069",
"Cost_Center_Description": "Channel Sales",
"GDPR_Country_Flag": "0",
"Director_Flag": "Y",
"Email_-_Primary_Home": "[email protected]",
"First_Name": "Ronny",
"Last_Hire_Date": "10/05/2020",
"People_Manager_Flag": "N",
"Department": "Sales NAM:NAM Channel Sales",
"Workday_ID": "5aa443c785ff10461ac83e5a6be32e1e",
"Postal_Code": "95054",
"Rehired_Employee": "Yes",
"Org_Level_1": "Sales",
"Org_Level_3": "NAM Channel Sales",
"Country_Name": "United States Of America",
"Org_Level_2": "Sales NAM",
"Emp_ID": "100122",
"Job_Family": "Product Management",
"User_Name": "[email protected]",
"Preferred_Name_-_First_Name": "Ronny",
"Prehire_Flag": "False",
"Management_Level_1": "Nikesh Arora",
"Work_Country_Abbrev": "US",
"Management_Level_2": "Timmy Turner",
"Email_Address": "[email protected]",
"Title": "Dir, Product Line Manager",
"City": "Santa Clara",
"Work_State_US_Only": "California",
"Job_Code": "2245",
"PAN_CF_Okta_Location_Region": "Americas",
"Last_Name": "Rahardjo",
"Job_Function": "Product Management Function",
"State": "California",
"Exec_Admin_Flag": "N",
"Preferred_Name": "Ronny Rahardjo",
"Regular_Employee_Flag": "Y",
"Preferred_Name_-_Last_Name": "Rahardjo",
"Cost_Center_Code": "120100",
"Location": "Office - USA - CA - Headquarters",
"Last_Day_of_Work": "02/15/2021",
"Termination_Date": "02/15/2021",
"Hire_Date": "01/01/2010"
},
{
"Employee_Type": "Regular",
"Leadership": "No",
"Work_Country_Code": "840",
"Street_Address": "WeWork Embarcadero Center",
"Employment_Status": "Active",
"VP_Flag": "N",
"Mgr_ID": "115069",
"Cost_Center_Description": "Magnifier Sales Inc",
"GDPR_Country_Flag": "0",
"Public_Work_Mobile_Phone_Number": "+44 7900-160-819",
"Director_Flag": "N",
"Email_-_Primary_Home": "[email protected]",
"First_Name": "Stephen",
"Last_Hire_Date": "10/01/2020",
"People_Manager_Flag": "N",
"Department": "WW Sales Functions:Cortex Sales",
"Workday_ID": "5aa443c785ff10461a941c31a173e459",
"Postal_Code": "94111",
"Rehired_Employee": "Yes",
"Org_Level_1": "Sales",
"Org_Level_3": "Cortex Sales",
"Country_Name": "United States Of America",
"Org_Level_2": "WW Sales Functions",
"Emp_ID": "101351",
"Job_Family": "Software Engineering",
"User_Name": "[email protected]",
"Preferred_Name_-_First_Name": "Stephen",
"Prehire_Flag": "False",
"Management_Level_1": "Nikesh Arora",
"Work_Country_Abbrev": "US",
"Management_Level_2": "Timmy Turner",
"Email_Address": "[email protected]",
"Title": "Mgr, SW Engineering",
"City": "San Francisco",
"Work_State_US_Only": "California",
"Job_Code": "2163",
"PAN_CF_Okta_Location_Region": "Americas",
"Last_Name": "Arnold",
"Job_Function": "Engineering Function",
"State": "California",
"Exec_Admin_Flag": "N",
"Preferred_Name": "Stephen Arnold",
"Regular_Employee_Flag": "Y",
"Preferred_Name_-_Last_Name": "Arnold",
"Cost_Center_Code": "101100",
"Location": "Office - USA - CA - San Francisco",
"Last_Day_of_Work": "02/15/2021",
"Termination_Date": "02/15/2021",
"Hire_Date": "01/01/2010"
},
{
"Employee_Type": "Regular",
"Leadership": "No",
"Work_Country_Code": "840",
"Street_Address": "3000 Tannery Way",
"Employment_Status": "Active",
"VP_Flag": "N",
"Mgr_ID": "115069",
"Cost_Center_Description": "IoT - Engineering",
"GDPR_Country_Flag": "0",
"Director_Flag": "N",
"Email_-_Primary_Home": "[email protected]",
"First_Name": "Tooth",
"Last_Hire_Date": "06/15/2020",
"People_Manager_Flag": "N",
"Department": "Enterprise R&D:FWaaP",
"Workday_ID": "9aa7e309929e01ebec7923080803461b",
"Postal_Code": "95054",
"Rehired_Employee": "No",
"Org_Level_1": "All R&D",
"Org_Level_3": "FWaaP",
"Country_Name": "United States Of America",
"Org_Level_2": "Enterprise R&D",
"Emp_ID": "115104",
"Job_Family": "Software Engineering",
"Preferred_Name_-_First_Name": "Tooth",
"Prehire_Flag": "False",
"Management_Level_1": "Nikesh Arora",
"Work_Country_Abbrev": "US",
"Management_Level_2": "Timmy Turner",
"Email_Address": "[email protected]",
"Title": "Staff Engineer SW",
"City": "Santa Clara",
"Work_State_US_Only": "California",
"Job_Code": "5162",
"PAN_CF_Okta_Location_Region": "Americas",
"Last_Name": "Fairy_Updated",
"Job_Function": "Engineering Function",
"State": "California",
"Exec_Admin_Flag": "N",
"Preferred_Name": "Tooth Fairy_Updated",
"Regular_Employee_Flag": "Y",
"Preferred_Name_-_Last_Name": "Fairy_Updated",
"Cost_Center_Code": "613116",
"Location": "Office - USA - CA - Headquarters",
"Last_Day_of_Work": "02/15/2021",
"Termination_Date": "02/15/2021",
"Hire_Date": "01/01/2010"
},
{
"Employee_Type": "Regular",
"Leadership": "No",
"Work_Country_Code": "840",
"Street_Address": "3000 Tannery Way",
"Employment_Status": "Active",
"VP_Flag": "N",
"Mgr_ID": "115069",
"Cost_Center_Description": "Consulting Systems Engineering",
"GDPR_Country_Flag": "0",
"Director_Flag": "N",
"Email_-_Primary_Home": "[email protected]",
"First_Name": "Remy",
"Last_Hire_Date": "06/15/2020",
"People_Manager_Flag": "N",
"Department": "WW Sales Functions:WW SE Sales",
"Workday_ID": "9aa7e309929e01830c041f1c08039323",
"Postal_Code": "95054",
"Rehired_Employee": "No",
"Org_Level_1": "Sales",
"Org_Level_3": "WW SE Sales",
"Country_Name": "United States Of America",
"Org_Level_2": "WW Sales Functions",
"Emp_ID": "115094",
"Job_Family": "Software Engineering",
"User_Name": "[email protected]",
"Preferred_Name_-_First_Name": "Remy",
"Prehire_Flag": "False",
"Management_Level_1": "Nikesh Arora",
"Work_Country_Abbrev": "US",
"Management_Level_2": "Timmy Turner",
"Email_Address": "[email protected]",
"Title": "Staff Engineer Software",
"City": "Santa Clara",
"Work_State_US_Only": "California",
"Job_Code": "5162",
"PAN_CF_Okta_Location_Region": "Americas",
"Last_Name": "Buxaplenty",
"Job_Function": "Engineering Function",
"State": "California",
"Exec_Admin_Flag": "N",
"Preferred_Name": "Remy Buxaplenty",
"Regular_Employee_Flag": "Y",
"Preferred_Name_-_Last_Name": "Buxaplenty",
"Cost_Center_Code": "310100",
"Location": "Office - USA - CA - Headquarters",
"Last_Day_of_Work": "02/15/2021",
"Termination_Date": "02/15/2021",
"Hire_Date": "01/01/2010"
},
{
"Employee_Type": "Regular",
"Leadership": "No",
"Work_Country_Code": "840",
"Street_Address": "3000 Tannery Way",
"Employment_Status": "Active",
"VP_Flag": "N",
"Mgr_ID": "115069",
"Cost_Center_Description": "IoT - PM",
"GDPR_Country_Flag": "0",
"Director_Flag": "N",
"Email_-_Primary_Home": "[email protected]",
"First_Name": "Norm",
"Last_Hire_Date": "06/15/2020",
"People_Manager_Flag": "N",
"Department": "Enterprise R&D:FWaaP",
"Workday_ID": "9aa7e309929e0125823a032108030b25",
"Postal_Code": "95054",
"Rehired_Employee": "No",
"Org_Level_1": "All R&D",
"Org_Level_3": "FWaaP",
"Country_Name": "United States Of America",
"Org_Level_2": "Enterprise R&D",
"Emp_ID": "115092",
"Job_Family": "Product Management",
"User_Name": "[email protected]",
"Preferred_Name_-_First_Name": "Norm",
"Prehire_Flag": "False",
"Management_Level_1": "Nikesh Arora",
"Work_Country_Abbrev": "US",
"Management_Level_2": "Timmy Turner",
"Email_Address": "[email protected]",
"Title": "Sr Prod Mgr",
"City": "Santa Clara",
"Work_State_US_Only": "California",
"Job_Code": "5224",
"PAN_CF_Okta_Location_Region": "Americas",
"Last_Name": "Genie",
"Job_Function": "Product Management Function",
"State": "California",
"Exec_Admin_Flag": "N",
"Preferred_Name": "Norm Genie",
"Regular_Employee_Flag": "Y",
"Preferred_Name_-_Last_Name": "Genie",
"Cost_Center_Code": "651116",
"Location": "Office - USA - CA - Headquarters",
"Last_Day_of_Work": "02/15/2021",
"Termination_Date": "02/15/2021",
"Hire_Date": "01/01/2010"
},
{
"Employee_Type": "Regular",
"Leadership": "No",
"Work_Country_Code": "840",
"Street_Address": "3000 Tannery Way",
"Employment_Status": "Active",
"VP_Flag": "N",
"Mgr_ID": "115069",
"Cost_Center_Description": "IoT - PM",
"GDPR_Country_Flag": "0",
"Director_Flag": "N",
"Email_-_Primary_Home": "[email protected]",
"First_Name": "Santa",
"Last_Hire_Date": "06/15/2020",
"People_Manager_Flag": "N",
"Department": "Enterprise R&D:FWaaP",
"Workday_ID": "9aa7e309929e01b392c9a5220803c825",
"Postal_Code": "95054",
"Rehired_Employee": "No",
"Org_Level_1": "All R&D",
"Org_Level_3": "FWaaP",
"Country_Name": "United States Of America",
"Org_Level_2": "Enterprise R&D",
"Emp_ID": "115091",
"Job_Family": "Technical Writing",
"Preferred_Name_-_First_Name": "Santa",
"Prehire_Flag": "False",
"Management_Level_1": "Nikesh Arora",
"Work_Country_Abbrev": "US",
"Management_Level_2": "Timmy Turner",
"Email_Address": "[email protected]",
"Title": "Sr Technical Writer",
"City": "Santa Clara",
"Work_State_US_Only": "California",
"Job_Code": "5314",
"PAN_CF_Okta_Location_Region": "Americas",
"Last_Name": "Claus",
"Job_Function": "Product Management Function",
"State": "California",
"Exec_Admin_Flag": "N",
"Preferred_Name": "Santa Claus",
"Regular_Employee_Flag": "Y",
"Preferred_Name_-_Last_Name": "Claus",
"Cost_Center_Code": "651116",
"Location": "Office - USA - CA - Headquarters",
"Last_Day_of_Work": "02/15/2021",
"Termination_Date": "02/15/2021",
"Hire_Date": "01/01/2010"
},
{
"Employee_Type": "Regular",
"Leadership": "No",
"Work_Country_Code": "840",
"Street_Address": "3000 Tannery Way",
"Employment_Status": "Active",
"VP_Flag": "N",
"Mgr_ID": "115069",
"Cost_Center_Description": "IoT - PM",
"GDPR_Country_Flag": "0",
"Director_Flag": "N",
"Email_-_Primary_Home": "[email protected]",
"First_Name": "Dolores",
"Last_Hire_Date": "06/15/2020",
"People_Manager_Flag": "N",
"Department": "Enterprise R&D:FWaaP",
"Workday_ID": "9aa7e309929e0188f4eb6b2a08031228",
"Postal_Code": "95054",
"Rehired_Employee": "No",
"Org_Level_1": "All R&D",
"Org_Level_3": "FWaaP",
"Country_Name": "United States Of America",
"Org_Level_2": "Enterprise R&D",
"Emp_ID": "115088",
"Job_Family": "Software Engineering",
"Preferred_Name_-_First_Name": "Dolores",
"Prehire_Flag": "False",
"Management_Level_1": "Nikesh Arora",
"Work_Country_Abbrev": "US",
"Management_Level_2": "Timmy Turner",
"Email_Address": "[email protected]",
"Title": "Sr Mgr, UX Design",
"City": "Santa Clara",
"Work_State_US_Only": "California",
"Job_Code": "2164",
"PAN_CF_Okta_Location_Region": "Americas",
"Last_Name": "Crocker",
"Job_Function": "Engineering Function",
"State": "California",
"Exec_Admin_Flag": "N",
"Preferred_Name": "Dolores Crocker",
"Regular_Employee_Flag": "Y",
"Preferred_Name_-_Last_Name": "Crocker",
"Cost_Center_Code": "651116",
"Location": "Office - USA - CA - Headquarters",
"Last_Day_of_Work": "02/15/2021",
"Termination_Date": "02/15/2021",
"Hire_Date": "01/01/2010"
},
{
"Employee_Type": "Regular",
"Leadership": "No",
"Work_Country_Code": "840",
"Street_Address": "3000 Tannery Way",
"Employment_Status": "Active",
"VP_Flag": "N",
"Mgr_ID": "115069",
"Cost_Center_Description": "IoT - Engineering",
"GDPR_Country_Flag": "0",
"Director_Flag": "N",
"Email_-_Primary_Home": "[email protected]",
"First_Name": "Crash",
"Last_Hire_Date": "06/15/2020",
"People_Manager_Flag": "N",
"Department": "Enterprise R&D:FWaaP",
"Workday_ID": "9aa7e309929e014a0d78ca2c08030629",
"Postal_Code": "95054",
"Rehired_Employee": "No",
"Org_Level_1": "All R&D",
"Org_Level_3": "FWaaP",
"Country_Name": "United States Of America",
"Org_Level_2": "Enterprise R&D",
"Emp_ID": "115087",
"Job_Family": "Software Engineering",
"Preferred_Name_-_First_Name": "Crash",
"Prehire_Flag": "False",
"Management_Level_1": "Nikesh Arora",
"Work_Country_Abbrev": "US",
"Management_Level_2": "Timmy Turner",
"Email_Address": "[email protected]",
"Title": "Staff Engineer Software",
"City": "Santa Clara",
"Work_State_US_Only": "California",
"Job_Code": "5162",
"PAN_CF_Okta_Location_Region": "Americas",
"Last_Name": "Nebula",
"Job_Function": "Engineering Function",
"State": "California",
"Exec_Admin_Flag": "N",
"Preferred_Name": "Crash Nebula",
"Regular_Employee_Flag": "Y",
"Preferred_Name_-_Last_Name": "Nebula",
"Cost_Center_Code": "613116",
"Location": "Office - USA - CA - Headquarters",
"Last_Day_of_Work": "02/15/2021",
"Termination_Date": "02/15/2021",
"Hire_Date": "01/01/2010"
},
{
"Employee_Type": "Regular",
"Leadership": "No",
"Work_Country_Code": "840",
"Street_Address": "3000 Tannery Way",
"Employment_Status": "Active",
"VP_Flag": "N",
"Mgr_ID": "115069",
"Cost_Center_Description": "IoT - Engineering",
"GDPR_Country_Flag": "0",
"Director_Flag": "N",
"Email_-_Primary_Home": "[email protected]",
"First_Name": "Trixie",
"Last_Hire_Date": "06/15/2020",
"People_Manager_Flag": "N",
"Department": "Enterprise R&D:FWaaP",
"Workday_ID": "9aa7e309929e01eb443ce92e08031f2a",
"Postal_Code": "95054",
"Rehired_Employee": "No",
"Org_Level_1": "All R&D",
"Org_Level_3": "FWaaP",
"Country_Name": "United States Of America",
"Org_Level_2": "Enterprise R&D",
"Emp_ID": "115086",
"Job_Family": "Software Engineering",
"Preferred_Name_-_First_Name": "Trixie",
"Prehire_Flag": "False",
"Management_Level_1": "Nikesh Arora",
"Work_Country_Abbrev": "US",
"Management_Level_2": "Timmy Turner",
"Email_Address": "[email protected]",
"Title": "Principal Engineer Software",
"City": "Santa Clara",
"Work_State_US_Only": "California",
"Job_Code": "5164",
"PAN_CF_Okta_Location_Region": "Americas",
"Last_Name": "Tang",
"Job_Function": "Engineering Function",
"State": "California",
"Exec_Admin_Flag": "N",
"Preferred_Name": "Trixie Tang",
"Regular_Employee_Flag": "Y",
"Preferred_Name_-_Last_Name": "Tang",
"Cost_Center_Code": "613116",
"Location": "Office - USA - CA - Headquarters",
"Last_Day_of_Work": "02/15/2021",
"Termination_Date": "02/15/2021",
"Hire_Date": "01/01/2010"
}
]
}
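
# Template entry used by the workday-generate-hire-event command; the placeholder
# values below are overwritten with the command arguments in generate_new_hire_reports().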
NEW_HIRE_REPORT = {
"Report_Entry": [
{
"Employee_Type": "Regular",
"Leadership": "No",
"Work_Country_Code": "840",
"Street_Address": "3000 Tannery Way",
"Employment_Status": "Active",
"VP_Flag": "N",
"Mgr_ID": "115069",
"Cost_Center_Description": "IoT - PM",
"GDPR_Country_Flag": "0",
"Director_Flag": "N",
"Email_-_Primary_Home": "[email protected]",
"First_Name": 'first_name',
"Last_Hire_Date": "06/15/2020",
"People_Manager_Flag": "N",
"Department": "Enterprise R&D:FWaaP",
"Workday_ID": "9aa7e309929e013ff3c6e3440803b833",
"Postal_Code": "95054",
"Rehired_Employee": "No",
"Org_Level_1": "All R&D",
"Org_Level_3": "FWaaP",
"Country_Name": "United States Of America",
"Org_Level_2": "Enterprise R&D",
"Emp_ID": "115074",
"Job_Family": "Product Management",
"Preferred_Name_-_First_Name": 'first_name',
"Nikesh Arora": "False",
"Management_Level_1": "Nikesh Arora",
"Work_Country_Abbrev": "US",
"Management_Level_2": "Timmy Turner",
"Email_Address": 'user_email',
"Title": "Product Line Manager",
"City": "Santa Clara",
"Work_State_US_Only": "California",
"Job_Code": "5225",
"PAN_CF_Okta_Location_Region": "Americas",
"Last_Name": 'lsat_name',
"Job_Function": "Product Management Function",
"State": "California",
"Exec_Admin_Flag": "N",
"Preferred_Name": "Chester McBadbat",
"Regular_Employee_Flag": "Y",
"Preferred_Name_-_Last_Name": 'last_name',
"Cost_Center_Code": "651116",
"Location": "Office - USA - CA - Headquarters",
"Last_Day_of_Work": "02/15/2021",
"Termination_Date": "02/15/2021",
"Hire_Date": "01/01/2010"
}
]
}
APP: Flask = Flask('xsoar-workday')
@APP.route('/', methods=['GET'])
def get_full_reports():
integration_context = get_integration_context()
return jsonify(integration_context)
def get_full_report():
set_integration_context(FIRST_RUN_REPORT)
integration_context = get_integration_context()
return integration_context['Report_Entry'][0]
def test_module():
if int(demisto.params().get('longRunningPort', '')) and demisto.params().get("longRunning"):
user_report = get_full_report()
if user_report:
demisto.results('ok')
else:
return_error('Could not connect to the long running server. Please make sure everything is configured.')
else:
return_error('Please make sure the long running port is filled and the long running checkbox is marked.')
def get_employee_id():
"""
Get the maximum employee id number and increase it by one.
This function is used to avoid duplication while creating a new hire report.
Returns: (int) Employee ID number.
"""
integration_context = get_integration_context()
employee_ids = []
for report in integration_context['Report_Entry']:
employee_id = int(report.get('Emp_ID'))
employee_ids.append(employee_id)
max_employee_id = int(max(employee_ids)) + 1
return str(max_employee_id)
def generate_new_hire_reports():
user_email = demisto.args().get('user_email')
first_name = demisto.args().get('first_name', '')
last_name = demisto.args().get('last_name', '')
integration_context = get_integration_context()
new_report = dict(NEW_HIRE_REPORT['Report_Entry'][0])  # copy so repeated hire events do not share one dict
for report in integration_context['Report_Entry']:
email_address = report.get('Email_Address')
if user_email == email_address:
raise Exception(f'User "{user_email}" already exist. Please try another user email.')
new_report['Email_Address'] = user_email
new_report['First_Name'] = first_name
new_report['Last_Name'] = last_name
new_report['Preferred_Name'] = f'{first_name} {last_name}'
new_report['Preferred_Name_-_First_Name'] = first_name
new_report['Preferred_Name_-_Last_Name'] = last_name
new_report['Emp_ID'] = get_employee_id()
integration_context['Report_Entry'].append(new_report)
set_integration_context(integration_context)
return_results('Successfully generated the new hire event.')
def generate_terminate_report():
user_email = demisto.args().get('user_email')
integration_context = get_integration_context()
now = datetime.now()
current_date = now.strftime("%m/%d/%Y")
user_report = None
for report in integration_context['Report_Entry']:
if report['Email_Address'] == user_email:
user_report = report
if not user_report:
raise Exception(f'The user email {user_email} does not exist. Please try one of the following: '
f'[email protected], [email protected], [email protected]')
is_terminated = user_report.get('Employment_Status')
rehired_status = user_report.get('Rehired_Employee')
if is_terminated == 'Terminated' and rehired_status == 'No':
raise Exception(f'The user {user_email} is already terminated.')
user_report['Employment_Status'] = 'Terminated'
user_report['Last_Day_of_Work'] = demisto.args().get('last_day_of_work', str(current_date))
user_report['Termination_Date'] = demisto.args().get('termination_date', str(current_date))
set_integration_context(integration_context)
return_results('Successfully generated the Terminate user event.')
def generate_update_report():
user_email = demisto.args().get('user_email')
integration_context = get_integration_context()
title = demisto.args().get('title')
city = demisto.args().get('city')
street_address = demisto.args().get('street_address')
last_day_of_work = demisto.args().get('last_day_of_work')
user_report = None
for report in integration_context['Report_Entry']:
if report['Email_Address'] == user_email:
user_report = report
if not user_report:
raise Exception(f'The user email {user_email} does not exist. Please try one of the following: '
f'[email protected], [email protected], [email protected]')
if title:
user_report['Title'] = title
if city:
user_report['City'] = city
if street_address:
user_report['Street_Address'] = street_address
if last_day_of_work:
user_report['Last_Day_of_Work'] = last_day_of_work
set_integration_context(integration_context)
return_results('Successfully generated the Update user event.')
def generate_rehire_report():
user_email = demisto.args().get('user_email')
integration_context = get_integration_context()
user_report = None
for report in integration_context['Report_Entry']:
if report['Email_Address'] == user_email:
user_report = report
if not user_report:
raise Exception(f'The user email {user_email} does not exist. Please try one of the following: '
f'[email protected], [email protected], [email protected]')
is_terminated = user_report.get('Employment_Status')
rehired_status = user_report.get('Rehired_Employee')
if is_terminated == 'Active' or rehired_status == 'Yes':
raise Exception(f'The user {user_email} is not terminated. Either he is still active or was already '
f'rehired.')
user_report['Rehired_Employee'] = 'Yes'
user_report['Prehire_Flag'] = 'True'
set_integration_context(integration_context)
return_results('Successfully generated the rehire user event.')
def main():
if demisto.command() == 'test-module':
test_module()
elif demisto.command() == 'long-running-execution':
integration_context = get_integration_context()
if not integration_context:
set_integration_context(FIRST_RUN_REPORT)
while True:
port = int(demisto.params().get('longRunningPort', ''))
server = WSGIServer(('0.0.0.0', port), APP)
server.serve_forever()
elif demisto.command() == 'workday-generate-hire-event':
generate_new_hire_reports()
elif demisto.command() == 'workday-generate-update-event':
generate_update_report()
elif demisto.command() == 'workday-generate-rehire-event':
generate_rehire_report()
elif demisto.command() == 'workday-generate-terminate-event':
generate_terminate_report()
elif demisto.command() == 'initialize-context':
set_integration_context(FIRST_RUN_REPORT)
return_results('The integration context has been initialized.')
if __name__ == '__builtin__' or __name__ == 'builtins':
main()
| 40.838428 | 116 | 0.559524 |
be9f272fbb8ad4237daf7d8186aa07fed9c5e8af
| 53 |
py
|
Python
|
src/mvg/__init__.py
|
dfrommi/alfred-mvv
|
5310f80ca3e17686fb534db0e53a613043a1b352
|
[
"MIT"
] | 2 |
2019-07-07T19:24:15.000Z
|
2019-10-16T09:07:25.000Z
|
src/mvg/__init__.py
|
dfrommi/alfred-mvv
|
5310f80ca3e17686fb534db0e53a613043a1b352
|
[
"MIT"
] | 1 |
2020-06-05T16:49:17.000Z
|
2020-06-05T16:49:17.000Z
|
src/mvg/__init__.py
|
dfrommi/alfred-mvv
|
5310f80ca3e17686fb534db0e53a613043a1b352
|
[
"MIT"
] | 2 |
2017-04-03T11:47:59.000Z
|
2019-10-16T09:09:26.000Z
|
from .api import MVG
from .favorites import Favorites
| 26.5 | 32 | 0.830189 |
43d39546ea2d1046b2d090b13c9a78e4f68b1b01
| 246 |
py
|
Python
|
Python/B2-Wuerfel/Wuerfel.py
|
frankyhub/Calliope
|
335f0ef5ca9bcf57e14166319501ec9086bc09bf
|
[
"MIT"
] | null | null | null |
Python/B2-Wuerfel/Wuerfel.py
|
frankyhub/Calliope
|
335f0ef5ca9bcf57e14166319501ec9086bc09bf
|
[
"MIT"
] | null | null | null |
Python/B2-Wuerfel/Wuerfel.py
|
frankyhub/Calliope
|
335f0ef5ca9bcf57e14166319501ec9086bc09bf
|
[
"MIT"
] | null | null | null |
def on_button_pressed_a():
basic.show_number(randint(1, 6))
input.on_button_pressed(Button.A, on_button_pressed_a)
def on_button_pressed_b():
basic.show_number(randint(1, 4))
input.on_button_pressed(Button.B, on_button_pressed_b)
| 30.75 | 55 | 0.772358 |