Columns (name, dtype, value range or number of classes):

| column | dtype | range / classes |
|---|---|---|
| seq_id | string | lengths 7 to 11 |
| text | string | lengths 156 to 1.7M |
| repo_name | string | lengths 7 to 125 |
| sub_path | string | lengths 4 to 132 |
| file_name | string | lengths 4 to 77 |
| file_ext | string | 6 classes |
| file_size_in_byte | int64 | 156 to 1.7M |
| program_lang | string | 1 class |
| lang | string | 38 classes |
| doc_type | string | 1 class |
| stars | int64 | 0 to 24.2k |
| dataset | string | 1 class |
| pt | string | 1 class |
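A minimal sketch of how rows with this schema might be filtered, assuming the dump has been exported as JSON Lines; the file name dump.jsonl and the star threshold are hypothetical, not part of the dataset.

import json

# Keep only Python files from starred repositories (column names taken from the schema above).
with open("dump.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        if row["program_lang"] == "python" and int(row["stars"]) > 0:
            print(row["repo_name"], row["sub_path"], row["file_size_in_byte"])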
seq_id: 37021992627
from django.core.exceptions import ObjectDoesNotExist
from django.http import StreamingHttpResponse, HttpResponse
from rest_framework.response import Response
from .models import AudioSlice, Audio
from .serializers import AudioSerializer, AudioSliceSerializer
from rest_framework.decorators import api_view
from rest_framework.decorators import parser_classes
from rest_framework.parsers import MultiPartParser
from .utils import utils as ut
from audio.dbmanager.redis_dao import *
from audio.services.preprocessor import AudioPreprocessor
from audio.dbmanager.youtube_handler import *
import re
import time
range_re = re.compile(r'bytes\s*=\s*(\d+)\s*-\s*(\d*)', re.I)
file_count = 0
"""
timeline
t0 : page 1์์ ์ฌ์ฉ์๊ฐ ์ค๋์ค ์์ฒญ์ ๋ณด๋ผ ๋
t1 : page 2์์ ์ฌ์ฉ์๊ฐ ์ค๋์ค์ ๊ตฌ๊ฐ์ ์ ํํ ๋
"""
"""
t0 > CELERY RESULT BACKEND
์ฌ์ฉ์์ ์ค๋์ค ์์ฒญ์ ๋ํ ์ ์ฒ๋ฆฌ ์คํ ํ ๊ธฐ๋ก --> view ํจ์์
"""
# preprocessor = AudioPreprocessor()
# # task_id is the audio's id
# # audio_id = uuid.uuid4() # if the audio comes in for the first time; otherwise fetch it from the database
# AudioPreprocessor().preprocess.apply_async((3, 56), task_id="hiek", expires=datetime.now() + timedelta(days=1))
"""
t1 > USER INFO RECORD : (audio <----> choreo <----> product) Inter-server communication
KEY "a30gk3" <-- uuid.uuid4()
VAL (HSET)
{ audio_id : e317fce <-- ํด๋ผ์ด์ธํธ์๊ฒ ๋ฐ์ ๊ฒ
start : 13 <-- audio_handler๊ฐ ๊ณ์ฐํ๋๋ก
end : 31 <-- audio_handler๊ฐ ๊ณ์ฐํ๋๋ก
progress : 0.0 } <-- ์ด๋์ ๋ ์งํ๋์๋์ง percentage
"""
"""
t1 > SIMILARITY : (audio <----> choreo) Inter-server communication
KEY e317fce-14 <-- ๋
ธ๋ ๊ตฌ๊ฐ id
VAL [ "af3g0s39_13 : 89", "ldf9a8i_4 : 90", "fk02j3bu_9 : 99", ... ] <-- ๋
ธ๋๊ตฌ๊ฐ id ์ ์ ์๊ฐ ๋งคํ๋ ์์๋ค๋ก ๊ตฌ์ฑ๋ list
"""
"""
t1 > AMPLITUDE : (audio <----> choreo) Inter-server communication
KEY e317fce-14 <-- ๋
ธ๋ ๊ตฌ๊ฐ id
VAL [ 7 2 9 8 6 ] <-- ์ ์ list
"""
"""
===================================================================================================================
"""
# def upload_file(request):
# if request.method == 'POST':
# form = UploadFileForm(request.POST, request.FILES)
# if form.is_valid():
# instance = ModelWithFileField(file_field=request.FILES['file'])
# instance.save()
# return HttpResponseRedirect('/success/url/')
# else:
# form = UploadFileForm()
# return render(request, 'upload.html', {'form': form})
@api_view(['POST'])
@parser_classes([MultiPartParser])
def meta(request):
    data = request.data  # MultiPartParser is applied via @parser_classes, so the parsed payload is on request.data
print(data)
res = write_from_meta()
return Response(AudioSerializer(Audio.objects.all(), many=True).data)
@api_view(['POST'])
async def youtube_url(request):
download_url = request.data.get("download_url")
try:
        # if the audio already exists
return Response(AudioSerializer(Audio.objects.get(download_url=download_url)).data)
except ObjectDoesNotExist:
try:
print(f"started at {time.strftime('%X')}")
_id, _title, _duration = await write_from_link(download_url)
audio = Audio(audio_id=_id, title=_title, download_url=download_url, duration=_duration)
audio.save()
serializer = AudioSerializer(audio)
            # this corresponds to the tasks
AudioPreprocessor(audio=audio).preprocess()
            # find the file and send it back together with its info
return Response(serializer.data)
        except Exception:
print("===========download failure=============")
return Response("cannot open file.", status=400)
# response = StreamingHttpResponse(streaming_content=request.FILES["audio_file"])
# response['Content-Disposition'] = f'attachment; filename="{request.data["audio_file"]}"'
# return response
@api_view(['POST'])
@parser_classes([MultiPartParser])
# @renderer_classes([MultiPartRenderer])
def file(request):
"""
:param request:
    :return: audio_id and the file, in streaming form
"""
ext = request.data.get("ext")
global file_count
filename = "up" + str(file_count)
if ext != "wav":
ut.get_console_output(
'ffmpeg -n -i "{}/{}.{}" "{}/{}.wav"'.format("../../media/ORG", filename, ext, "../../media/WAV",
filename))
    # save the file right away - store in the volume
file_count += 1
response = StreamingHttpResponse(streaming_content=request.data["audio_file"])
response['Content-Disposition'] = f'attachment; filename="{request.data["audio_file"]}"'
return response
@api_view(['POST'])
def skeletal_after_interval(request):
"""
:param request: audio_id, start_sec, end_sec
:return:
"""
audio_id = request.data.get('audio_id')
user_start_sec = request.data['start_sec']
user_end_sec = request.data['end_sec']
UserRedisHandler.set_user_info(audio_id, user_start_sec, user_end_sec)
if bool(AudioSlice.objects.filter(audio_slice_id__contains=audio_id)):
start_arr = AudioSlice.objects.values_list('start_sec', flat=True)
        start_audio_slice_id = AudioSlice.objects.only('audio_slice_id').get(
            start_sec=ut.find_nearest(start_arr, user_start_sec)).audio_slice_id
        end_audio_slice_id = request.data.get('audio_id') + AudioSlice.objects.only('audio_slice_id').get(
            start_sec=ut.find_nearest(start_arr, user_end_sec)).audio_slice_id.split("_")[1]
else:
audio_handler = AudioPreprocessor(Audio.objects.get(audio_id=audio_id))
audio_handler.preprocess()
start_audio_slice_id = audio_handler.get_slice_id(ut.find_nearest(audio_handler.beat_track, user_start_sec))
end_audio_slice_id = audio_handler.get_slice_id(ut.find_nearest(audio_handler.beat_track, user_end_sec))
    interval_number = int(end_audio_slice_id.split("ㅡ")[1]) - int(start_audio_slice_id.split("ㅡ")[1])
# Task 1. Similarity process & get into redis
# smlr_app = Celery('redis_dao', backend=cc.result_smlr_backend, broker=cc.broker_smlr_url)
    # smlr_app.config_from_object('celery_config') -- probably not strictly necessary
    # TODO: figure out how to assemble the part that was handled here..
# cluster_smlr.apply_async(filter_kmeans_labels, filter_feat, 0, 6))
# Task 2. Amplitude process & get into redis
# ampl_app = Celery(backend=cc.result_ampl_backend, broker=cc.broker_ampl_url)
# get_amplitude.apply_async((3, 56), task_id=audio_id, expires=datetime.now() + timedelta(days=1))
return Response(
AudioSliceSerializer(start_audio_slice_id=start_audio_slice_id, end_audio_slice_id=end_audio_slice_id,
interval_number=interval_number).data)
# app = Celery('redis_dao', backend=cc.result_backend, broker=cc.broker_url)
# app.config_from_object('celery_config')
# def youtube(request):
# # task_id is the audio's id
# audio_id = uuid.uuid4() # if the audio comes in for the first time; otherwise fetch it from the database
# preprocess.apply_async((3, 56), task_id=audio_id, expires=datetime.now() + timedelta(days=1))
# def serve(request):
# return FileResponse(open(request.data.get('music'), 'rb'))
@api_view(['POST'])
def get_music(request):
    with open(request.data.get('music'), 'rb') as f:
        # set the required response headers
        return set_audio_response("audio file path, be sure to include the .wav extension",
                                  "audio file id (youtube id)", "wav",
                                  "audio file duration, as a float")
def set_audio_response(audio_src, audio_id, ext, duration):
response = HttpResponse(open(audio_src, "rb"))
response["Access-Control-Allow-Origin"] = "*"
response['Content-Type'] = "application/octet-stream"
    response['Content-Disposition'] = f'attachment; filename="{audio_id}.{ext}"'  # so we are not forced to always send wav
response['audio_id'] = audio_id
response['duration'] = duration
return response
# data = {
# "audio_id": "dfsdff",
# "interval_number": 14,
# "music": open(request.data.get('music'), 'rb')
# }
# return HttpResponse(data)
# response = HttpResponse(content=open(request.data.get('music'), 'rb'))
# response['Content-Type'] = 'application/json'
# return FileResponse(open(request.data.get('music'), 'rb'))
repo_name: Choleor/choleor-audio-reboot | sub_path: audio/views_old.py | file_name: views_old.py | file_ext: py | file_size_in_byte: 8,450 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 26255854051
from MainA1 import Mainfun
from unigramIndex import Linkedlist
import string
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import pickle
class QueryProcess:
def __init__(self):
        '''Attributes for each query's processing results: Totdocmatch is the total number of matched documents,
        comparison is the total number of comparisons done in the merging algorithm, and fnamelist is the list of all matched file names'''
self.Totdocmatch = 0
self.comparison = 0
self.fnamelist = []
    '''Function for preprocessing a query: converting to
    lower case, removing punctuation, tokenization, removing stop words and lemmatization'''
def preprocess(self, query):
#normalisation
result1 = query.lower()
result2 = result1.translate(str.maketrans("","", string.punctuation))
#tokenization
tokens = word_tokenize(result2)
#removing the stopping words
stop_words = set(stopwords.words('english'))
result3 = [w for w in tokens if w not in stop_words]
#Lemmatization
lem = WordNetLemmatizer()
result4query = []
for word in result3:
lmword = lem.lemmatize(word)
result4query.append(lmword)
return(result4query)
def MergingAlgo(self, postlink, operatorseq, maxDocID, filename):
length = len(operatorseq)
#retrieve first posting list
post1 = postlink[0]
#Process the query from Left to Right, Iterate the query starting from query operator list
for i in range(length):
            #Retrieve the operator and the second posting list
operator = operatorseq[i]
post2 = postlink[i+1]
if (operator == 'AND'):
p1 = post1.headptr
p2 = post2.headptr
#Calling the specific intersection Merge Algo
post1 = self.MergeAND(p1, p2)
'''checking the resultant postinglist will be null or not,
if it is null then this post1 will move further to the next index in query list'''
if(post1.freq == 0):
post1 = postlink[i+1]
i=i+1
elif(operator == 'OR'):
p1 = post1.headptr
p2 = post2.headptr
#Calling the specific Union Merge Algo
post1 = self.MergeOR(p1, p2)
'''checking the resultant postinglist will be null or not,
if it is null then this post1 will move further to the next index in query list'''
if(post1.freq == 0):
post1 = postlink[i+1]
i=i+1
elif(operator == 'AND NOT'):
tp2 = post2.headptr
#Computing the complement of second posting list
resulttp = self.ListCompliment(tp2, maxDocID)
p1 = post1.headptr
p2 = resulttp.headptr
#Calling the specific intersection Merge Algo
post1 = self.MergeAND(p1, p2)
'''checking the resultant postinglist will be null or not,
if it is null then this post1 will move further to the next index in query list'''
if(post1.freq == 0):
post1 = postlink[i+1]
i=i+1
elif(operator == 'OR NOT'):
tp2 = post2.headptr
#Computing the complement of second posting list
resulttp = self.ListCompliment(tp2, maxDocID)
p1 = post1.headptr
p2 = resulttp.headptr
#Calling the specific Union Merge Algo
post1 = self.MergeOR(p1, p2)
'''checking the resultant postinglist will be null or not,
if it is null then this post1 will move further to the next index in query list'''
if(post1.freq == 0):
post1 = postlink[i+1]
i=i+1
        '''After completing the merging algorithm, the final resultant posting list is post1;
        retrieve the document names according to the docIDs present in the final posting list'''
self.Totdocmatch = post1.freq
pt = post1.headptr
while(pt is not None):
self.fnamelist.append(filename[pt.IDval])
pt = pt.next
def MergeAND(self, ptr1, ptr2):
answer = Linkedlist()
#ptr1 and ptr2 , iterate the both pointer till the end of the linkedlist, both linkedlist are already in sorted form
while(ptr1 is not None and ptr2 is not None):
if(ptr1.IDval == ptr2.IDval):
#here when both pointer node value matches, then add the nodevalue to the answer linked list
answer.addnode(ptr1.IDval)
#move both pointer by one node
ptr1 = ptr1.next
ptr2 = ptr2.next
                #here counting the comparison; in this algo this is the first comparison so just add 1 to the comparison variable
self.comparison = self.comparison + 1
elif(ptr1.IDval < ptr2.IDval):
#here the ptr1 is behind the ptr2, so just move ptr1 by one node
ptr1 = ptr1.next
                #here counting the comparison; in this algo this is the second comparison so just add 2 to the comparison variable
self.comparison = self.comparison + 2
else:
#here in the else, the ptr2 is behind the ptr1, so just move ptr2 by one node
ptr2 = ptr2.next
                #here counting the comparison; in this algo 2 comparisons are already done above, so just add 2 to the comparison variable
self.comparison = self.comparison + 2
return answer
def MergeOR(self, ptr1, ptr2):
answer = Linkedlist()
#ptr1 and ptr2 , iterate the both pointer till the end of the linkedlist, both linkedlist are already in sorted form
while(ptr1 is not None and ptr2 is not None):
if(ptr1.IDval < ptr2.IDval):
#add the nodevalue to the answer linked list
answer.addnode(ptr1.IDval)
#here the ptr1 is behind the ptr2, so just move ptr1 by one node
ptr1 = ptr1.next
                #here counting the comparison; in this algo this is the first comparison so just add 1 to the comparison variable
self.comparison = self.comparison + 1
elif(ptr1.IDval > ptr2.IDval):
#add the nodevalue to the answer linked list
answer.addnode(ptr2.IDval)
#the ptr2 is behind the ptr1, so just move ptr2 by one node
ptr2 = ptr2.next
                #here counting the comparison; in this algo this is the second comparison so just add 2 to the comparison variable
self.comparison = self.comparison + 2
else:
#here in the else, when both pointer node value matches, then add the nodevalue to the answer linked list
answer.addnode(ptr1.IDval)
#move both pointer by one node
ptr1 = ptr1.next
ptr2 = ptr2.next
                #here counting the comparison; in this algo 2 comparisons are already done above, so just add 2 to the comparison variable
self.comparison = self.comparison + 2
#if ptr2 becomes none but ptr1 is not none, so just add the remaining node value of ptr1 to the answer linkedlsit
while(ptr1 is not None):
answer.addnode(ptr1.IDval)
ptr1 = ptr1.next
#if ptr1 becomes none but ptr2 is not none, so just add the remaining node value of ptr2 to the answer linkedlsit
while(ptr2 is not None):
answer.addnode(ptr2.IDval)
ptr2 = ptr2.next
return answer
#Function for finding the complement of a linkedlist
def ListCompliment(self, ptr, maxDocID):
i = 0
answer = Linkedlist()
#here maxDOCID is representing the number that the max docID that allocate to the document(0-maxdocID)
while(i < maxDocID and ptr is not None):
#if the docID present in the list, so just move to the next node
if(i == ptr.IDval):
i = i+1
ptr = ptr.next
#if the docID not present in the list, so just add to the answer linkedlist
elif(i < ptr.IDval):
answer.addnode(i)
i=i+1
#adding the remaining docID to the answer linkedlist
while(i < maxDocID):
answer.addnode(i)
i=i+1
return(answer)
if __name__ == '__main__':
    #Deserialization of the MainA1 class object, in which the unigram data structure is stored
with open('store.dat' , 'rb') as fr:
tempomainobj = pickle.load(fr)
    #retrieving the unigram data structure, the list of all docs, and the max doc ID
dictlist = tempomainobj.postinglist
filename = tempomainobj.docname
maxDocID = tempomainobj.docID
#Input the no. of query from the User
n = int(input("Enter the number of Query: "))
for i in range(n):
#input the query and query operator
query = input("Input Query: ")
queryoperatorseq = input("Input Query operator: ").split(', ')
#Preprocessing of Query
Queryobj = QueryProcess()
prepresult = Queryobj.preprocess(query)
        #Retrieving the posting list of each tokenized word of the query into the postlink[] list
postlink = []
for qword in prepresult:
LinkL = dictlist.get(qword)
postlink.append(LinkL)
        #Process the query and query operators with the merging algorithm
Queryobj.MergingAlgo(postlink, queryoperatorseq, maxDocID, filename)
#print the desirable result of a query
print('Number of document matched: ', end=' ')
print(Queryobj.Totdocmatch)
print('Number of comparison Done in Merging Algorithm: ', end=' ')
print(Queryobj.comparison)
print('List of matched document name:')
print(Queryobj.fnamelist)
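# Example session (illustrative, not part of the original file). Query operators are
# entered separated by ", " to match the split(', ') above; the matched counts and
# document names shown here are hypothetical.
#   Enter the number of Query: 1
#   Input Query: information retrieval system
#   Input Query operator: AND, OR
#   Number of document matched:  2
#   Number of comparison Done in Merging Algorithm:  7
#   List of matched document name:
#   ['doc1.txt', 'doc7.txt']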
repo_name: prashant18360/Information-Retrieval-Assignment-1 | sub_path: Qprocessing.py | file_name: Qprocessing.py | file_ext: py | file_size_in_byte: 10,931 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 4817137896
import cv2
import os
import time
import numpy as np
import imutils
def open_picture(image):
"""We open picture"""
img = cv2.imread(image)
return img
def show_picture(name, image, mode, destroy):
cv2.imshow(name, image)
cv2.waitKey(mode)
if mode == 1:
time.sleep(0.2)
if destroy == "y":
cv2.destroyAllWindows()
def save_picture(name, image):
path = "dataset/data_analysing/{}"
cv2.imwrite(path.format(str(name)), image)
def blanck_picture(img):
"""Create a black background picture same dimension of original picture"""
blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
blank_image[0:img.shape[0], 0:img.shape[1]] = 0, 0, 0
return blank_image
def find_object(img):
"""
    Binarise the picture so that only the
    shape of our object remains,
    then search for its contours.
"""
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray,250,255,cv2.THRESH_BINARY_INV)
contours, _ = cv2.findContours(thresh, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
return contours
def recup_object(contours, img):
"""
    Search for the contour with the maximum area.
    Sometimes there is noise: little areas
    (around 5x5 pixels) left over from the
    background. We don't want those!
    Afterwards we crop the image to that contour.
"""
maxi = 0
for cnts in contours:
if cv2.contourArea(cnts) > maxi:
maxi = cv2.contourArea(cnts)
for cnts in contours:
if cv2.contourArea(cnts) == maxi:
x, y, w, h = cv2.boundingRect(cnts)
crop = img[y:y+h, x:x+w]
return crop
def main_croping(picture):
img = open_picture(picture)
contours = find_object(img)
crop = recup_object(contours, img)
return crop
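# Illustrative usage sketch (not part of the original file); the image path below is a
# hypothetical example.
if __name__ == "__main__":
    cropped = main_croping("dataset/data_analysing/sample.jpg")
    save_picture("sample_crop.jpg", cropped)
    show_picture("crop", cropped, 0, "y")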
repo_name: LeGrosLezard/qu-est-ce-qu-il-y-a-dans-une-salle-a-manger- | sub_path: program/training/crop_object.py | file_name: crop_object.py | file_ext: py | file_size_in_byte: 1,965 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 30777340619
from unittest import TestCase
from mapper.file_mapper import FileMapper
from container.file import File
class TestFileMapper(TestCase):
def test_get_files_from_diff_should_return_four_files(self):
diff = '4\t0\t.gitignore\n' \
'8\t8\tIZIFarmaProm/build.gradle\n' \
'1\t1\tIZIFarmaProm/src/dev/res/values/strings.xml\n' \
'2\t6\tIZIFarmaProm/src/main/AndroidManifest.xml\n' \
' '
file_mapper = FileMapper(diff)
actual = file_mapper.map_files('', get_file_content_mock)
        self.assertEqual(4, len(actual))
def test_get_files_from_diff_should_return_correct_array(self):
diff = '4\t0\t.gitignore\n' \
' '
file_mapper = FileMapper(diff)
diffed_files = file_mapper.map_files('', get_file_content_mock)
actual = diffed_files[0]
expected = File('.gitignore', None, 4, 0)
self.assertEqual(expected.file_path, actual.file_path)
self.assertEqual(expected.deleted_lines, actual.deleted_lines)
self.assertEqual(expected.added_lines, actual.added_lines)
def get_file_content_mock(project_path, file_path):
pass
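# The diff strings in the tests above are `git diff --numstat` output (added<TAB>deleted<TAB>path).
# A minimal parser consistent with those tests could look like the sketch below; this is an
# illustration, not the repository's actual FileMapper implementation.
def _parse_numstat_sketch(diff):
    files = []
    for line in diff.splitlines():
        line = line.strip()
        if not line:
            continue  # skip the trailing blank entry used in the tests
        added, deleted, path = line.split('\t')
        files.append((path, int(added), int(deleted)))
    return files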
repo_name: farmapromlab/GITAG | sub_path: test/test_fileMapper.py | file_name: test_fileMapper.py | file_ext: py | file_size_in_byte: 1,191 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6

seq_id: 20538043919
"""
You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order, and each of their nodes contains a single digit. Add the two numbers and return the sum as a linked list.
You may assume the two numbers do not contain any leading zero, except the number 0 itself.
"""
"""
Time complexity:- O(max(n,m))
Space Complexity:- O(1)
"""
from typing import Optional
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def addTwoNumbers(
self, l1: Optional[ListNode], l2: Optional[ListNode]
) -> Optional[ListNode]:
# Create a dummy head and a tail pointer for the result linked list
dummyHead = ListNode(0)
tail = dummyHead
carry = 0 # Initialize the carry to 0
while l1 or l2 or carry != 0:
# Get the current digits of l1 and l2 (or 0 if one of them is None)
digit1 = l1.val if l1 else 0
digit2 = l2.val if l2 else 0
# Calculate the sum of the digits and the carry
_sum = digit1 + digit2 + carry
digit = _sum % 10
carry = _sum // 10
# Create a new node with the calculated digit
newNode = ListNode(digit)
# Append the new node to the result linked list
tail.next = newNode
tail = tail.next
# Move to the next nodes in l1 and l2 (if available)
l1 = l1.next if l1 else None
l2 = l2.next if l2 else None
# Get the result linked list starting from the node after the dummy head
result = dummyHead.next
# Remove the reference to the rest of the linked list
dummyHead.next = None
return result # Return the result linked list
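# Example usage (illustrative, not part of the original file): 342 + 465 = 807,
# with digits stored in reverse order as described above.
if __name__ == "__main__":
    def build(digits):
        head = ListNode(digits[0])
        node = head
        for d in digits[1:]:
            node.next = ListNode(d)
            node = node.next
        return head

    l1 = build([2, 4, 3])  # represents 342
    l2 = build([5, 6, 4])  # represents 465
    node = Solution().addTwoNumbers(l1, l2)
    digits = []
    while node:
        digits.append(node.val)
        node = node.next
    print(digits)  # [7, 0, 8] -> 807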
repo_name: Amit258012/100daysofcode | sub_path: Day14/add_two_numbers_linked_list.py | file_name: add_two_numbers_linked_list.py | file_ext: py | file_size_in_byte: 1,871 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 31211286041
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 28 07:07:43 2015
@author: RAHUL JAIN
"""
from xml.dom.minidom import parse
import xml.dom.minidom
DOMTree = xml.dom.minidom.parse("chemsep1.xml")
compounds = DOMTree.documentElement
compound = compounds.getElementsByTagName("compound")
i = 1
for comp in compound:
compName = comp.getElementsByTagName("CompoundID")[0].getAttribute("value")
CompName = compName.replace(" ","")
CompName = CompName.replace("-","")
CompName = CompName.replace(",","")
CompName = CompName.replace("1","One")
CompName = CompName.replace("2","Two")
CompName = CompName.replace("3","Three")
CompName = CompName.replace("4","Four")
CompName = CompName.replace("5","Five")
CriticalTemp = comp.getElementsByTagName("CriticalTemperature")[0].getAttribute("value")
CriticalPres = comp.getElementsByTagName("CriticalPressure")[0].getAttribute("value")
CriticalVol = comp.getElementsByTagName("CriticalVolume")[0].getAttribute("value")
CriticalComp = comp.getElementsByTagName("CriticalCompressibility")[0].getAttribute("value")
try:
NormalBoilPoint = comp.getElementsByTagName("NormalBoilingPointTemperature")[0].getAttribute("value")
except IndexError:
NormalBoilPoint = "0"
try:
NormalMeltingPoint = comp.getElementsByTagName("NormalMeltingPointTemperature")[0].getAttribute("value")
except IndexError:
NormalMeltingPoint = "0"
try:
TripPntTemp = comp.getElementsByTagName("TriplePointTemperature")[0].getAttribute("value")
except IndexError:
TripPntTemp = "0"
try:
TripPntPres = comp.getElementsByTagName("TriplePointPressure")[0].getAttribute("value")
except IndexError:
TripPntPres = "0"
MolWt = comp.getElementsByTagName("MolecularWeight")[0].getAttribute("value")
try:
LiqVolAtBoilPnt = comp.getElementsByTagName("LiquidVolumeAtNormalBoilingPoint")[0].getAttribute("value")
except IndexError:
LiqVolAtBoilPnt = "0"
try:
AcenFactor = comp.getElementsByTagName("AcentricityFactor")[0].getAttribute("value")
except IndexError:
AcenFactor = "0"
try:
SolParam = comp.getElementsByTagName("SolubilityParameter")[0].getAttribute("value")
except IndexError:
SolParam = "0"
try:
DipoleMoment = comp.getElementsByTagName("DipoleMoment")[0].getAttribute("value")
except IndexError:
DipoleMoment = "0"
try:
IGHF = comp.getElementsByTagName("HeatOfFormation")[0].getAttribute("value")
except IndexError:
IGHF = "0"
try:
GEF = comp.getElementsByTagName("GibbsEnergyOfFormation")[0].getAttribute("value")
except IndexError:
GEF = "0"
try:
AbsEntropy = comp.getElementsByTagName("AbsEntropy")[0].getAttribute("value")
except IndexError:
AbsEntropy = "0"
try:
HeatFusionMeltPnt = comp.getElementsByTagName("HeatOfFusionAtMeltingPoint")[0].getAttribute("value")
except IndexError:
HeatFusionMeltPnt = "0"
try:
HOC = comp.getElementsByTagName("HeatOfCombustion")[0].getAttribute("value")
except IndexError:
HOC = "0"
try:
UniquacR = comp.getElementsByTagName("UniquacR")[0].getAttribute("value")
except IndexError:
UniquacR = "0"
try:
UniquacQ = comp.getElementsByTagName("UniquacQ")[0].getAttribute("value")
except IndexError:
UniquacQ = "0"
try:
RacketParam = comp.getElementsByTagName("RacketParameter")[0].getAttribute("value")
except IndexError:
RacketParam = "0"
try:
LiqDen = comp.getElementsByTagName("LiquidDensity")[0]
LiqDenEqn = LiqDen.getElementsByTagName("eqno")[0].getAttribute("value")
A=LiqDen.getElementsByTagName("A")[0].getAttribute("value")
B=LiqDen.getElementsByTagName("B")[0].getAttribute("value")
C=LiqDen.getElementsByTagName("C")[0].getAttribute("value")
D=LiqDen.getElementsByTagName("D")[0].getAttribute("value")
try:
E=LiqDen.getElementsByTagName("E")[0].getAttribute("value")
except IndexError:
E = "0"
except IndexError:
LiqDenEqn = "0"
A = "0"
B = "0"
C = "0"
D = "0"
E = "0"
try:
VapPres = comp.getElementsByTagName("VaporPressure")[0]
VapPresEqn = VapPres.getElementsByTagName("eqno")[0].getAttribute("value")
VA=VapPres.getElementsByTagName("A")[0].getAttribute("value")
VB=VapPres.getElementsByTagName("B")[0].getAttribute("value")
VC=VapPres.getElementsByTagName("C")[0].getAttribute("value")
try:
VD=VapPres.getElementsByTagName("D")[0].getAttribute("value")
except IndexError:
VD = "0"
try:
VE=VapPres.getElementsByTagName("E")[0].getAttribute("value")
except IndexError:
VE = "0"
except IndexError:
VapPresEqn = "0"
VA = "0"
VB = "0"
VC = "0"
VD = "0"
VE = "0"
try:
LiqCp = comp.getElementsByTagName("LiquidHeatCapacityCp")[0]
LiqCpEqn = LiqCp.getElementsByTagName("eqno")[0].getAttribute("value")
LCpA=LiqCp.getElementsByTagName("A")[0].getAttribute("value")
LCpB=LiqCp.getElementsByTagName("B")[0].getAttribute("value")
LCpC=LiqCp.getElementsByTagName("C")[0].getAttribute("value")
try:
LCpD=LiqCp.getElementsByTagName("D")[0].getAttribute("value")
except IndexError:
LCpD = "0"
try:
LCpE=LiqCp.getElementsByTagName("E")[0].getAttribute("value")
except IndexError:
LCpE = "0"
except IndexError:
LiqCpEqn = "0"
LCpA = "0"
LCpB = "0"
LCpC = "0"
LCpD = "0"
LCpE = "0"
try:
HOV = comp.getElementsByTagName("HeatOfVaporization")[0]
HOVEqn = HOV.getElementsByTagName("eqno")[0].getAttribute("value")
HOVA=HOV.getElementsByTagName("A")[0].getAttribute("value")
HOVB=HOV.getElementsByTagName("B")[0].getAttribute("value")
HOVC=HOV.getElementsByTagName("C")[0].getAttribute("value")
try:
HOVD=HOV.getElementsByTagName("D")[0].getAttribute("value")
except IndexError:
HOVD = "0"
try:
HOVE=HOV.getElementsByTagName("E")[0].getAttribute("value")
except IndexError:
HOVE = "0"
except IndexError:
HOVEqn = "0"
HOVA = "0"
HOVB = "0"
HOVC = "0"
HOVD = "0"
HOVE = "0"
if (float(NormalBoilPoint) > 298.15 ):
HA = float(HOVA)
HB = float(HOVB)
HC = float(HOVC)
HD = float(HOVD)
HE = float(HOVE)
Tr = 298.15/float(CriticalTemp)
SHOV = HA*(pow((1-Tr),(HB + HC*Tr + HD*pow(Tr,2) + HE*pow(Tr,3))))
AbsEnthalpy = float(IGHF) - SHOV
else:
AbsEnthalpy = float(IGHF)
SH = str(AbsEnthalpy)
try:
VapCp = comp.getElementsByTagName("IdealGasHeatCapacityCp")[0]
VapCpEqn = VapCp.getElementsByTagName("eqno")[0].getAttribute("value")
VCpA=VapCp.getElementsByTagName("A")[0].getAttribute("value")
VCpB=VapCp.getElementsByTagName("B")[0].getAttribute("value")
VCpC=VapCp.getElementsByTagName("C")[0].getAttribute("value")
try:
VCpD=VapCp.getElementsByTagName("D")[0].getAttribute("value")
except IndexError:
VCpD = "0"
try:
VCpE=VapCp.getElementsByTagName("E")[0].getAttribute("value")
except IndexError:
VCpE = "0"
except IndexError:
VapCpEqn = "0"
VCpA = "0"
VCpB = "0"
VCpC = "0"
VCpD = "0"
VCpE = "0"
try:
LiqVis = comp.getElementsByTagName("LiquidViscosity")[0]
LiqVisEqn = LiqVis.getElementsByTagName("eqno")[0].getAttribute("value")
LiqVisA=LiqVis.getElementsByTagName("A")[0].getAttribute("value")
LiqVisB=LiqVis.getElementsByTagName("B")[0].getAttribute("value")
LiqVisC=LiqVis.getElementsByTagName("C")[0].getAttribute("value")
try:
LiqVisD=LiqVis.getElementsByTagName("D")[0].getAttribute("value")
except IndexError:
LiqVisD = "0"
try:
LiqVisE=LiqVis.getElementsByTagName("E")[0].getAttribute("value")
except IndexError:
LiqVisE = "0"
except IndexError:
LiqVisEqn = "0"
LiqVisA = "0"
LiqVisB = "0"
LiqVisC = "0"
LiqVisD = "0"
LiqVisE = "0"
try:
VapVis = comp.getElementsByTagName("VaporViscosity")[0]
VapVisEqn = VapVis.getElementsByTagName("eqno")[0].getAttribute("value")
VapVisA=VapVis.getElementsByTagName("A")[0].getAttribute("value")
VapVisB=VapVis.getElementsByTagName("B")[0].getAttribute("value")
VapVisC=VapVis.getElementsByTagName("C")[0].getAttribute("value")
try:
VapVisD=VapVis.getElementsByTagName("D")[0].getAttribute("value")
except IndexError:
VapVisD = "0"
try:
VapVisE=VapVis.getElementsByTagName("E")[0].getAttribute("value")
except IndexError:
VapVisE = "0"
except IndexError:
VapVisEqn = "0"
VapVisA = "0"
VapVisB = "0"
VapVisC = "0"
VapVisD = "0"
VapVisE = "0"
try:
LiqK = comp.getElementsByTagName("LiquidThermalConductivity")[0]
LiqKEqn = LiqK.getElementsByTagName("eqno")[0].getAttribute("value")
LiqKA=LiqK.getElementsByTagName("A")[0].getAttribute("value")
LiqKB=LiqK.getElementsByTagName("B")[0].getAttribute("value")
LiqKC=LiqK.getElementsByTagName("C")[0].getAttribute("value")
try:
LiqKD=LiqK.getElementsByTagName("D")[0].getAttribute("value")
except IndexError:
LiqKD = "0"
try:
LiqKE=LiqK.getElementsByTagName("E")[0].getAttribute("value")
except IndexError:
LiqKE = "0"
except IndexError:
LiqKEqn = "0"
LiqKA = "0"
LiqKB = "0"
LiqKC = "0"
LiqKD = "0"
LiqKE = "0"
try:
VapK = comp.getElementsByTagName("VaporThermalConductivity")[0]
VapKEqn = VapK.getElementsByTagName("eqno")[0].getAttribute("value")
VapKA=VapK.getElementsByTagName("A")[0].getAttribute("value")
VapKB=VapK.getElementsByTagName("B")[0].getAttribute("value")
VapKC=VapK.getElementsByTagName("C")[0].getAttribute("value")
try:
VapKD=VapK.getElementsByTagName("D")[0].getAttribute("value")
except IndexError:
VapKD = "0"
try:
VapKE=VapK.getElementsByTagName("E")[0].getAttribute("value")
except IndexError:
VapKE = "0"
except IndexError:
VapKEqn = "0"
VapKA = "0"
VapKB = "0"
VapKC = "0"
VapKD = "0"
VapKE = "0"
f = open('File5.txt','a')
f.write('model '+CompName)
f.write('\n')
f.write('extends General_Properties(')
f.write('\n')
f.write('SN ' + '=' + str(i) +',')
f.write('\n')
f.write('name' + '=' + '"'+ CompName + '",')
f.write('\n')
f.write('Tc ' + '=' + CriticalTemp + ',')
f.write('\n')
f.write('Pc ' + '=' + CriticalPres + ',')
f.write('\n')
f.write('Vc ' + '=' + CriticalVol + ',')
f.write('\n')
f.write('Cc ' + '=' + CriticalComp + ',')
f.write('\n')
f.write('Tb ' + '=' + NormalBoilPoint + ',')
f.write('\n')
f.write('Tm ' + '=' + NormalMeltingPoint + ',')
f.write('\n')
f.write('TT ' + '=' + TripPntTemp + ',')
f.write('\n')
f.write('TP ' + '=' + TripPntPres + ',')
f.write('\n')
f.write('MW ' + '=' + MolWt + ',')
f.write('\n')
f.write('LVB ' + '=' + LiqVolAtBoilPnt + ',')
f.write('\n')
f.write('AF ' + '=' + AcenFactor + ',')
f.write('\n')
f.write('SP ' + '=' + SolParam + ',')
f.write('\n')
f.write('DM ' + '=' + DipoleMoment + ',')
f.write('\n')
f.write('SH ' + '=' + SH + ',')
f.write('\n')
f.write('IGHF ' + '=' + IGHF + ',')
f.write('\n')
f.write('GEF ' + '=' + GEF + ',')
f.write('\n')
f.write('AS ' + '=' + AbsEntropy + ',')
f.write('\n')
f.write('HFMP ' + '=' + HeatFusionMeltPnt + ',')
f.write('\n')
f.write('HOC ' + '=' + HOC + ',')
f.write('\n')
f.write('LiqDen = {'+LiqDenEqn+","+A+","+B+","+C+","+D+","+E+'},')
f.write('\n')
f.write('VP = {'+VapPresEqn+","+VA+","+VB+","+VC+","+VD+","+VE+'},')
f.write('\n')
f.write('LiqCp = {'+LiqCpEqn+","+LCpA+","+LCpB+","+LCpC+","+LCpD+","+LCpE+'},')
f.write('\n')
f.write('HOV = {'+HOVEqn+","+HOVA+","+HOVB+","+HOVC+","+HOVD+","+HOVE+'},')
f.write('\n')
f.write('VapCp = {'+VapCpEqn+","+VCpA+","+VCpB+","+VCpC+","+VCpD+","+VCpE+'},')
f.write('\n')
f.write('LiqVis = {'+LiqVisEqn+","+LiqVisA+","+LiqVisB+","+LiqVisC+","+LiqVisD+","+LiqVisE+'},')
f.write('\n')
f.write('VapVis = {'+VapVisEqn+","+VapVisA+","+VapVisB+","+VapVisC+","+VapVisD+","+VapVisE+'},')
f.write('\n')
f.write('LiqK = {'+LiqKEqn+","+LiqKA+","+LiqKB+","+LiqKC+","+LiqKD+","+LiqKE+'},')
f.write('\n')
f.write('VapK = {'+VapKEqn+","+VapKA+","+VapKB+","+VapKC+","+VapKD+","+VapKE+'},')
f.write('\n')
f.write('Racketparam = '+RacketParam +',')
f.write('\n')
f.write('UniquacR = '+ UniquacR + ',')
f.write('\n')
f.write('UniquacQ = '+ UniquacQ + ');')
f.write('\n')
f.write('end '+CompName+';')
f.write('\n')
f.write('\n')
# f.write('function Psat')
# f.write('\n')
# f.write('input Real T;')
# f.write('\n')
# f.write('output Real Vp;')
# f.write('\n')
# f.write('algorithm')
# f.write('\n')
# f.write('Vp := exp(VP[2] + VP[3] / T + VP[4] * log(T) + VP[5] * T ^ VP[6]);')
# f.write('\n')
# f.write('end Psat;')
# f.write('\n')
# f.write('\n')
#
# f.write('function LCp')
# f.write('\n')
# f.write('input Real T;')
# f.write('\n')
# f.write('output Real Cp;')
# f.write('\n')
# f.write('algorithm')
# f.write('\n')
# f.write('Cp := (LiqCp[2] + exp(LiqCp[3] / T + LiqCp[4] + LiqCp[5] * T + LiqCp[6] * T ^ 2)) / 1000;')
# f.write('\n')
# f.write('end LCp;')
# f.write('\n')
# f.write('\n')
#
# f.write('function HV')
# f.write('\n')
# f.write('input Real T;')
# f.write('\n')
# f.write('output Real Hv;')
# f.write('\n')
# f.write('protected')
# f.write('\n')
# f.write('Real Tr = T / Tc;')
# f.write('\n')
# f.write('algorithm')
# f.write('\n')
# f.write('Hv := HOV[2] * (1 - Tr) ^ (HOV[3] + HOV[4] * Tr + HOV[5] * Tr ^ 2 + HOV[6] * Tr ^ 3) / 1000;')
# f.write('\n')
# f.write('end HV;')
# f.write('\n')
# f.write('\n')
#
# f.write('function HLiq')
# f.write('\n')
# f.write('input Real T;')
# f.write('\n')
# f.write('output Real Ent;')
# f.write('\n')
# f.write('protected')
# f.write('\n')
# f.write('Real Temp = 298.15;')
# f.write('\n')
# f.write('algorithm')
# f.write('\n')
# f.write('Ent := 0;')
# f.write('\n')
# f.write('while Temp < T loop')
# f.write('\n')
# f.write('Ent := Ent + LCp(Temp) * 1;')
# f.write('\n')
# f.write('Temp := Temp + 1;')
# f.write('\n')
# f.write('end while;')
# f.write('\n')
# f.write('Ent := SH / 1000 + Ent;')
# f.write('\n')
# f.write('end HLiq;')
# f.write('\n')
# f.write('\n')
#
# f.write('function HVap')
# f.write('\n')
# f.write('input Real T;')
# f.write('\n')
# f.write('output Real Ent;')
# f.write('\n')
# f.write('algorithm')
# f.write('\n')
# f.write('Ent := HLiq(T) + HV(T);')
# f.write('\n')
# f.write('end HVap;')
# f.write('\n')
i = i + 1
f.close()
repo_name: RahulJain7/Openmodelica-Thermodynamic-Engine | sub_path: PythonFiles/getComp.py | file_name: getComp.py | file_ext: py | file_size_in_byte: 15,689 | program_lang: python | lang: en | doc_type: code | stars: 3 | dataset: github-code | pt: 6

seq_id: 17609260691
# encoding: utf-8
import badgrlog
import datetime
from django.utils import timezone
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework import serializers
from rest_framework import status
from backpack.models import BackpackCollection, BackpackBadgeShare, BackpackCollectionShare
from backpack.serializers_v1 import CollectionSerializerV1, LocalBadgeInstanceUploadSerializerV1
from backpack.serializers_v2 import BackpackAssertionSerializerV2, BackpackCollectionSerializerV2, \
BackpackImportSerializerV2, BackpackAssertionAcceptanceSerializerV2
from entity.api import BaseEntityListView, BaseEntityDetailView
from issuer.models import BadgeInstance
from issuer.permissions import AuditedModelOwner, VerifiedEmailMatchesRecipientIdentifier, BadgrOAuthTokenHasScope
from issuer.public_api import ImagePropertyDetailView
from apispec_drf.decorators import apispec_list_operation, apispec_post_operation, apispec_get_operation, \
apispec_delete_operation, apispec_put_operation, apispec_operation
from mainsite.permissions import AuthenticatedWithVerifiedIdentifier
logger = badgrlog.BadgrLogger()
_TRUE_VALUES = ['true', 't', 'on', 'yes', 'y', '1', 1, 1.0, True]
_FALSE_VALUES = ['false', 'f', 'off', 'no', 'n', '0', 0, 0.0, False]
def _scrub_boolean(boolean_str, default=None):
if boolean_str in _TRUE_VALUES:
return True
if boolean_str in _FALSE_VALUES:
return False
return default
class BackpackAssertionList(BaseEntityListView):
model = BadgeInstance
v1_serializer_class = LocalBadgeInstanceUploadSerializerV1
v2_serializer_class = BackpackAssertionSerializerV2
create_event = badgrlog.BadgeUploaded
permission_classes = (AuthenticatedWithVerifiedIdentifier, VerifiedEmailMatchesRecipientIdentifier, BadgrOAuthTokenHasScope)
http_method_names = ('get', 'post')
valid_scopes = {
'get': ['r:backpack', 'rw:backpack'],
'post': ['rw:backpack'],
}
include_defaults = {
'include_expired': {'v1': 'true', 'v2': 'false'},
'include_revoked': {'v1': 'false', 'v2': 'false'},
'include_pending': {'v1': 'false', 'v2': 'false'},
}
def get_objects(self, request, **kwargs):
version = kwargs.get('version', 'v1')
include_expired = request.query_params.get(
'include_expired', self.include_defaults['include_expired'][version]
).lower() in ['1', 'true']
include_revoked = request.query_params.get(
'include_revoked', self.include_defaults['include_revoked'][version]
).lower() in ['1', 'true']
include_pending = request.query_params.get(
'include_pending', self.include_defaults['include_pending'][version]
).lower() in ['1', 'true']
def badge_filter(b):
if ((b.acceptance == BadgeInstance.ACCEPTANCE_REJECTED) or
(not include_expired and b.expires_at != None and b.expires_at < timezone.now()) or
(not include_revoked and b.revoked) or
(not include_pending and b.pending)):
return False
return True
return list(filter(badge_filter, self.request.user.cached_badgeinstances()))
@apispec_list_operation('Assertion',
summary="Get a list of Assertions in authenticated user's backpack ",
tags=['Backpack']
)
def get(self, request, **kwargs):
mykwargs = kwargs.copy()
mykwargs['expands'] = []
expands = request.GET.getlist('expand', [])
if 'badgeclass' in expands:
mykwargs['expands'].append('badgeclass')
if 'issuer' in expands:
mykwargs['expands'].append('issuer')
return super(BackpackAssertionList, self).get(request, **mykwargs)
@apispec_post_operation('Assertion',
summary="Upload a new Assertion to the backpack",
tags=['Backpack']
)
def post(self, request, **kwargs):
if kwargs.get('version', 'v1') == 'v1':
try:
return super(BackpackAssertionList, self).post(request, **kwargs)
except serializers.ValidationError as e:
self.log_not_created(e)
raise e
raise NotImplementedError("use BackpackImportBadge.post instead")
def log_not_created(self, error):
request = self.request
user = request.user
image_data = ''
user_entity_id = ''
error_name = ''
error_result = ''
if request.data.get('image', None) is not None:
image_data = request.data.get('image', '')[:1024]
if user is not None:
user_entity_id = user.entity_id
if len(error.detail) <= 1:
#grab first error
e = error.detail[0]
error_name = e.get('name', '')
error_result = e.get('result', '')
invalid_badge_upload_report = badgrlog.InvalidBadgeUploadReport(image_data, user_entity_id, error_name, error_result)
logger.event(badgrlog.InvalidBadgeUploaded(invalid_badge_upload_report))
def get_context_data(self, **kwargs):
context = super(BackpackAssertionList, self).get_context_data(**kwargs)
context['format'] = self.request.query_params.get('json_format', 'v1') # for /v1/earner/badges compat
return context
class BackpackAssertionDetail(BaseEntityDetailView):
model = BadgeInstance
v1_serializer_class = LocalBadgeInstanceUploadSerializerV1
v2_serializer_class = BackpackAssertionSerializerV2
permission_classes = (AuthenticatedWithVerifiedIdentifier, VerifiedEmailMatchesRecipientIdentifier, BadgrOAuthTokenHasScope)
http_method_names = ('get', 'delete', 'put')
valid_scopes = {
'get': ['r:backpack', 'rw:backpack'],
'put': ['rw:backpack'],
'delete': ['rw:backpack'],
}
def get_context_data(self, **kwargs):
context = super(BackpackAssertionDetail, self).get_context_data(**kwargs)
context['format'] = self.request.query_params.get('json_format', 'v1') # for /v1/earner/badges compat
return context
@apispec_get_operation('BackpackAssertion',
summary="Get detail on an Assertion in the user's Backpack",
tags=['Backpack']
)
def get(self, request, **kwargs):
mykwargs = kwargs.copy()
mykwargs['expands'] = []
expands = request.GET.getlist('expand', [])
if 'badgeclass' in expands:
mykwargs['expands'].append('badgeclass')
if 'issuer' in expands:
mykwargs['expands'].append('issuer')
return super(BackpackAssertionDetail, self).get(request, **mykwargs)
@apispec_delete_operation('BackpackAssertion',
summary='Remove an assertion from the backpack',
tags=['Backpack']
)
def delete(self, request, **kwargs):
obj = self.get_object(request, **kwargs)
related_collections = list(BackpackCollection.objects.filter(backpackcollectionbadgeinstance__badgeinstance=obj))
if obj.source_url is None:
obj.acceptance = BadgeInstance.ACCEPTANCE_REJECTED
obj.save()
else:
obj.delete()
for collection in related_collections:
collection.save()
request.user.save()
return Response(status=status.HTTP_204_NO_CONTENT)
@apispec_put_operation('BackpackAssertion',
summary="Update acceptance of an Assertion in the user's Backpack",
tags=['Backpack']
)
def put(self, request, **kwargs):
fields_whitelist = ('acceptance',)
data = {k: v for k, v in list(request.data.items()) if k in fields_whitelist}
obj = self.get_object(request, **kwargs)
if not self.has_object_permissions(request, obj):
return Response(status=status.HTTP_404_NOT_FOUND)
context = self.get_context_data(**kwargs)
update_serializer = BackpackAssertionAcceptanceSerializerV2(obj, data, context=context)
update_serializer.is_valid(raise_exception=True)
update_serializer.save(updated_by=request.user)
main_serializer_class = self.get_serializer_class()
serializer = main_serializer_class(update_serializer.instance, context=context)
return Response(serializer.data)
class BackpackAssertionDetailImage(ImagePropertyDetailView, BadgrOAuthTokenHasScope):
model = BadgeInstance
prop = 'image'
valid_scopes = ['r:backpack', 'rw:backpack']
class BackpackCollectionList(BaseEntityListView):
model = BackpackCollection
v1_serializer_class = CollectionSerializerV1
v2_serializer_class = BackpackCollectionSerializerV2
permission_classes = (AuthenticatedWithVerifiedIdentifier, AuditedModelOwner, BadgrOAuthTokenHasScope)
valid_scopes = {
'get': ['r:backpack', 'rw:backpack'],
'post': ['rw:backpack'],
}
def get_objects(self, request, **kwargs):
return self.request.user.cached_backpackcollections()
@apispec_get_operation('Collection',
summary='Get a list of Collections',
tags=['Backpack']
)
def get(self, request, **kwargs):
return super(BackpackCollectionList, self).get(request, **kwargs)
@apispec_post_operation('Collection',
summary='Create a new Collection',
tags=['Backpack']
)
def post(self, request, **kwargs):
return super(BackpackCollectionList, self).post(request, **kwargs)
class BackpackCollectionDetail(BaseEntityDetailView):
model = BackpackCollection
v1_serializer_class = CollectionSerializerV1
v2_serializer_class = BackpackCollectionSerializerV2
permission_classes = (AuthenticatedWithVerifiedIdentifier, AuditedModelOwner, BadgrOAuthTokenHasScope)
valid_scopes = {
'get': ['r:backpack', 'rw:backpack'],
'post': ['rw:backpack'],
'put': ['rw:backpack'],
'delete': ['rw:backpack']
}
@apispec_get_operation('Collection',
summary='Get a single Collection',
tags=['Backpack']
)
def get(self, request, **kwargs):
return super(BackpackCollectionDetail, self).get(request, **kwargs)
@apispec_put_operation('Collection',
summary='Update a Collection',
tags=['Backpack']
)
def put(self, request, **kwargs):
return super(BackpackCollectionDetail, self).put(request, **kwargs)
@apispec_delete_operation('Collection',
summary='Delete a collection',
tags=['Backpack']
)
def delete(self, request, **kwargs):
return super(BackpackCollectionDetail, self).delete(request, **kwargs)
class BackpackImportBadge(BaseEntityListView):
v2_serializer_class = BackpackImportSerializerV2
permission_classes = (AuthenticatedWithVerifiedIdentifier, BadgrOAuthTokenHasScope,)
http_method_names = ('post',)
valid_scopes = ['rw:backpack']
@apispec_operation(
summary="Import a new Assertion to the backpack",
tags=['Backpack'],
parameters=[
{
"in": "body",
"name": "body",
"required": True,
"schema": {
"type": "object",
"properties": {
"url": {
"type": "string",
"format": "url",
"description": "URL to an OpenBadge compliant badge",
'required': False
},
"image": {
'type': "string",
'format': "data:image/png;base64",
'description': "base64 encoded Baked OpenBadge image",
'required': False
},
"assertion": {
'type': "json",
'description': "OpenBadge compliant json",
'required': False
},
}
},
}
]
)
def post(self, request, **kwargs):
context = self.get_context_data(**kwargs)
serializer_class = self.get_serializer_class()
serializer = serializer_class(data=request.data, context=context)
serializer.is_valid(raise_exception=True)
new_instance = serializer.save(created_by=request.user)
self.log_create(new_instance)
response_serializer = BackpackAssertionSerializerV2(new_instance, context=context)
return Response(response_serializer.data, status=status.HTTP_201_CREATED)
class ShareBackpackAssertion(BaseEntityDetailView):
model = BadgeInstance
permission_classes = (permissions.AllowAny,) # this is AllowAny to support tracking sharing links in emails
http_method_names = ('get',)
allow_any_unauthenticated_access = True
def get(self, request, **kwargs):
"""
Share a single badge to a support share provider
---
parameters:
- name: provider
description: The identifier of the provider to use. Supports 'facebook', 'linkedin'
required: true
type: string
paramType: query
"""
redirect = _scrub_boolean(request.query_params.get('redirect', "1"))
provider = request.query_params.get('provider')
if not provider:
return Response({'error': "unspecified share provider"}, status=status.HTTP_400_BAD_REQUEST)
provider = provider.lower()
source = request.query_params.get('source', 'unknown')
badge = self.get_object(request, **kwargs)
if not badge:
return Response(status=status.HTTP_404_NOT_FOUND)
include_identifier = _scrub_boolean(request.query_params.get('include_identifier', False))
share = BackpackBadgeShare(provider=provider, badgeinstance=badge, source=source)
share_url = share.get_share_url(provider, include_identifier=include_identifier)
if not share_url:
return Response({'error': "invalid share provider"}, status=status.HTTP_400_BAD_REQUEST)
share.save()
logger.event(badgrlog.BadgeSharedEvent(badge, provider, datetime.datetime.now(), source))
if redirect:
headers = {'Location': share_url}
return Response(status=status.HTTP_302_FOUND, headers=headers)
else:
return Response({'url': share_url})
class ShareBackpackCollection(BaseEntityDetailView):
model = BackpackCollection
permission_classes = (permissions.AllowAny,) # this is AllowAny to support tracking sharing links in emails
http_method_names = ('get',)
def get(self, request, **kwargs):
"""
Share a collection to a supported share provider
---
parameters:
- name: provider
description: The identifier of the provider to use. Supports 'facebook', 'linkedin'
required: true
type: string
paramType: query
"""
redirect = _scrub_boolean(request.query_params.get('redirect', "1"))
provider = request.query_params.get('provider')
if not provider:
return Response({'error': "unspecified share provider"}, status=status.HTTP_400_BAD_REQUEST)
provider = provider.lower()
source = request.query_params.get('source', 'unknown')
collection = self.get_object(request, **kwargs)
if not collection:
return Response(status=status.HTTP_404_NOT_FOUND)
share = BackpackCollectionShare(provider=provider, collection=collection, source=source)
share_url = share.get_share_url(provider, title=collection.name, summary=collection.description)
if not share_url:
return Response({'error': "invalid share provider"}, status=status.HTTP_400_BAD_REQUEST)
share.save()
if redirect:
headers = {'Location': share_url}
return Response(status=status.HTTP_302_FOUND, headers=headers)
else:
return Response({'url': share_url})
repo_name: reedu-reengineering-education/badgr-server | sub_path: apps/backpack/api.py | file_name: api.py | file_ext: py | file_size_in_byte: 16,711 | program_lang: python | lang: en | doc_type: code | stars: 2 | dataset: github-code | pt: 6

seq_id: 7068326150
#!/usr/bin/env python
# -*- coding=utf-8 -*-
# Author: kaname
# QQ: 1394041054
""" C4d analyzer """
# RUN:
# 1. From C4Dloader.py to loading RBAnalzer.py to do it.
# 2. AnalyzeC4d.py loading C4Dloader.py to do it.
import os
import sys
import subprocess
import string
import logging
import time
import shutil
reload(sys)
sys.setdefaultencoding('utf-8')
from C4d import C4d
from C4dLoader import C4dLoader
from C4dPluginManager import C4dPlugin, C4dPluginMgr
from CommonUtil import RBCommon as CLASS_COMMON_UTIL
class AnalyzeC4d(C4d):
def __init__(self, **paramDict):
C4d.__init__(self, **paramDict)
self.format_log('AnalyzeC4d.init', 'start')
self.G_TIPS_TXT_NODE=os.path.join(self.G_WORK_RENDER_TASK_CFG, 'tips.json').replace('\\','/')
for key, value in self.__dict__.items():
self.G_DEBUG_LOG.info(key + '=' + str(value))
self.format_log('done','end')
def RB_CONFIG(self):
self.G_DEBUG_LOG.info('[.hldoing]')
self.G_DEBUG_LOG.info('[.้
็ฝฎๆไปถๅผๅง]')
self.G_DEBUG_LOG.info('[C4d.Plugin.config.start......]')
self.plugin_config()
self.G_DEBUG_LOG.info('[.hldone]')
self.G_DEBUG_LOG.info('[.้
็ฝฎๆไปถๅฎๆ]')
self.G_DEBUG_LOG.info('[C4d.Plugin.config.end......]')
def RB_RENDER(self):
self.G_DEBUG_LOG.info('[c4d.RBanalyse.start.....]')
self.G_FEE_PARSER.set('render','start_time',str(int(time.time())))
cg_ver = self.G_CG_VERSION
task_id = self.G_TASK_ID
cg_file = self.G_INPUT_CG_FILE
task_json = self.G_TASK_JSON
asset_json = self.G_ASSET_JSON
tips_json = self.G_TIPS_TXT_NODE
c4d_loader = C4dLoader(cg_ver, task_id, cg_file, task_json, asset_json, tips_json)
c4d_loader.execute()
self.G_FEE_PARSER.set('render','end_time',str(int(time.time())))
self.G_DEBUG_LOG.info('[c4d.RBanalyse.end.....]')
repo_name: kRayvison/Pycharm_python36 | sub_path: new_render_data/input/p/script/abort/back20180419/CG/C4d/python34_bak/process/AnalyzeC4d_201712191500.py | file_name: AnalyzeC4d_201712191500.py | file_ext: py | file_size_in_byte: 1,972 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6

seq_id: 39732689560
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 7 10:43:00 2022
@author: Christopher Corbell
"""
from tttoe.board import Board
class GameTree:
"""
A GameTree can generate a tree of tic tac toe Board nodes.
The tree is generate as tiers of boards at each ply.
Boards are labeled to indicate their paths through the tree.
Board 'aliases' can be used to prune isomorphic subtrees
that arise from different parents at the same play;
only one of such isomorphs is continued, the others have
their .alias field set to the label of that one for reference.
This is optional behavior; set 'skipAliases' to True in
the generateTree() call to identify and skip such isomorphs.
(Note that isomorphic children of the -same- parent board are always
skipped, only unique children of each parent are considered)
"""
def __init__(self, rootBoard:Board=None):
if None == rootBoard:
self.root = Board()
self.root.label = '0'
self.plies = []
else:
self.root = rootBoard
self.plies = []
def generateTree(self, skipAliases:bool):
lastPly = [self.root]
currentPly = []
depth = 0
while depth < 9:
for parent in lastPly:
if True == skipAliases:
if not (None == parent.alias):
print (f"skipping parent {parent.label}, alias of {parent.alias}...")
continue
if parent.isWin():
print (f"skipping parent {parent.label}, {parent.winString()}")
continue
#if parent.isDraw():
# continue
children = GameTree.generateChildBoards(parent)
print (f"...generated {len(children)} child boards from parent {parent.label}")
currentPly.extend(children)
if skipAliases:
GameTree.determineAliases(currentPly)
self.plies.append(currentPly)
lastPly = currentPly
currentPly = []
depth += 1
def print_tree_size(self):
totalSize = 1
for n in range(0, len(self.plies)):
print(f"ply {n+1} size: {len(self.plies[n])}")
totalSize += len(self.plies[n])
print(f"total tree size (including root, excluding isomorphic branches): {totalSize}")
def print_tree(self):
print (self.root)
for n in range(0, len(self.plies)):
print ('===========================================')
print (f'ply {n+1}:')
for board in self.plies[n]:
print (board)
print('')
def print_ply(self, index):
if index == 0:
print ('=========================================== 1 board at root')
print (f"{self.root}\n")
else:
print (f'=========================================== {len(self.plies[index-1])} boards in ply {index}')
for board in self.plies[index - 1]:
print(f"{board}\n")
def generateChildBoards(parent: Board):
"""
Given a parent game board, generate all of its next-move boards
up to isomorphism. Boards are canonical and are labeled
with increasing integers appended to the parent label.
For example, if parent is labeled '0.3' and there are 4 child
boards, they will be labeled '0.3.0, '0.3.1', 0.3.2', 0.3.3'.
Parameters
----------
parent : Board
A parent board.
Returns
-------
childBoards : [Board]
A list of all possible child boards (next plays of the game
from the parent board state) up to isomorphism.
"""
childBoardCanonicalStrings = []
blocks = set()
isPlayingX = parent.nextPlayer() == Board.X_TOKEN
emptyIndices = parent.empty_indices()
for playIndex in emptyIndices:
childBoard = parent.copy()
if isPlayingX:
childBoard.xplay(playIndex)
else:
childBoard.oplay(playIndex)
childStr = childBoard.canonicalString()
if childBoard.block:
blocks.add(childStr)
if not childStr in childBoardCanonicalStrings:
childBoardCanonicalStrings.append(childStr)
del childBoard
childBoards = []
index = 0
for gridString in childBoardCanonicalStrings:
child = Board()
child.grid = list(gridString)
child.label = parent.label + "." + str(index)
if gridString in blocks:
print(f"- block played in board {child.label}")
child.block = True
index += 1
childBoards.append(child)
return childBoards
def determineAliases(plyboards):
"""
Given a list of child-board lists of the same ply,
determine aliases of isomorphic boards. This assumes
all boards are already canonical so we can simply compare
lexical string representation.
As a subtle improvement, we use the block property of
each board to prefer blocks as alias targets (so continuing
isomorphic branches explicitly have blocking plays as parent nodes)
(A block is a play which removes a winning next-move opportunity)
Parameters
----------
childBoardLists : [[Board]]
A list of Board lists, expected to be returned by
generateChildBoards for the same ply level (game-tree depth)
Returns
-------
None; alias value is set on Board objects passed in
"""
# resolve aliases to block-play boards first
block_indices = [idx for idx, element in enumerate(plyboards) if element.block]
for block_index in block_indices:
target = plyboards[block_index]
for otherIndex in range(0, len(plyboards)):
if otherIndex in block_indices:
continue
other = plyboards[otherIndex]
if not None == other.alias: # already determined
continue
if target.lexstring() == other.lexstring():
other.alias = target.label
# now resolve everything else
targetIndex = 0
for targetIndex in range(0, len(plyboards) - 1):
target = plyboards[targetIndex]
if target.block: # already processed
continue
if not None == target.alias: # already determined to be alias
continue
for laterIndex in range(targetIndex+1, len(plyboards)):
later = plyboards[laterIndex]
if not None == later.alias: # already determined
continue
if target.lexstring() == later.lexstring():
# found an alias
later.alias = target.label
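# Illustrative usage sketch (not part of the original file), assuming tttoe.board.Board is
# importable as at the top of this module.
if __name__ == "__main__":
    tree = GameTree()
    tree.generateTree(skipAliases=True)
    tree.print_tree_size()
    tree.print_ply(1)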
repo_name: ccorbell/gametheory | sub_path: tttoe/gametree.py | file_name: gametree.py | file_ext: py | file_size_in_byte: 7,352 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 3990110801
from functions import *
from create_directory import *
from Crypto.Cipher import AES
import os
import shutil
import time
home = user_home()
if os.path.exists(home + "DataShareSecure") == False:
print("\nNous vous prions de lire le fichier \"Readme.txt\" et de suivre ces consignes.\n")
sys.exit()
print("BIENVENUE DANS CE PROGRAMME DE DECHIFFREMENT DE FICHIERS\n")
print("######### BON ร SAVOIR ##########\n")
print("Vous exรฉcutez ce programme stipule que:\n\n"
"1- Vous avez pris connaissance du fonctionnement de DataShareSecure grรขce au \"Readme.txt\" \n"
"2- Vous avez exรฉcutรฉ le programme \"Public_Key_Manage.py\" au moins une fois et disposer donc d'une "
"paire de clรฉs\n"
"3- Vous dรฉsirez dรฉchiffrer des fichiers que vous avez reรงus d'un correspondant\n")
print("Si vous ne remplissez pas toutes les conditions du \"BON ร SAVOIR\", je vous invite ร fermer ce programme.\n"
"Et ร prendre le temps de remplir ces conditions.\n")
choix = input("Remplissez-vous les conditions sus-citรฉs ? (O)ui ou (N)on : ")
if choix == 'O' or choix =='o':
print("\nBien. Nous pouvons donc continuer\n")
vide_directory(home + "DataShareSecure/Encrypted")
vide_directory(home + "DataShareSecure/Decrypted")
os.chdir(home + "DataShareSecure/Received")
path = home + 'DataShareSecure/Received/key_used'
with open(path, "r") as file:
key_encrypted = file.read()
key = dechiffrer(key_encrypted)
buffer_size = 65536 # 64kb
########## MOVE FILE ############
print("######## DECHIFFREMENT DES FICHIERS ET VERIFIVATION DES SIGNATURES ######## \n")
file_dir = []
file = [f for f in os.listdir(home + "DataShareSecure/Received") if os.path.isfile(f)]
for f in file:
if ".dss" in f:
shutil.copy(f, home + "DataShareSecure/Encrypted")
elif ".asc" in f:
shutil.copy(f, home + "DataShareSecure/Decrypted")
########## DECRYPT ###############
print("\n############# DECHIFFREMENT DES FICHIERS REรUES ############\n")
os.chdir(home + "DataShareSecure/Encrypted")
files_dir = []
files = [f for f in os.listdir(home + "DataShareSecure/Encrypted") if os.path.isfile(f)]
for f in files:
files_dir.append(f)
    for x in files_dir:
        with open(home + "DataShareSecure/Encrypted/" + x, "rb") as f:
            f.seek(0)
            path = home + 'DataShareSecure/Decrypted/' + x
            output_file = open(path[:-4], "wb")
            iv = f.read(16)
            cipher_decrypt = AES.new(key, AES.MODE_CFB, iv=iv)
            buffer = f.read(buffer_size)
            while len(buffer) > 0:
                decrypted_bytes = cipher_decrypt.decrypt(buffer)
                output_file.write(decrypted_bytes)
                buffer = f.read(buffer_size)
            output_file.close()
print("Vos fichiers dรฉchiffrรฉs sont enregistrรฉs dans le repertoire \"Decrypted\". \n")
########## VERIFY SIGNATURE ###############
print("\n############ VERIFICATION DES FICHERS REรUES #################\n")
os.chdir(home + "DataShareSecure/Decrypted/")
files_dir = []
files = [f for f in os.listdir(home + "DataShareSecure/Decrypted/") if os.path.isfile(f)]
for f in files:
if ".asc" in f:
files_dir.append(f)
for x in files_dir:
with open(home + "DataShareSecure/Decrypted/" + x, "rb") as f:
file = x[:-4]
verified = gpg.verify_file(f, file)
print(file + " : ", verified.status + "")
print("\nNOUS VOICI ร LA FIN\n")
|
Su1M01/DataShareSecure
|
Receiver.py
|
Receiver.py
|
py
| 3,616 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
26061079286
|
import nltk
from nltk.tokenize import *
import numpy as np
#--------------------------------------------------------
alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
punctuation = ['?',',','!','.',':',';']
char_count= [0] * len(alphabet)
punctuation_count = [0] * len(punctuation)
#--------------------------------------------------------
# PART OF SPEECH STUFF
#--------------------------------------------------------
#part of speech ratios + lexical variety
# - determiners
# - prepositions
# - pronouns
# - modal auxiliary-verbs -> CAN, COULD, WILL, WOULD
# - adverbs
# - coord-conjuctions
# - nouns
# - proper-nouns
# - adjectives
# - verbs
# - lexical variety = nouns + proper_nouns + adjectives + verbs + adverbs
pronouns_list = ['PRP', 'PRP$', 'WP', 'WP$']
adverbs_list = ['RB' ,'RBR', 'RBS', 'WRB']
adjectives_list = ['JJ', 'JJR', 'JJS']
verbs_list = ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']
pos_ratios = [0] * 11
avg_sentence_length = 0
avg_word_length = 0
total_words = 0
#--------------------------------------------------------
def main():
np.set_printoptions(suppress=True)
features = []
text = open("training_set\Abraham Lincoln\Abraham Lincoln___Lincoln Letters.txt").read()
#total useful char
t_u_c = total_useful_char(text)
total_puctuation = count_punctuation(text)
total_words = len(word_tokenize(text))
#FEATURES 1 - 26
letters_frequency(text, t_u_c)
#FEATURES 27 - 32
punctuation_frequency(text, total_puctuation)
    #FEATURES 33 - 43
part_of_speech_ratios(text, total_words)
#FEATURES 44 - 45
avg_sentence_length = average_sentence_length(text)
avg_word_length = average_word_length(text)
features.extend(char_count)
features.extend(punctuation_count)
features.extend(pos_ratios)
features.append(avg_sentence_length)
features.append(avg_word_length)
features.append(total_words)
features = np.array(features).reshape(-1,1)
print("\n\n FEATURES final array: \n", features)
print(features.shape)
def average_word_length(text):
words = word_tokenize(text)
sum = 0
for word in words:
sum += len(word)
return sum/len(words)
def average_sentence_length(text):
sentences = sent_tokenize(text)
sum = 0
for sentence in sentences:
sum += len(word_tokenize(sentence))
return sum/len(sentences)
def count_punctuation(text):
return text.count('?') + text.count(',') + text.count('!') + text.count('.') + text.count(':') + text.count(';')
def total_useful_char(text):
return len(text) - text.count(" ") - text.count("\n")
def letters_frequency(text, tChar):
for char in text.lower():
if char in alphabet:
char_count[alphabet.index(char)] += 1
    # normalise in place by index; list.index(value) would hit the first duplicate count
    for i in range(len(char_count)):
        char_count[i] /= tChar
def punctuation_frequency(text, total_puctuation):
for char in text:
if char in punctuation:
punctuation_count[punctuation.index(char)] += 1
    for i in range(len(punctuation_count)):
        punctuation_count[i] /= total_puctuation
def part_of_speech_ratios(text, total_words):
words = word_tokenize(text)
tagged_words = nltk.pos_tag(words)
# lexical variety = nouns + proper_nouns + adjectives + verbs + adverbs
for tagged_word in tagged_words:
is_a_pronoun = [pronoun for pronoun in pronouns_list if(pronoun in tagged_word)]
is_a_adverb = [adverb for adverb in adverbs_list if(adverb in tagged_word)]
is_a_adjective = [adjective for adjective in adjectives_list if(adjective in tagged_word)]
is_a_verb = [verb for verb in verbs_list if(verb in tagged_word)]
if 'DT' in tagged_word:
pos_ratios[0] += 1
elif 'IN' in tagged_word:
pos_ratios[1] += 1
elif is_a_pronoun:
pos_ratios[2] += 1
elif 'MD' in tagged_word:
pos_ratios[3] += 1
elif is_a_adverb:
pos_ratios[4] += 1
pos_ratios[10] += 1
elif 'CC' in tagged_word:
pos_ratios[5] += 1
elif ('NN' in tagged_word or 'NNS' in tagged_word):
pos_ratios[6] += 1
pos_ratios[10] += 1
elif ('NNP' in tagged_word or 'NNPS' in tagged_word):
pos_ratios[7] += 1
pos_ratios[10] += 1
elif is_a_adjective:
pos_ratios[8] += 1
pos_ratios[10] += 1
elif is_a_verb:
pos_ratios[9] += 1
pos_ratios[10] += 1
    for i in range(len(pos_ratios)):
        pos_ratios[i] /= total_words
if __name__ == '__main__':
main()
|
andresOchoaHernandez/AuthorshipRecognition
|
PythonPrototype/extract_features.py
|
extract_features.py
|
py
| 4,777 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35374859965
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 8 22:42:38 2022
@author: sanggupark
"""
import numpy as np
import traci
from dataclasses import dataclass
import math
from shapely.geometry import LineString, Point
from SimpleMath import create_vehicle_shape
@dataclass(init = True)
class Object_sensed:
ID: str
xpos: float
ypos: float
vel: float
angle: float
width: float
length: float
acc_max: float = 4.0
dec_max: float = 7.0
dec_min: float = 2.0
response: float = 0.2
blinker: int = 0
def lidar_sensing(ego, veh_other):
xpos = ego.xpos
ypos = ego.ypos
length = ego.length
rad = np.radians(ego.angle)
p_tail = Point([xpos-(length)*math.sin(rad),
ypos-(length)*math.cos(rad)])
# FOV control
if "LF" in ego.behavior:
angles = np.linspace(rad+(ego.sensor.fov/4), rad-(ego.sensor.fov/4), 100)
elif "LC_R" in ego.behavior:
angles = np.linspace(rad+(ego.sensor.fov/2.0), rad-(ego.sensor.fov/2.0), 100)
elif "LC_L" in ego.behavior:
angles = np.linspace(rad+(ego.sensor.fov/2.0), rad-(ego.sensor.fov/2.0), 100)
distance = ego.sensor.radius * 1.00
lines = []
for angle in angles:
line = LineString([[p_tail.x, p_tail.y], [p_tail.x + distance*math.sin(angle), p_tail.y + distance*math.cos(angle)]])
lines.append(line)
vehicles_sensed = []
follower = traci.vehicle.getFollower(ego.ID, dist=5.0)[0]
""" LIDAR Sensing """
for veh in veh_other:
is_detected = False
poly_veh = create_vehicle_shape(veh)
if veh.ID != ego.ID:
for line in lines:
is_detected = poly_veh.intersects(line)
if is_detected:
break
if is_detected and not (veh.ID == follower):
vehicles_sensed.append(veh)
return vehicles_sensed
def blinker_sensing(ego, vehicles_sensed):
""" Blinker Sensing """
for veh in vehicles_sensed:
blinker = traci.vehicle.getSignals(veh.ID)
# If LF """
if blinker == 0:
veh.blinker = 0
# If LC_R """
elif blinker == 1:
veh.blinker = -1
# If LC_L """
elif blinker == 2:
veh.blinker = 1
return vehicles_sensed
def update_prev_info(ego, vehicles_sensed):
""" Update Old info """
for veh in vehicles_sensed:
if 'auv' in veh.ID:
object_add = Object_sensed(veh.ID, veh.xpos, veh.ypos, veh.vel, veh.angle, veh.width, veh.length, blinker=veh.blinker)
elif 'huv' in veh.ID:
            # map the raw TraCI signal bits to the -1/0/+1 convention used in blinker_sensing
            blinker = traci.vehicle.getSignals(veh.ID)
            if blinker == 1:
                blinker = -1
            elif blinker == 2:
                blinker = 1
            else:
                blinker = 0
            object_add = Object_sensed(veh.ID, veh.xpos, veh.ypos, veh.vel, veh.angle, veh.width, veh.length, blinker=blinker)
if len(ego.objects_sensed):
flag = False
for obj in ego.objects_sensed:
if obj.ID == object_add.ID:
# replacement due to overlaps
ego.objects_sensed[np.where(ego.objects_sensed==obj)] = object_add
flag = True
if not flag:
# add if no overlaps
ego.objects_sensed = np.append(ego.objects_sensed, object_add)
else:
# if the list is empty
ego.objects_sensed = np.append(ego.objects_sensed, object_add)
return
|
sanggu-park/blaft_simulation
|
Sensing.py
|
Sensing.py
|
py
| 3,640 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9169202815
|
from tkinter import *
from tkinter.messagebox import *
from math import pi
def compute_area():
    value = e.get()
    if value == '':
        showinfo('message', 'Enter the value')
    else:
        r = float(value)
        t = pi * r * r  # area of a circle = pi * r^2
        l2.configure(bg='#209f49', text='Area of circle: {:.1f}'.format(t))
root=Tk()
root.title('Calculate')
root.geometry('280x220')
root.configure(bg='white')
l1=Label(text='Enter the radius of circle',font='times 20')
l2=Label(font='times 20')
e=Entry(width=15,bd='5px',font='times 20',bg='#6ec1ff')
b3=Button(root,text='Ok',width=10,font=('times',15,'bold'),bd='3px',bg='#6ec1ff',command=compute_area)
l2.grid(row=4,column=0)
b3.grid(row=2,column=0)
l1.grid(row=0,column=0)
e.grid(row=1,column=0)
root.mainloop()
|
Devcoder980/twinkle
|
calculcircel.py
|
calculcircel.py
|
py
| 688 |
python
|
en
|
code
| 1 |
github-code
|
6
|
37957784515
|
'''
eg> nodes
value - key
a - 5
b - 15
c - 3
d - 10
e - 25
in huffing tree:
Each node is either a leaf holding one of the (value, key) pairs above, or an internal node with value '*' whose key is the sum of its two children's keys.
This important property will be exploited to build the tree easily.
There will be total n leaf nodes and n-1 non-leaf nodes (in our case 5 leaf and 4 non-leaf nodes)
eg. of formed tree
58
(*)
33 25
(*) (E)
18 15
(*) (B)
10 8
(D) (*)
5 3
(A) (C)
Array Structure:
0 1 2 3 4 5 6 7 8
* * E * B D * A C
58 33 25 18 15 10 8 5 3
'''
data=[
('a',5),
('b',15),
('c',3),
('d',10),
('e',25),
]
from operator import itemgetter
data = sorted(data, key = itemgetter(1))
print('INPUT DATA:')
print (data)
data2=[]
while(len(data)>1):
a=data[0]
b=data[1]
c=('*',a[1]+b[1])
data.pop(0)
data.pop(0)
data.append(c)
data = sorted(data, key = itemgetter(1))
if(a[0]!='*'):
data2.append(a)
if(b[0]!='*'):
data2.append(b)
data2.append(c)
#a=data[0]
data.pop(0)
#data2.append(a)
print('\nTREE DATA:')
print(data2)
data2=sorted(data2, key = itemgetter(1))
data2=data2[::-1]
print('\nSORTED TREE DATA or TREE STRUCTURE')
print(data2)
'''
In assembly code this can be achieved by storing all the elements in array and finally sorting them as mentioned in previous assembly functions.
Achieved Array Structure:
0 1 2 3 4 5 6 7 8
* * E * B D * A C
58 33 25 18 15 10 8 5 3
'''
###TREE PERCOLATION and CREATING NEW BINARIES
indexes=[1]
for i in range(len(data2)):
if(data2[i][0]=='*'):
indexes.append(2*indexes[i])
indexes.append(2*indexes[i]+1)
print('\nINDICES')
print(indexes)
'''
Data Structures:
0 1 2 3 4 5 6 7 8
ARRAY
* * E * B D * A C
58 33 25 18 15 10 8 5 3
INDICES
1 2 3 4 5 8 9 18 19
These indices are what the node positions would be if this were a complete (balanced) binary tree.
58
(*)
(1)
33 25
(*) (E)
(2) (3)
18 15
(*) (B)
(4) (5)
10 8
(D) (*)
(8) (9)
5 3
(A) (C)
(18) (19)
Now only step left is to find out the frequencies.
Compressed representations will simply be the binary representation of these numbers with the significant bit removed.
eg (For this tree)
letters freq index binary repr. compressed repr.
a 5 18 10010 0010
b 15 5 101 01
c 3 19 10011 0011
d 10 8 1000 000
e 25 3 11 1
'''
print('\nCompressed bit representations')
for i in range(len(data2)):
if(data2[i][0]!='*'):
print(str(data2[i][0])+': '+bin(indexes[i])[3:])
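# Illustrative check (a small sketch): rebuild the symbol -> bits table from the
# arrays above and decode one code word by re-attaching the stripped leading 1 bit.
code_table = {data2[i][0]: bin(indexes[i])[3:] for i in range(len(data2)) if data2[i][0] != '*'}
def decode_symbol(bits):
    # prepend the implicit leading 1, then look the index up among the tree nodes
    return data2[indexes.index(int('1' + bits, 2))][0]
print('\nDecode check: ' + code_table['a'] + ' -> ' + decode_symbol(code_table['a']))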
'''
Thanks
~~Work originally done by ANIKET AGRAWAL
~~NOT COPIED FROM ANY SOURCES
'''
|
vjg28/Huffman-Coding-ARM
|
tree.py
|
tree.py
|
py
| 3,009 |
python
|
en
|
code
| 1 |
github-code
|
6
|
35512343107
|
# Given: a sequence of strings.
#
# Task: you must join these strings into a block of text, separating the original strings with commas.
# As a joke on right-handed robots, you must replace every occurrence of the word "right" with the word "left",
# even if it is part of another word. All strings are given in lower case.
right: str = "right"
left: str = "left"
def is_exist_left(value):
    return left in value
def is_exist_right(value):
    return right in value
value_list: list = ["left", "right", "left", "stop", "bright aright", "ok", "enough", "jokes"]
result: str = ''
for value in value_list:
if(is_exist_right(value)):
value = value.replace(right, left)
result += value + ","
elif(is_exist_left(value)):
value = value.replace(left, right)
result += value + ","
else:
result += value + ","
print("ะ ะตะทัะปััะฐั: ", result[:-1])
|
annapodl18/Command-flow
|
Left_hand.py
|
Left_hand.py
|
py
| 1,256 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
71842718268
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable #autograd oops
import torch.optim as optim
# core code of TRADES
# note: the L2-norm helpers below are defined here but not used by trades_loss itself
def squared_l2_norm(x):
flattened = x.view(x.unsqueeze(0).shape[0], -1)
return (flattened ** 2).sum(1)
def l2_norm(x):
return squared_l2_norm(x).sqrt()
# core function for TRADES calculating traded_loss
def trades_loss(model,
x_natural,
y,
optimizer,
step_size=0.003,
epsilon=0.031,
perturb_steps=10,
beta=1.0, # the coeff of second term
distance='l_inf'):
    # define KL-loss for inner maximization https://pytorch.org/docs/stable/generated/torch.nn.KLDivLoss.html?highlight=kldivloss#torch.nn.KLDivLoss
    # reduction='sum' sums the losses over the minibatch (same effect as the deprecated size_average=False)
    criterion_kl = nn.KLDivLoss(reduction='sum')
    # how to use the loss: f_loss(*args)(input) <- two sets of parentheses
#eval() for BN and Dropout
model.eval()
# feed x_natural here into the loss as a batch
batch_size = len(x_natural)
# generate adversarial example
# initiate an x_adv for skipping the concave
x_adv = x_natural.detach() + 0.001 * torch.randn(x_natural.shape).cpu().detach()
# detach() tensor won't give it grad calculations anymore.
if distance == 'l_inf': # L-infinity ball # no random start here
for _ in range(perturb_steps): # FGSM_k
x_adv.requires_grad_() # start from x_adv
with torch.enable_grad(): # enable_grad vs no_grad
# For the maximization problem, using torch.nn.KLDivLoss and cross entropy is equivalent because they differ by a constant additive term.
loss_kl = criterion_kl(F.log_softmax(model(x_adv), dim=1),
F.softmax(model(x_natural), dim=1)) # why first term log while second term origin: because in the loss_criteria, there is no "log_target = True"
grad = torch.autograd.grad(loss_kl, [x_adv])[0] # Computes and returns the sum of gradients of outputs w.r.t. the inputs.
x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())
#clamp ..
x_adv = torch.min(torch.max(x_adv, x_natural - epsilon), x_natural + epsilon)
#clamp original pic
x_adv = torch.clamp(x_adv, 0.0, 1.0)
elif distance == 'l_2':# L_2 we will come back later about l_2....not commented yet
delta = 0.001 * torch.randn(x_natural.shape).cpu().detach()
delta = Variable(delta.data, requires_grad=True)
# Setup optimizers
optimizer_delta = optim.SGD([delta], lr=epsilon / perturb_steps * 2)
for _ in range(perturb_steps):
adv = x_natural + delta
# optimize
optimizer_delta.zero_grad()
with torch.enable_grad():
loss = (-1) * criterion_kl(F.log_softmax(model(adv), dim=1),
F.softmax(model(x_natural), dim=1))
loss.backward()
# renorming gradient
grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
# avoid nan or inf if gradient is 0
if (grad_norms == 0).any():
delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
optimizer_delta.step()
# projection
delta.data.add_(x_natural)
delta.data.clamp_(0, 1).sub_(x_natural)
delta.data.renorm_(p=2, dim=0, maxnorm=epsilon)
x_adv = Variable(x_natural + delta, requires_grad=False)
# not implemented for other losses
else:
x_adv = torch.clamp(x_adv, 0.0, 1.0)
# adding two losses: L(fx,y) , L(fx',fx)
model.train()
x_adv = Variable(torch.clamp(x_adv, 0.0, 1.0), requires_grad=False)
# not the main part, code related only
    # zero gradients again: zero_grad -> loss.backward -> optimizer.step (update)
optimizer.zero_grad()
# calculate robust loss
logits = model(x_natural) # pred of fx
loss_natural = F.cross_entropy(logits, y) # loss of fx,y
# loss of fx' fx
loss_robust = (1.0 / batch_size) * criterion_kl(F.log_softmax(model(x_adv), dim=1),
F.softmax(model(x_natural), dim=1))
loss = loss_natural + beta * loss_robust
return loss
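# Illustrative training-step sketch (assumptions: `train_loader` yields (data, target)
# batches on the same device as `model`; hyper-parameters follow the defaults above,
# with beta=6.0 as a commonly used trade-off value).
def train_epoch_sketch(model, train_loader, optimizer):
    model.train()
    for data, target in train_loader:
        optimizer.zero_grad()
        loss = trades_loss(model=model, x_natural=data, y=target,
                           optimizer=optimizer,
                           step_size=0.003, epsilon=0.031,
                           perturb_steps=10, beta=6.0)
        loss.backward()
        optimizer.step()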
|
yaoyugua/TRADES
|
TRADES-master/trades.py
|
trades.py
|
py
| 4,661 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20493833703
|
import json
d1 = {
'Pessoa 1': {
'nome:': 'Luiz Augusto',
'idade:': 25,
},
'Pessoa 2': {
'nome:': 'Adriano Santos',
'idade:': 30,
},
}
print()
print(d1,'\n')
d1_json = json.dumps(d1, indent=True)
with open('arquivo.json', 'w+') as file:
file.write(d1_json)
print(d1_json)
|
Adriano1976/Curso-de-Python
|
Secao03-Programacao-Procedural/Aula087-Arquivos-Criar-ler-escrever-e-apagar/main.py
|
main.py
|
py
| 329 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
71674830588
|
import genanki
import functools
import os
TRUE_FALSE_MODEL_ID = 1803127777
@functools.lru_cache()
def load_true_false_model():
data = {}
for fname in ['fields.json', 'templates.yaml', 'cards.css']:
path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'true_false_model',
fname)
with open(path) as f:
data[fname] = f.read()
return genanki.Model(
TRUE_FALSE_MODEL_ID,
'Anatomy True False',
fields=data['fields.json'],
templates=data['templates.yaml'],
css=data['cards.css'],
)
class AnatomyTrueFalseNote(genanki.Note):
def __init__(self, *args, **kwargs):
super().__init__(load_true_false_model(), *args, **kwargs)
MULTIPLE_CHOICE_MODEL_ID = 1803127778
@functools.lru_cache()
def load_multiple_choice_model():
data = {}
for fname in ['fields.json', 'templates.yaml', 'cards.css']:
path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'multiple_choice_model',
fname)
with open(path) as f:
data[fname] = f.read()
return genanki.Model(
MULTIPLE_CHOICE_MODEL_ID,
'Anatomy Multiple Choice',
fields=data['fields.json'],
templates=data['templates.yaml'],
css=data['cards.css'],
)
class AnatomyMultipleChoiceNote(genanki.Note):
def __init__(self, *args, **kwargs):
super().__init__(load_multiple_choice_model(), *args, **kwargs)
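# Illustrative usage sketch: the deck id, field values, and output filename are made
# up, and the field order is assumed to match the model's fields.json.
if __name__ == '__main__':
    deck = genanki.Deck(2059400110, 'Anatomy Example Deck')
    deck.add_note(AnatomyTrueFalseNote(fields=['The femur is the longest bone in the body.', 'True']))
    genanki.Package(deck).write_to_file('anatomy_example.apkg')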
|
kerrickstaley/anatomyquestions
|
note.py
|
note.py
|
py
| 1,414 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38696996514
|
# coding=utf-8
import requests
from browse_airbnb import get_reviews
from time import strftime
from pablo import Pablo
def get_all_reviews(logement_id):
bdd = Pablo()
insert_query = """INSERT INTO airbnb_review_global
(review_id, author_id, listing_id, recipient_id, content, date_creation,
language, date_extract)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"""
for review in get_reviews(logement_id):
review_id = review['id']
author_id = review['author_id']
listing_id = review['listing_id']
recipient_id = review['recipient_id']
content = review['comments']
date_creation = review['created_at'][:10]
language = review['language']
params = (review_id, author_id, listing_id, recipient_id, content,
date_creation, language, strftime("%Y%m%d"))
bdd.exec_req_with_args(insert_query, params)
bdd.close()
def get_some_review_paris():
bdd = Pablo()
i = 0
# bdd.executerReq("SELECT distinct listing_id from airbnb_reviews_20k order by id desc")
req = """SELECT listing_id FROM airbnb_reviews_20k WHERE listing_id NOT IN
(SELECT DISTINCT listing_id FROM airbnb_review_global WHERE date_creation > 20170531
AND date_creation < 20170701)"""
bdd.executerReq(req)
for listing in bdd.resultatReq()[::-1]:
i += 1
id_listing = listing[0]
print("listing number : %s" % i)
get_all_reviews(id_listing)
bdd.close()
if __name__ == '__main__':
get_some_review_paris()
|
pablo-a/airbnb
|
get_all_reviews_listing.py
|
get_all_reviews_listing.py
|
py
| 1,585 |
python
|
en
|
code
| 1 |
github-code
|
6
|
33850641451
|
# Problem 0002: save the 200 generated activation codes in a MySQL relational database
import random, string
import pymysql
def get_string(num, length=10):
codes = []
chars = string.ascii_uppercase + string.digits
for i in range(num):
one_code = random.sample(chars, length)
codes.append(''.join(one_code))
return codes
def save_code_mysql():
try:
conn = pymysql.connect(host='localhost', user='root', password='123456', charset='UTF8')
cur = conn.cursor()
except BaseException as e:
print(e)
else:
try:
cur.execute("CREATE DATABASE IF NOT EXISTS code_mysql")
cur.execute("USE code_mysql")
cur.execute("CREATE TABLE IF NOT EXISTS codes (id INT AUTO_INCREMENT PRIMARY KEY, code VARCHAR(32))")
codes = get_string(200)
for code in codes:
cur.execute("INSERT INTO codes(code) values(%s)", (code))
conn.commit()
cur.execute("SELECT * FROM codes")
result = cur.fetchall()
for i in result:
print(i)
except BaseException as e:
print(e)
finally:
cur.close()
conn.close()
if __name__ == '__main__':
save_code_mysql()
|
akenYu/learnpy
|
showme/02/savemysql.py
|
savemysql.py
|
py
| 1,070 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70167475709
|
#!/usr/bin/env python
from setuptools import setup, Extension
import os
from os import popen
from os.path import dirname, join
class lazy_cythonize(list):
def __init__(self, callback):
self._list = None
self.callback = callback
def c_list(self):
if self._list is None:
self._list = self.callback()
return self._list
def __iter__(self):
return iter(self.c_list())
def __getitem__(self, ii):
return self.c_list()[ii]
def __len__(self):
return len(self.c_list())
# for CWB 2.2
#extra_libs = []
# for CWB >= 3.0
extra_libs = ['pcre', 'glib-2.0']
if 'CWB_DIR' in os.environ:
cqp_dir = os.environ['CWB_DIR']
else:
cqp_location = popen('which cqp').read().rstrip()
cqp_dir = dirname(cqp_location)
def extensions():
try:
from Cython.Build import cythonize
incdirs = ['src', join(cqp_dir, 'include')]
except ImportError:
cythonize = lambda x: x
incdirs = []
ext_modules = [Extension('CWB.CL', ['src/CWB/CL.pyx'],
include_dirs=incdirs,
library_dirs=[join(cqp_dir, 'lib')],
libraries=['cl'] + extra_libs)]
return cythonize(ext_modules)
def read(fname):
return open(fname).read()
setup(
name='cwb-python',
description='CQP and CL interfaces for Python',
author='Yannick Versley / Jorg Asmussen',
version='0.2.1',
author_email='[email protected]',
url='https://bitbucket.org/yannick/cwb-python',
ext_modules=lazy_cythonize(extensions),
py_modules=['PyCQP_interface'],
packages=['CWB', 'CWB.tools'],
long_description=read('README'),
entry_points={
'console_scripts': [
'cqp2conll = CWB.tools.cqp2conll:main',
'cqp_bitext = CWB.tools.make_bitext:main',
'cqp_vocab = CWB.tools.cqp2vocab:cqp2vocab_main'
]},
install_requires=['setuptools>=17', 'cython>=0.19', 'six'],
package_dir={'': 'py_src'})
|
bogdanbabych/paralex4cfields
|
tests/cwb-python/setup.py
|
setup.py
|
py
| 2,041 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71969151547
|
from django.contrib import admin
from django.urls import path, re_path
from . import views
urlpatterns = [
path('',views.index,name="words-index"), #index homePage
path('words/',views.index,name="words-index"),#index homePage
path('random/',views.get_random,name="random"), #Random word
path('words/<str:word_name>/', views.detail, name='words-detail'), # detail page
path('words/add/<str:word_name>', views.add_word, name="words-add_word_details"),# add word page
path('add/', views.add_word, name="words-add_word_details"),
path('about/',views.about,name="about-page"), #about page
path('contact/',views.contact,name="contact"), #contact page
path('tag/',views.all_tags,name="all-tags-page"), #Case for empty tag entering
path('tag/<str:str_Tag>',views.tag_page,name="tag-detail-page"), #Tag page for words of a certain tag
path('tagList/', views.all_tags, name="all-tags-page"), #page where all tags are displayed
path('words/votes/<str:slug>/<str:direction>/',views.vote, name="vote"), # This view Manages votes
]
|
gystr/words
|
words/urls.py
|
urls.py
|
py
| 1,067 |
python
|
en
|
code
| 1 |
github-code
|
6
|
9963863734
|
import numpy as np
import scipy.sparse as sp
import torch
import time
import random
from utils.tool import read_data, write_dic, dictionary, normalize, sparse_mx_to_torch_sparse_tensor
def encoding_test(test_graph_path, test_fact_path, train_dataset = "fb237_v1"):
"""load test-graph and test-facts, and do the encoding on the test-graph"""
t_start = time.time()
path = "data"
#these two paths are for loading
relation_dic_path = "{}/{}/train/relation-dic.txt".format(path, train_dataset)
type_dic_path = "{}/{}/train/type-dic.txt".format(path, train_dataset)
test_graph_triples = read_data(test_graph_path)
test_fact_triples_with_label = read_data(test_fact_path)
#load relation dic and type dic generated by training
f_relation_dic = open(relation_dic_path)
relations = []
for line in f_relation_dic:
relation_new = line.strip().split("\t")[1]
relations.append(relation_new)
f_type_dic = open(type_dic_path)
types = []
for line in f_type_dic:
type_new = line.strip().split("\t")[1]
types.append(type_new)
relation_set = set(relations)
all_triples_with_label = test_graph_triples + test_fact_triples_with_label
test_graph_real_triples = []
test_graph_type_triples = []
for triple in test_graph_triples:
if triple[1] != "<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>":
test_graph_real_triples.append(triple)
else:
test_graph_type_triples.append(triple)
test_fact_real_triples_with_label = []
test_fact_type_triples_with_label = []
for triple in test_fact_triples_with_label:
if triple[1] != "<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>":
test_fact_real_triples_with_label.append(triple)
else:
test_fact_type_triples_with_label.append(triple)
all_real_triples_with_label = []
all_type_triples_with_label = []
constant_set = set()
for triple in all_triples_with_label:
if triple[1] != "<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>":
constant_set.add(triple[0])
constant_set.add(triple[2])
all_real_triples_with_label.append(triple)
else:
constant_set.add(triple[0])
all_type_triples_with_label.append(triple)
constants = list(constant_set)
constant2index = dictionary(constants)
relation2index = dictionary(relations)
type2index = dictionary(types)
#print("time:",time.time()-t_start)
#generate list of pairs for encoding
pairs = []
pair_set = set()
for triple in all_real_triples_with_label:
sub_idx = constant2index[triple[0]]
obj_idx = constant2index[triple[2]]
if sub_idx < obj_idx:
if (sub_idx, obj_idx) not in pair_set:
pair_set.add((sub_idx, obj_idx))
pairs.append((sub_idx, obj_idx))
if sub_idx > obj_idx:
if (obj_idx, sub_idx) not in pair_set:
pair_set.add((obj_idx, sub_idx))
pairs.append((obj_idx, sub_idx))
for constant_idx in range(len(constants)):
pairs.append((constant_idx, constant_idx))
pair_set.add((constant_idx, constant_idx))
pair2index = dictionary(pairs)
s_time = time.time()
#collect related pairs for each constant
pairs_for_constant = dict([(i,set()) for i in range(len(constants))])
p_idx = 0
for pair in pairs:
p_idx = pair2index[pair]
c1 = pair[0]
c2 = pair[1]
pairs_for_constant[c1].add(p_idx)
pairs_for_constant[c2].add(p_idx)
#collect neighbors for each pair node
pneighbors_for_pair = dict([(i,set()) for i in range(len(pairs))])
for c_idx in range(len(constants)):
pairs_c = set(pairs_for_constant[c_idx])
#pair and n_pair would contain one common constant
for pair in pairs_c:
for n_pair in pairs_c:
if pair != n_pair:
pneighbors_for_pair[pair].add(n_pair)
#generate edge list
edges = []
for i in range(len(pairs)):
pneighbors = pneighbors_for_pair[i]
for pneighbor in pneighbors:
edges.append([i, pneighbor])
edges.append([pneighbor, i])
#print("Finished generating edges", time.time() - s_time)
#generate a normalized adjencency matrix (strategy for GCN)
#print(edges)
edges = np.array(edges)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])), shape=(len(pairs), len(pairs)), dtype=np.float32)
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
adj = normalize(adj + sp.eye(adj.shape[0]))
adj = sparse_mx_to_torch_sparse_tensor(adj)
del edges
#print("Total time for adj: {:.4f}s".format(time.time() - s_time))
#print("Start to generate features, labels, and masks")
def initialize(test_graph_real_triples, test_graph_type_triples, test_fact_real_triples_with_label, test_fact_type_triples_with_label):
labels = torch.zeros(len(pairs), len(types) + 2*len(relations))
masks = torch.zeros(len(pairs), len(types) + 2*len(relations))
features = torch.zeros(len(pairs), len(types) + 2*len(relations))
#labels and masks are generated for all triples in test-facts (pos&neg)
for triple in test_fact_type_triples_with_label:
cons = triple[0]
typ = triple[2]
label = triple[3]
pair_idx= pair2index[(constant2index[cons], constant2index[cons])]
typ_idx = type2index[typ]
if label == "1":
labels[pair_idx][typ_idx] = 1
elif label == "0":
labels[pair_idx][typ_idx] = 0
masks[pair_idx][typ_idx] = 1
for triple in test_fact_real_triples_with_label:
sub = triple[0]
rel = triple[1]
obj = triple[2]
label = triple[3]
sub_idx = constant2index[sub]
rel_idx = relation2index[rel]
obj_idx = constant2index[obj]
try:
pair_idx = pair2index[(sub_idx, obj_idx)]
except:
pair_idx = pair2index[(obj_idx, sub_idx)]
rel_idx = rel_idx + len(relations)
if label == "1":
labels[pair_idx][len(types) + rel_idx] = 1
elif label == "0":
labels[pair_idx][len(types) + rel_idx] = 0
masks[pair_idx][len(types) + rel_idx] = 1
#features are generated for all triples in test-graph (pos&neg)
for triple in test_graph_type_triples:
cons = triple[0]
typ = triple[2]
pair_idx= pair2index[(constant2index[cons], constant2index[cons])]
typ_idx = type2index[typ]
features[pair_idx][typ_idx] = 1
for triple in test_graph_real_triples:
sub = triple[0]
rel = triple[1]
obj = triple[2]
sub_idx = constant2index[sub]
rel_idx = relation2index[rel]
obj_idx = constant2index[obj]
try:
pair_idx = pair2index[(sub_idx, obj_idx)]
except:
pair_idx = pair2index[(obj_idx, sub_idx)]
rel_idx = rel_idx + len(relations)
features[pair_idx][len(types) + rel_idx] = 1
features.requires_grad = True
labels.requires_grad = False
return features, labels, masks
features, labels, masks = initialize(test_graph_real_triples, test_graph_type_triples, test_fact_real_triples_with_label, test_fact_type_triples_with_label)
num_type = len(types)
num_relation = len(relations)
def triple2index(triple_now):
sub_idx = constant2index[triple_now[0]]
try:
relation_idx = relation2index[triple_now[1]]
except:
pair_idx = pair2index[(sub_idx, sub_idx)]
dim_idx = type2index[triple_now[2]]
return pair_idx, dim_idx
obj_idx = constant2index[triple_now[2]]
if (sub_idx, obj_idx) in pair_set:
pair_idx = pair2index[(sub_idx, obj_idx)]
dim_idx = len(types) + relation_idx
elif (obj_idx, sub_idx) in pair_set:
pair_idx = pair2index[(obj_idx, sub_idx)]
dim_idx = len(types) + len(relations) + relation_idx
else:
print(triple_now, sub_idx, relation_idx, obj_idx)
print("wrong")
return pair_idx, dim_idx
hits_true = []
for triple in test_fact_triples_with_label:
if triple[-1] == "1":
hits_true.append(triple2index(triple))
#print("Finished generation")
#print("Total time elapsed for encoding: {:.4f}s".format(time.time() - t_start))
return adj, features, labels, masks, num_type, num_relation, constants, relations, types, pairs, hits_true
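if __name__ == "__main__":
    # Illustrative call (the file locations are hypothetical; they just mirror the
    # "data/<dataset>/..." layout implied by the dictionary paths loaded above).
    outputs = encoding_test("data/fb237_v1/test/test-graph.txt",
                            "data/fb237_v1/test/test-fact.txt",
                            train_dataset="fb237_v1")
    adj, features, labels, masks, num_type, num_relation = outputs[:6]
    print("pairs:", features.shape[0], "feature dim:", features.shape[1],
          "types:", num_type, "relations:", num_relation)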
|
shuwen-liu-ox/INDIGO
|
utils/utils_test_pattern.py
|
utils_test_pattern.py
|
py
| 9,170 |
python
|
en
|
code
| 22 |
github-code
|
6
|
40411376601
|
#!/usr/bin/env python3
"""
Name: locator_led_status.py
Description: NXAPI: display locator-led status for chassis, modules, fans
Example output:
% ./locator_led_status.py --vault hashicorp --devices cvd_bgw_1 --module 1,2 --fan 1,2
ip hostname status locator-led
192.168.11.110 cvd-1111-bgw ON chassis
192.168.11.110 cvd-1111-bgw OFF module_1
192.168.11.110 cvd-1111-bgw ON module_2
192.168.11.110 cvd-1111-bgw ON fan_1
192.168.11.110 cvd-1111-bgw OFF fan_2
%
"""
our_version = 106
script_name = "locator_led_status"
# standard libraries
import argparse
import re
from concurrent.futures import ThreadPoolExecutor
# local libraries
from nxapi_netbox.args.args_cookie import ArgsCookie
from nxapi_netbox.args.args_nxapi_tools import ArgsNxapiTools
from nxapi_netbox.general.log import get_logger
from nxapi_netbox.netbox.netbox_session import netbox, get_device_mgmt_ip
from nxapi_netbox.vault.vault import get_vault
from nxapi_netbox.nxapi.nxapi_locator_led import NxapiLocatorLedStatus
def get_parser():
ex_prefix = "Example:"
help_module = (
"Either a single module/linecard, or a comma-separate list of modules/linecards"
)
help_fan = "Either a single fan, or a comma-separate list of fans"
help_on = "If present, print only locator-leds whose status is ON. If not present, print status for all locator-leds"
ex_module = "{} --module 2,3,6".format(ex_prefix)
ex_fan = "{} --fan 3".format(ex_prefix)
ex_on = "{} --on".format(ex_prefix)
parser = argparse.ArgumentParser(
description="DESCRIPTION: NXAPI: display locator-led status for chassis, modules, fans",
parents=[ArgsCookie, ArgsNxapiTools],
)
mandatory = parser.add_argument_group(title="MANDATORY SCRIPT ARGS")
optional = parser.add_argument_group(title="OPTIONAL SCRIPT ARGS")
optional.add_argument(
"--on",
dest="on",
required=False,
action="store_true",
default=False,
help="{} {}".format(help_on, ex_on),
)
optional.add_argument(
"--module",
dest="module",
required=False,
default=None,
help="(default: %(default)s) " + help_module + ex_module,
)
optional.add_argument(
"--fan",
dest="fan",
required=False,
default=None,
help="(default: %(default)s) " + help_fan + ex_fan,
)
parser.add_argument(
"--version", action="version", version="{} v{}".format("%(prog)s", our_version)
)
return parser.parse_args()
def get_device_list():
try:
return cfg.devices.split(",")
except:
log.error(
"exiting. Cannot parse --devices {}. Example usage: --devices leaf_1,spine_2,leaf_2".format(
cfg.devices
)
)
exit(1)
def print_output(futures):
for future in futures:
output = future.result()
if output == None:
continue
for line in output:
print(line)
if len(output) > 0:
print()
def print_header():
print(fmt.format("ip", "hostname", "status", "locator-led"))
def collect_output(ip, nx, modules, fans):
lines = list()
if not cfg.on:
lines.append(fmt.format(ip, nx.hostname, nx.chassis, "chassis"))
elif cfg.on and nx.chassis == "ON":
lines.append(fmt.format(ip, nx.hostname, nx.chassis, "chassis"))
for module in modules:
nx.module = module
if cfg.on and nx.module_status != "ON":
continue
lines.append(
fmt.format(ip, nx.hostname, nx.module_status, "module_{}".format(module))
)
for fan in fans:
nx.fan = fan
if cfg.on and nx.fan_status != "ON":
continue
lines.append(fmt.format(ip, nx.hostname, nx.fan_status, "fan_{}".format(fan)))
return lines
def worker(device, vault, modules, fans):
ip = get_device_mgmt_ip(nb, device)
nx = NxapiLocatorLedStatus(vault.nxos_username, vault.nxos_password, ip, log)
nx.nxapi_init(cfg)
nx.refresh()
return collect_output(ip, nx, modules, fans)
def cfg_to_list(cfg_list, desc):
_list = list()
if cfg_list == None:
return _list
for item in re.split(",", str(cfg_list)):
if item == None:
continue
try:
_list.append(int(item))
except:
log.error("Exiting. Expected int() for {}. Got {}".format(desc, cfg_list))
log.error("Usage examples:")
log.error(" --{} 3".format(desc))
log.error(" --{} 1,2,4".format(desc))
exit(1)
return _list
cfg = get_parser()
modules = cfg_to_list(cfg.module, "module")
fans = cfg_to_list(cfg.fan, "fan")
log = get_logger(script_name, cfg.loglevel, "DEBUG")
vault = get_vault(cfg.vault)
vault.fetch_data()
nb = netbox(vault)
devices = get_device_list()
fmt = "{:<15} {:<18} {:<6} {:<12}"
print_header()
executor = ThreadPoolExecutor(max_workers=len(devices))
futures = list()
for device in devices:
args = [device, vault, modules, fans]
futures.append(executor.submit(worker, *args))
print_output(futures)
|
allenrobel/nxapi-netbox
|
scripts/locator_led_status.py
|
locator_led_status.py
|
py
| 5,240 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36413210728
|
# ---------------
# ParamCopy - Substance 3D Designer plugin
# (c) 2019-2022 Eyosido Software SARL
# ---------------
from sd.api.sdpackage import SDPackage
from sd.api.sdnode import SDNode
from sd.api.sdgraph import SDGraph
from sd.api.sdarray import SDArray
from sd.api.sdproperty import SDPropertyCategory
from sd.api.apiexception import APIException
from paramcopy.pccore.pchelper import PCHelper
from paramcopy.pccore.pcnodeid import PCNodeIdentifier
from paramcopy.pccore.pcparam import PCParam, PCParamCollection
class PCNodeState:
def __init__(self, node, storeBaseParams = True, storeSpecificParams = True, graph = None):
self.nodeIdentifier = PCNodeIdentifier(node, graph)
self.state = PCParamCollection()
def recallInto(self, destNode, copyBaseAndSpecific = True, propertyIds = None):
for propertyId, propertyData in self.state.params.items():
if not propertyIds or (propertyIds and propertyId in propertyIds): # filter properties
isBaseParam = PCHelper.isBaseParameter(propertyId)
if copyBaseAndSpecific or isBaseParam:
# verify whether property exists in destination node
try:
destVal = destNode.getInputPropertyValueFromId(propertyId)
if destVal:
destProp = destNode.getPropertyFromId(propertyId, SDPropertyCategory.Input)
isFunctionDriven = PCHelper.isInputParamFunctionDriven(destNode, destProp)
if not isFunctionDriven: # make sure not to copy over a user function
if propertyData.inheritanceMethod != -1:
#inheritance method is to be set *before* property value
destNode.setInputPropertyInheritanceMethodFromId(propertyData.id, propertyData.inheritanceMethod)
destNode.setInputPropertyValueFromId(propertyData.id, propertyData.value)
except APIException as e:
PCHelper.logSDException(e)
finally:
pass
def retrieveNode(self):
return self.nodeIdentifier.retrieveNode()
def storeState(self, node, storeBaseParams = True, storeSpecificParams = True, propertyIds = None):
#if propertyIds are defined, only those will be stored regardless of storeBaseParams/storeSpecificParams
properties = node.getProperties(SDPropertyCategory.Input)
if properties:
p = 0
psize = properties.getSize()
while p < psize:
prop = properties.getItem(p)
if prop.getType().getClassName() != "SDTypeTexture": # do not process node inputs
propertyId = prop.getId()
isBaseParam = PCHelper.isBaseParameter(propertyId)
if (propertyIds and propertyId in propertyIds) or \
(not propertyIds and \
( (isBaseParam and storeBaseParams) or (not isBaseParam and storeSpecificParams) ) \
):
if not PCHelper.isHiddenParam(node, propertyId):
groupName = PCHelper.getParamGroupName(node, propertyId)
inheritanceMethod = PCHelper.getInheritanceMethod(node, propertyId)
value = node.getPropertyValue(prop) # PCHelper.newPropertyValue(node, prop) ??
param = PCParam(propertyId, prop.getLabel(), inheritanceMethod, value, groupName)
self.state.params[propertyId] = param
p += 1
class PCNodeStateSet:
def __init__(self, graph, stateSetName):
self.graphName = graph.getIdentifier()
package = graph.getPackage()
self.packageName = PCHelper.getPackageName(package)
self.id = PCHelper.getPackageId(package) + "_" + graph.getIdentifier() + "_" + stateSetName
self.name = stateSetName
self.nodeStates = []
def storeNodeStates(self, nodeArray, graph, storeBaseParams = True, storeSpecificParams = True):
size = nodeArray.getSize()
for n in range(0, size):
node = nodeArray.getItem(n)
            nodeState = PCNodeState(node, graph=graph)  # pass graph by keyword; the second positional slot is storeBaseParams
nodeState.storeState(node, storeBaseParams, storeSpecificParams)
self.nodeStates.append(nodeState)
def recallNodeStates(self):
misses = 0
for nodeState in self.nodeStates:
node = nodeState.retrieveNode()
if node:
nodeState.recallInto(node)
else:
misses += 1
return misses
class PCStateMgr:
"""
Store sets of node states for later recall
"""
inst = None
@classmethod
def instance(cls):
if not cls.inst:
cls.inst = PCStateMgr()
return cls.inst
def __init__(self):
self.nodeStateSets = {} # key: state set name, value, PCNodeStateSet
def stateSetNameExists(self, stateSetName):
return self.nodeStateSets.get(stateSetName) != None
def addStateSet(self, stateSet):
self.nodeStateSets[stateSet.name] = stateSet
def deleteStateSet(self, stateSetName):
if self.nodeStateSets.get(stateSetName):
del self.nodeStateSets[stateSetName]
return True
else:
return False
def deleteAll(self):
self.nodeStateSets = {}
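# Illustrative usage sketch (assumptions: `graph` is an SDGraph and `node_array`
# an SDArray of selected nodes, both obtained from the Designer API by UI code).
def _example_snapshot_and_recall(graph, node_array):
    mgr = PCStateMgr.instance()
    snapshot = PCNodeStateSet(graph, "before_tweaks")
    snapshot.storeNodeStates(node_array, graph)
    if not mgr.stateSetNameExists(snapshot.name):
        mgr.addStateSet(snapshot)
    # ...parameters are edited in Designer here...
    misses = mgr.nodeStateSets["before_tweaks"].recallNodeStates()
    return misses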
|
eyosido/ParamCopy
|
src/paramcopy/pccore/pcstatemgr.py
|
pcstatemgr.py
|
py
| 5,587 |
python
|
en
|
code
| 9 |
github-code
|
6
|
22195221109
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import numpy as np
from btk import btk
import os
import matplotlib
import matplotlib.pyplot as plt
from tkinter import *
from tkinter.messagebox import *
from tkinter import ttk
# Label = strike / off ; context = right / left
def filtreExtremum(extrem, originalData):
if 0 in extrem:
extrem = extrem[1:]
if len(originalData)-1 in extrem:
extrem = extrem[:-1]
return extrem
# Purpose: find all local maxima
# In : a vector of size nx1
# Out : the x positions of the local maxima (not their y values)
def maxLocal(a):
    TFarray = np.array(np.r_[True, a[1:] > a[:-1]] & np.r_[a[:-1] > a[1:], True]) # Fills a vector with False everywhere, except where a value is greater than both its left and right neighbours (True there)
    indMax = np.ravel( np.where( TFarray == True ) ) # Get the indices where the True values are
indMax = filtreExtremum(indMax, a)
return indMax
# Work-in-progress function, not yet effective
def semiMaxLocal(a):
TFarray = np.array(np.r_[True, a[1:] > a[:-1]] & np.r_[a[:-1] == a[1:], True])
indSemiMax = np.where( TFarray == True )
return indSemiMax
def findMinMin(data, Min):
minMin = minLocal(data[Min])
return Min[minMin]
# Same as maxLocal, but for finding local minima
def minLocal(a):
TFarray = np.array(np.r_[True, a[1:] < a[:-1]] & np.r_[a[:-1] < a[1:], True])
indMin = np.ravel( np.where( TFarray == True ) )
indMin = filtreExtremum(indMin, a)
return indMin
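# Quick sanity check of the extremum helpers on a toy signal (illustrative values;
# the returned arrays contain positions in the input vector, not y-values).
if __name__ == "__main__":
    _a = np.array([1, 3, 2, 5, 4, 4, 6, 1])
    print(maxLocal(_a))  # expected: [1 3 6]
    print(minLocal(_a))  # expected: [2] (index 0 and the last index are filtered out)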
#clean the arrays of all local extremum that are too close of eachother (extremums super local)
def cleanMinMax(indMin, indMax):
for i in indMax:
for j in np.flip(indMin,0):
if(np.abs(i-j)<7):
indMax = np.extract(indMax!=i,indMax)
indMin = np.extract(indMin!=j,indMin)
break
return indMin, indMax
# Dict with the labels as keys and an integer as value
# e.g. : DicoLabels = {"LSHO": 0, "RSHO": 1, "RANK": 2, ...}
# Function never used so far
def dicLab(metadata):
point_labels = metadata.FindChild("POINT").value().FindChild("LABELS").value().GetInfo().ToString()
dicoLabels = {}
index = 0
for lab in point_labels:
dicoLabels[lab] = index
index += 1
return dicoLabels
# Plots the events in the figure "ax" as vertical lines
# In : acq, which contains the events ; ax, where the vertical lines are plotted
# Out : the new figure, with the lines plotted on it
def plotEvent(acq, ax):
    n_events = acq.GetEventNumber() # Get the number of events, so we can iterate over them
    for numevent in range(n_events): # Loop over the event indices
        event = acq.GetEvent(numevent) # Get an event from its index
        event_frame = event.GetFrame() # Get the frame where the event occurs
        context = event.GetContext() # Get the context (e.g. Left or Right)
        label = event.GetLabel() # Get the label (e.g. : Foot_Strike_GS)
        if context == 'Left': # Check whether it is the left foot
            if label == 'Foot_Strike_GS': # Check whether it is the moment the foot touches the ground
                leftLineStrike = ax.axvline(x = event_frame, color='r', label='Left - Strike', linestyle='--') # Plot in red, dashed
                # ax.legend([leftLineStrike], 'Left - Strike')
            elif label == 'Foot_Off_GS': # Check whether it is the moment the foot leaves the ground
                leftLineOff = ax.axvline(x = event_frame, color='r', label='Left - Off', linestyle='-.') # Plot in red, dash-dotted
        if context == 'Right': # Check whether it is the right foot
            if label == 'Foot_Strike_GS': # Check whether it is the moment the foot touches the ground
                rightLineStrike = ax.axvline(x = event_frame, color='g', label='Right - Strike', linestyle='--') # Plot in green, dashed
            elif label == 'Foot_Off_GS': # Check whether it is the moment the foot leaves the ground
                rightLineOff = ax.axvline(x = event_frame, color='g', label='Right - Off', linestyle='-.') # Plot in green, dash-dotted
    # Add the legend
    # IF THERE IS AN ERROR, REMOVE THIS LINE
    # ax.legend((leftLineOff, rightLineStrike, rightLineOff), ('Left - Off', 'Right - Strike', 'Right - Off'))
return ax
# Selects the elements of files that have an event matching the given label and context
# Returns a training set (3/4) and a testing set (1/4) made of these elements.
def selectWithExistingEvent(files, lab, cont):
eventfiles = []
for acq in files:
        n_events = acq.GetEventNumber() # Get the number of events, so we can iterate over them
        for numevent in range(n_events): # Loop over the event indices
            event = acq.GetEvent(numevent) # Get an event from its index
            if event.GetLabel() == lab and event.GetContext()==cont: # Check whether it is the label we are looking for
eventfiles.append(acq)
break
test = np.random.choice(eventfiles, (len(eventfiles)//4), replace = False).tolist()
train = list(set(eventfiles)-set(test))
return train, test
# Purpose: load the data
# In : path of the data (Note: the path is relative to where the file is)
# Out : the data
def initial(pathFile):
reader = btk.btkAcquisitionFileReader()
reader.SetFilename(pathFile)
reader.Update()
acq = reader.GetOutput()
return acq
def save(acq, pathFile):
writer = btk.btkAcquisitionFileWriter()
writer.SetInput(acq)
writer.SetFilename(pathFile)
writer.Update()
def allFiles(path):
files = []
    # Find all the .c3d files
for r, d, f in os.walk(path):
for file in f:
if '.c3d' in file:
files.append(initial(os.path.join(r, file)))
return files
# Purpose: get information about the frames of "acq"
# In : the data acq
# Out : number of frames, number of the first frame, number of the last frame
def frameData(acq):
# get some parameters
n_frames = acq.GetPointFrameNumber() # give the number of frames
first_frame = acq.GetFirstFrame()
last_frame = acq.GetLastFrame()
return n_frames, first_frame, last_frame
# Purpose: create a new event
# A new event is characterised by a label, a context, and a frame number
# In : the data "acq", a label, a context, and a frame
def addEvent(acq, label, context, frameNumber):
    newEvent = btk.btkEvent() # Create a new empty event
    newEvent.SetLabel(label) # Set a label
    newEvent.SetContext(context) # Set a context
    newEvent.SetFrame(frameNumber) # Set the position, i.e. the frame
    acq.AppendEvent(newEvent) # Add the event to all the other events
# Purpose: equivalent to print('obj = ', obj)
# Not needed for the project
def printName(obj, namespace):
nom = [name for name in namespace if namespace[name] is obj]
print(nom[0],' = ', obj)
# Purpose: get all the information about an event
# In : the data "acq", and the event number
# Out : the event, the label, the context, and the frame number
def eventInfo(acq, numEvent):
event = acq.GetEvent(0) # extract the first event of the aquisition
label = event.GetLabel() # return a string representing the Label
context = event.GetContext() # return a string representing the Context
frame = event.GetFrame() # return the frame as an integer
return event, label, context, frame
# Purpose: find the event closest to a given position (frame)
# In : the data "data", the set of all events (AllEvents), the label and context we are looking for, and the position the search starts from
# Out : the event, and the corresponding frame
def closestEvent(data, AllEvents, label=0, context=0, start=1):
if (label == 0) and (context == 0):
return AllEvents.GetItem(0), AllEvents.GetItem(0).GetFrame()
    eventVIP = [] # Array that will contain all events matching the requested label and context
    numberEvent = AllEvents.GetItemNumber() # Total number of events
    for num in range(numberEvent): # Look at every event
        event = AllEvents.GetItem(num) # Get an event
        if (event.GetContext() == context) and (event.GetLabel() == label): # Check whether context and label both match
            eventVIP.append(event) # Add the event
    if len(eventVIP) == 0: # If no matching event was found, stop
        return 0, 0
    dist = 1000 # Initialise a very large distance, which will shrink
    even = eventVIP[0] # Start with the first event
    for event in eventVIP: # Loop over the events
        if np.abs(event.GetFrame() - start) < dist: # Check the distance between the start position and a matching event
            dist = np.abs(event.GetFrame() - start) # Remember the new distance
            even = event # Remember the new event
return even, even.GetFrame()
# Purpose: find the extremum closest to a starting position
# In : starting position "start", the indices (x positions) of the extrema (the minima or the maxima)
# Out : x position of the extremum, the distance from the starting point (start), and the index within the array of minima or maxima
def closestExtrem(start, indExtrem): # Returns the position of the extremum relative to the Start frame
    AllDistance = indExtrem - start # Vector minus scalar: the index positions minus the starting position (start)
    absDist = np.abs(AllDistance) # Take the absolute value
    indexMinimDist = np.argmin(absDist) # Get the index of the minimum distance
    positionExtrem = indExtrem[indexMinimDist] # Get the x position of the extremum
    distance = AllDistance[indexMinimDist] # Get the distance (without the absolute value)
return positionExtrem, distance, indexMinimDist
def plotPosi(acq, position, axe, ax, event=0):
dicoAxe = {"x" : 0, "y" : 1, "z" : 2}
data = np.array(acq.GetPoint(position).GetValues()[:, dicoAxe[axe]])
n_frames, first_frame, last_frame = frameData(acq)
Min, Max = minLocal(data), maxLocal(data)
Min, Max = cleanMinMax(Min, Max) #used to clean some local extremums
# Plot part
ax.plot(np.array(range(first_frame, last_frame + 1)), data, 'k')
ax.plot(Min, data[Min], 'o b')
ax.plot(Max, data[Max], 'o', color='purple')
ax = plotEvent(acq, ax)
if (event != 0):
print('Position de depart :', event.GetFrame())
positionExtrem, distance, indexMinimDist = closestExtrem(event.GetFrame(), Max)
ax.plot(positionExtrem, data[positionExtrem], 'o g')
print('Position :', positionExtrem)
plt.title(" Position = {} - axis = {}".format(position, axe))
# ax.show(block = False)
return ax
def simple(files, posiCombo, axeCombo , buttonCombo, fileCombo):
posiCombo['values'] = ['LFHD', 'RFHD', 'LBHD', 'RBHD', 'C7', 'T10', 'STRN', 'CLAV', 'RBAK', 'LSHO', 'LELB', 'LWRA', 'LWRB', 'RSHO', 'RELB', 'RWRA', 'RWRB', 'LASI', 'RASI', 'LPSI', 'RPSI', 'LTHI', 'RTHI', 'LKNE', 'RKNE', 'LTIB', 'RTIB', 'LANK', 'RANK', 'LHEE', 'RHEE', 'RTOE', 'LTOE']
posiCombo.current(0)
buttonCombo["text"] = "PLOT"
buttonCombo["command"] = lambda: onePlot(files, posiCombo, axeCombo, fileCombo )
def double(files, posiCombo, axeCombo , buttonCombo, fileCombo):
posiCombo['values'] = ["FHD", "BHD", "SHO", "ELB", "WRA", "WRB", "ASI", "PSI", "THI", "KNE", "TIB", "ANK", "HEE", "TOE"]
posiCombo.current(0)
buttonCombo["text"] = "PLOT x2"
buttonCombo["command"] = lambda: twoPlot(files, posiCombo, axeCombo, fileCombo )
def onePlot (files, posiCombo, axeCombo, fileCombo ):
    acq = files[int(fileCombo.get())] # see the chapter on events
n_frames, first_frame, last_frame = frameData(acq)
plt.figure(figsize=(9,7))
guiPlot = plt.subplot()
guiPlot = plotPosi(acq, posiCombo.get(), axeCombo.get(), guiPlot)
plt.show(block=False)
def twoPlot(files, posiCombo, axeCombo, fileCombo ): # see the chapter on events
acq = files[int(fileCombo.get())]
n_frames, first_frame, last_frame = frameData(acq)
dr = 'R' + posiCombo.get()
ga = 'L' + posiCombo.get()
plt.figure(figsize=(9,7))
guiPlot = plt.subplot(2,1,1)
guiPlot = plotPosi(acq, dr, axeCombo.get(), guiPlot)
guiPlot = plt.subplot(2,1,2)
guiPlot = plotPosi(acq, ga, axeCombo.get(), guiPlot)
plt.show(block=False)
def GUIplot(files):
acq = files[0]
metadata = acq.GetMetaData()
point_labels = list(metadata.FindChild("POINT").value().FindChild("LABELS").value().GetInfo().ToString())
win = Tk()
win.title("BTK Project")
# win.geometry("500x100")
ttk.Label(win, text="Choix du capteur").grid(column=1, row=0)
posiCombo = ttk.Combobox(win, values=point_labels)
posiCombo.grid(column=1, row=1)
ttk.Label(win, text="Choix de l'axe").grid(column=2, row=0)
axeCombo = ttk.Combobox(win, values=["x", "y", "z"])
axeCombo.grid(column=2, row=1)
ttk.Label(win, text="Choix du fichier").grid(column=0, row=0)
fileCombo = ttk.Combobox(win, values=list(range(len(files))))
fileCombo.grid(column=0, row=1)
posiCombo.current(newindex=28)
axeCombo.current(2)
fileCombo.current(0)
buttonCombo = Button (win, text="PLOT", command= lambda: onePlot(files, posiCombo, axeCombo, fileCombo ))
buttonCombo.grid(column=3, row=1)
v = IntVar()
# v.set(1)
R1 = Radiobutton(win, text="Plot unique", variable=v, value=1, command= lambda: simple(files, posiCombo, axeCombo , buttonCombo, fileCombo))
R1.grid(column=0, row=2)
R2 = Radiobutton(win, text="Double Plot", variable=v, value=2, command= lambda: double(files, posiCombo, axeCombo , buttonCombo, fileCombo))
R2.grid(column=1, row=2)
v.set(1)
win.mainloop()
|
staufga0/DM_Project
|
Source/file.py
|
file.py
|
py
| 14,754 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
2914350136
|
class Block:
"""
A block is the smallest unit of data in Meshcash
A block includes a list of transactions and knowledge regarding the view of the creating miner
"""
def __init__(self):
# The layer of which this block belongs to
self.layerId = None
# The public key of this block generating miner
# This will be used to reward the miner
self.minerPk = None
# Binary value of the weak coin protocol
self.weakCoinValue = None
# All recent blocks observed by the miner generating this block
# This list contains only blocks with in-degree 0 (that otherwise wouldn't appear in the recent blocks list)
self.viewHeads = []
# Subset of view edges declared valid by the hare protocol
self.validRecentBlocks = []
# A flag set to True if the block was created up to t_delta_coin time after layer creation
# When this flag is turned out the block "abstains" from block voting
self.beforeCoin = None
# A flag set to True if the block was created up to delta_time after layer creation
# When this flag is turned out the block "abstains" from block voting
self.earlyBlock = None
# List of included transactions
self.txs = []
# Proofs of work over the block contents
# This serves as a digital signature to assure data was not changed since finding the proofs of work
self.pow = None
def has_in_view(self, other_block):
"""
Returns true if current block points otherBlock
:param other_block:
:return:
"""
if other_block.layerId >= self.layerId:
return False
pointed_blocks = set(self.viewHeads).union(set(self.validRecentBlocks))
if other_block in pointed_blocks:
return True
        # Very inefficient algorithm: walks the whole view recursively
        return any(pointedBlock.has_in_view(other_block) for pointedBlock in pointed_blocks)
def is_syntactically_valid(self, pow_protocol, tmin):
"""
Returns True if the block syntactically valid, that is:
1. recursive: points to TMIN syntactically valid blocks in previous layer AND
2. has a valid proofs-of-work w.r.t. challenge and difficulty AND
3. all of its transactions are syntactically valid
:return:
"""
if self.layerId == 0:
# Genesis layer's blocks are always syntactically valid
return True
if not pow_protocol.verify_pow(self.pow):
return False
prev_layer_blocks = filter(lambda x: x.layerId + 1 == self.layerId, self.viewHeads)
        prev_layer_valid_blocks = sum([block.is_syntactically_valid(pow_protocol, tmin) for block in prev_layer_blocks])
if prev_layer_valid_blocks < tmin:
# Block must point to at least tmin syntactically valid previous layer's blocks
return False
return True
def generate_block_id(self):
"""
Return the block's id based on all of its content
:return:
"""
# Use the proofs-of-work as the block's id
return self.pow
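if __name__ == "__main__":
    # Minimal illustrative sketch: only the fields used by has_in_view are filled in;
    # real blocks are wired up and validated by the surrounding mining code.
    genesis = Block()
    genesis.layerId = 0
    child = Block()
    child.layerId = 1
    child.viewHeads = [genesis]
    assert child.has_in_view(genesis)
    assert not genesis.has_in_view(child)  # a block never points to a later layer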
|
anon444/meshcash
|
src/DataSturcutres/Block.py
|
Block.py
|
py
| 3,265 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8469582185
|
from calculations import get_standings, get_current_track_data
from utils import get_player_name, get_telegram_name
from plots import timedelta_to_string
def info_about_current_weeks_ladder_changes(old_data, new_data):
new_data = get_current_track_data(new_data)
new_data = new_data[new_data["Origin"] == "Player"]
new_ladder = get_standings(new_data)
current_track = new_data["track_id"].unique()[0]
old_data = old_data[old_data["track_id"] == current_track]
old_data = old_data[old_data["Origin"] == "Player"]
old_ladder = get_standings(old_data)
player_overlap = list(set(new_ladder.index) & set(old_ladder.index))
new_ladder = new_ladder.loc[new_ladder.index.isin(player_overlap)]
old_ladder = old_ladder.loc[old_ladder.index.isin(player_overlap)]
changes = new_ladder.index != old_ladder.index
new_ladder = new_ladder[changes].reset_index().reset_index().set_index("Player")
old_ladder = old_ladder[changes].reset_index().reset_index().set_index("Player")
new_ladder["index_change"] = new_ladder["index"] - old_ladder["index"]
messages = []
for player in new_ladder.index.values:
overtakes = new_ladder.loc[player, "index_change"]
if not overtakes > 0:
continue
index = new_ladder.loc[player, "index"]
overtook = new_ladder[(new_ladder["index"] >= index-overtakes) & (new_ladder["index"] < index)].index.values
have_scored = old_data.loc[old_data["Origin"] == "Player", "Player"].unique()
overtook = ", ".join([get_telegram_name(p) for p in overtook if p in have_scored])
new_record = new_data.groupby(["Player", "track_id"])["Time"].min().loc[player, current_track]
messages.append(
f"{get_player_name(player)} scored a {timedelta_to_string(new_record)} and overtook {overtook}."
)
return messages
def info_about_new_times(old_data, new_data):
messages = []
new_entries_index = new_data[~new_data.isin(old_data)].dropna(how="all").index
new_entries = new_data.loc[new_entries_index]
for row_index, entry in new_entries.iterrows():
player_name = entry["Player"]
new_record = timedelta_to_string(entry["Time"])
track = entry["Track"]
message = f"{track}: {get_player_name(player_name)} scored a new record of {new_record}!"
messages.append(message)
return messages
|
Excidion/trackmania_nations_challenge_bot
|
messages.py
|
messages.py
|
py
| 2,399 |
python
|
en
|
code
| 1 |
github-code
|
6
|
45897381316
|
import os
from PIL import Image
class ImageUpscaler:
def __init__(self, image_path, scale_factor):
self.image_path = image_path
self.scale_factor = scale_factor
def upscale_image(self, image_file):
# Open the image
image = Image.open(image_file)
# Calculate the new dimensions
width, height = image.size
new_width = int(width * self.scale_factor)
new_height = int(height * self.scale_factor)
# Resize the image
upscaled_image = image.resize((new_width, new_height), Image.BICUBIC)
# Save the upscaled image
upscaled_folder = os.path.join(self.image_path, 'upscaled')
os.makedirs(upscaled_folder, exist_ok=True)
file_name = os.path.splitext(os.path.basename(image_file))[0]
save_path = os.path.join(upscaled_folder, f'{file_name}_upscaled.png')
upscaled_image.save(save_path)
# print(f"Upscaled image saved: {save_path}")
def upscale_images_in_directory(self):
# Get a list of all image files in the directory
image_files = [
os.path.join(self.image_path, file_name)
for file_name in os.listdir(self.image_path)
if file_name.endswith(('.jpg', '.jpeg', '.png'))
]
for image_file in image_files:
self.upscale_image(image_file)
if __name__ == '__main__':
directory_path = '../private_keys'
scale_factor = 4 # Increase the dimensions by a factor of 4
upscaler = ImageUpscaler(directory_path, scale_factor)
upscaler.upscale_images_in_directory()
|
huju-tub/visual-cryptography-generator
|
classes/image_upscaler.py
|
image_upscaler.py
|
py
| 1,598 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21095535591
|
import torch
import torch.nn as nn
import numpy as np
from torch.nn.functional import upsample, interpolate
from Spa_downs import *
import torch.nn.functional as F
from torch.autograd import Variable
import argparse
from torch.nn import init
import scipy.io as sio
import os
import random
class ReshapeTo2D(nn.Module):
def __init__(self):
super(ReshapeTo2D, self).__init__()
def forward(self,x):
return torch.reshape(x, (x.shape[0], x.shape[1], x.shape[2]*x.shape[3]))
class ReshapeTo3D(nn.Module):
def __init__(self):
super(ReshapeTo3D, self).__init__()
def forward(self,x):
return torch.reshape(x, (x.shape[0], x.shape[1], int(np.sqrt(x.shape[2])), int(np.sqrt(x.shape[2]))))
class TransDimen(nn.Module):
def __init__(self):
super(TransDimen, self).__init__()
def forward(self,x):
return torch.Tensor.permute(x,[0,2,1])
def channel_crop(data, position, length):
assert data.size(1) >= position + length, 'the cropped channel out of size.'
return data[:, position: position + length, :, :]
def ins (list_, data, index):
list_start = list_[:index]
list_start = [ Variable(i, requires_grad=False).type(torch.cuda.FloatTensor) for i in list_start]
data = [Variable(data, requires_grad=False).type(torch.cuda.FloatTensor)]
list_end = list_[index:]
list_end = [ Variable(i, requires_grad=False).type(torch.cuda.FloatTensor) for i in list_end]
return list_start + data + list_end
def to_gpu(data):
return Variable(data, requires_grad=False).type(torch.cuda.FloatTensor)
class L_Dspec(nn.Module):
def __init__(self,in_channel,out_channel,P_init):
super(L_Dspec, self).__init__()
self.in_channle = in_channel
self.out_channel = out_channel
self.P = nn.Parameter(P_init)
def forward(self,input):
S = input.shape
out = torch.reshape(input,[S[0],S[1],S[2]*S[3]])
out = torch.matmul(self.P,out)
return torch.reshape(out,[S[0],self.out_channel,S[2],S[3]])
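# Note (editor's comment): L_Dspec is a learnable spectral-downsampling layer: the
# input cube is flattened to (batch, bands, H*W), multiplied by the trainable
# matrix P (expected shape out_channel x in_channel), and reshaped back to
# (batch, out_channel, H, W).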
def add_wgn(x, snr):
P_signal=torch.sum(x.abs()**2)
P_noise = P_signal/10**(snr/10.0)
sigma = torch.sqrt(P_noise/x.numel())
noise = torch.randn(x.shape).type(torch.cuda.FloatTensor)
return x + sigma * noise
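# Note (editor's comment): add_wgn adds white Gaussian noise at a target SNR given
# in dB: with P_signal = sum(|x|^2) and P_noise = P_signal / 10**(snr/10), the
# per-element standard deviation is sigma = sqrt(P_noise / x.numel()), which is
# exactly what the code above computes.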
def tensor_copy(x):
return x.clone()
def parse_arg():
parser = argparse.ArgumentParser()
parser.add_argument('--model' , default='MSDANet', help='MSDANet')
parser.add_argument('--fusion' , default='Concate', help='Concate')
parser.add_argument('--lr' , default=1e-4, type=float, help='learning rate for optimizer')
parser.add_argument('--batch_size', default=16, type=int, help='batch size for training')
parser.add_argument('--factor' , default=8, type=int, help='scale factor. 4/8/16')
parser.add_argument('--dataset' , default='Houston', help='Houston/PaviaU/dc/PaviaC')
parser.add_argument('--patch_size', default=64, type=int, help='patch size of training')
parser.add_argument('--stride' , default=32, type=int, help='stride of training')
parser.add_argument('--pan' , action='store_true', help='pan_sharpening or MSI + HSI')
parser.add_argument('--mem_load' , action='store_true', help='load the all dataset into memory or disk')
parser.add_argument('--phase' , default='train', help='train/test')
parser.add_argument('--noise' , action='store_true', help='wheater to add noise to LR_HSI and HR_MSI')
return parser.parse_args()
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Conv2d') != -1:
        init.kaiming_normal_(m.weight.data)
def split(full_list,shuffle=False,ratio=0.2):
n_total = len(full_list)
offset = int(n_total * ratio)
if n_total==0 or offset<1:
return [],full_list
random.seed(4)
if shuffle:
random.shuffle(full_list)
sublist_1 = full_list[:offset]
sublist_2 = full_list[offset:]
return sublist_1,sublist_2
def all_data_in(Path='Data/Houston/', datasets='Houston', Train_image_num=10):
names = get_img_name(Path=Path, datasets=datasets)
allData = []
for i in range(Train_image_num):
Data = sio.loadmat(os.path.join(Path, names[i])+'.mat')
HSI = Data['hsi']
HSI = HSI.transpose((2, 0, 1))
allData.append(HSI)
return allData
dataset_dict = dict(
PaviaC = [10, 5, 300, 8000, 102, 1, (55, 41, 12)], ### [train_img_num, val_img_num, stop epoch, max_value, band_number, RGB]
PaviaU = [10, 5, 300, 8000, 103, 1, (46, 27, 10)],
Houston = [3, 2, 300, 65535, 144, 1, (65, 51, 22)],
dc = [11, 5, 300, 65535, 191, 4, (51, 35, 21)],
)
def get_img_name(Path='Data/Houston/', datasets='Houston'):
names_PaviaC_list = [
'PaviaC_01', 'PaviaC_02', 'PaviaC_03', 'PaviaC_04', 'PaviaC_05', 'PaviaC_06',
'PaviaC_07', 'PaviaC_08', 'PaviaC_09', 'PaviaC_10', 'PaviaC_11', 'PaviaC_12',
'PaviaC_13', 'PaviaC_14', 'PaviaC_15'
]
names_Houston_list = [
'Houston_01', 'Houston_02', 'Houston_03', 'Houston_04', 'Houston_05'
]
names_dc_list = [
'dc_01', 'dc_02', 'dc_03', 'dc_04',
'dc_05', 'dc_06', 'dc_07', 'dc_08',
'dc_09', 'dc_10', 'dc_11', 'dc_12',
'dc_13', 'dc_14', 'dc_15', 'dc_16',
]
names_PaviaU_list = [
'PaviaU_01', 'PaviaU_02', 'PaviaU_03', 'PaviaU_04', 'PaviaU_05', 'PaviaU_06',
'PaviaU_07', 'PaviaU_08', 'PaviaU_09', 'PaviaU_10', 'PaviaU_11', 'PaviaU_12',
'PaviaU_13', 'PaviaU_14', 'PaviaU_15'
]
names_Houston, names_Houston_valid = split(names_Houston_list, shuffle=True, ratio=0.6)
names_dc, names_dc_valid = split(names_dc_list, shuffle=True, ratio=0.7)
names_PaviaU, names_PaviaU_valid = split(names_PaviaU_list, shuffle=True, ratio=0.67)
names_PaviaC, names_PaviaC_valid = split(names_PaviaC_list, shuffle=True, ratio=0.67)
if datasets == 'PaviaC':
names = names_PaviaC
elif datasets == 'PaviaC_val':
names = names_PaviaC_valid
elif datasets == 'PaviaU':
names = names_PaviaU
elif datasets == 'PaviaU_val':
names = names_PaviaU_valid
elif datasets == 'Houston':
names = names_Houston
elif datasets == 'Houston_val':
names = names_Houston_valid
elif datasets == 'dc':
names = names_dc
elif datasets == 'dc_val':
names = names_dc_valid
else:
        raise ValueError('wrong dataset name')
return names
|
pyguan88/MDA-Net
|
function.py
|
function.py
|
py
| 6,689 |
python
|
en
|
code
| 8 |
github-code
|
6
|
39472033874
|
from paystackapi.paystack import Paystack
from paystackapi.transaction import Transaction
from paystackapi.verification import Verification
paystack_secret_key = "sk_test_a18b4a0dcad6d60a03b5be78a47e14f8d28686ce"
paystack_public_key = "pk_test_80c9e3e62c12dca2e7a51baaccf342279ffa8f1a"
paystack = Paystack(secret_key=paystack_secret_key)
paramz = '9sxzb9weo8'
details = Transaction.verify(reference=paramz)
status = details['data']['status']
print(details)
print(status)
|
gidex19/signacode
|
my_app/pay.py
|
pay.py
|
py
| 474 |
python
|
en
|
code
| 0 |
github-code
|
6
|
485113359
|
import pytest
from graph_pkg.edit_cost.edit_cost_proteins_tu import EditCostProteinsTU
from graph_pkg.graph.label.label_node_proteins_tu import LabelNodeProteinsTU
from graph_pkg.graph.node import Node
@pytest.mark.parametrize('coord1, e_cost, expected',
[
((1,), (1., 1., 1., 1., 'dirac'), 1.),
((0,), (1., 1., 1., 1., 'dirac'), 1.),
((2,), (1., 1., 1., 1., 'dirac'), 1.),
((0,), (11., 1., 1., 1., 'dirac'), 11.),
((0,), (1., 1.9, 1.9, 1.9, 'dirac'), 1.),
])
def test_dirac_proteins_tu_add_node(coord1, e_cost, expected):
node0 = Node(0, LabelNodeProteinsTU(*coord1))
edit_cost = EditCostProteinsTU(*e_cost)
result = edit_cost.cost_insert_node(node0)
assert result == expected
@pytest.mark.parametrize('coord1, e_cost, expected',
[
((1,), (1., 1., 1., 1., 'dirac'), 1.),
((0,), (1., 1., 1., 1., 'dirac'), 1.),
((1,), (16., 12., 18., 17., 'dirac'), 12.),
])
def test_dirac_proteins_tu_delete_node(coord1, e_cost, expected):
node0 = Node(0, LabelNodeProteinsTU(*coord1))
edit_cost = EditCostProteinsTU(*e_cost)
result = edit_cost.cost_delete_node(node0)
assert result == expected
@pytest.mark.parametrize('coord1, coord2, e_cost, expected',
[
((1,), (1,), (1., 1., 1., 1., 'dirac'), 0.),
((0,), (1,), (1., 1., 1., 1., 'dirac'), 2.),
((1,), (0,), (1., 1., 1., 1., 'dirac'), 2.),
((1,), (2,), (3., 2., 2.5, 1., 'dirac'), 5.),
])
def test_dirac_proteins_tu_substitution(coord1, coord2, e_cost, expected):
node0 = Node(0, LabelNodeProteinsTU(*coord1))
node1 = Node(1, LabelNodeProteinsTU(*coord2))
edit_cost = EditCostProteinsTU(*e_cost)
result = edit_cost.cost_substitute_node(node0, node1)
assert result == expected
|
CheshireCat12/graph_project
|
tests/unit_edit_cost/test_edit_cost_proteins_tu.py
|
test_edit_cost_proteins_tu.py
|
py
| 2,168 |
python
|
en
|
code
| 1 |
github-code
|
6
|
16480507143
|
"""Original from https://github.com/yhenon/pytorch-retinanet"""
import torch
import torch.nn as nn
import numpy as np
import skimage.io
import skimage.transform
import skimage.color
import skimage
from PIL import Image
def comptue_dim(dim, padding, kernel_size, stride):
return np.floor((dim + 2*padding - kernel_size) / stride) + 1
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class BBoxTransform(nn.Module):
def __init__(self, mean=None, std=None):
super(BBoxTransform, self).__init__()
if mean is None:
self.mean = torch.from_numpy(np.array([0, 0, 0, 0]).astype(np.float32)).cuda()
else:
self.mean = mean
if std is None:
self.std = torch.from_numpy(np.array([0.1, 0.1, 0.2, 0.2]).astype(np.float32)).cuda()
else:
self.std = std
def forward(self, boxes, deltas):
widths = boxes[:, :, 2] - boxes[:, :, 0]
heights = boxes[:, :, 3] - boxes[:, :, 1]
ctr_x = boxes[:, :, 0] + 0.5 * widths
ctr_y = boxes[:, :, 1] + 0.5 * heights
dx = deltas[:, :, 0] * self.std[0] + self.mean[0]
dy = deltas[:, :, 1] * self.std[1] + self.mean[1]
dw = deltas[:, :, 2] * self.std[2] + self.mean[2]
dh = deltas[:, :, 3] * self.std[3] + self.mean[3]
pred_ctr_x = ctr_x + dx * widths
pred_ctr_y = ctr_y + dy * heights
pred_w = torch.exp(dw) * widths
pred_h = torch.exp(dh) * heights
pred_boxes_x1 = pred_ctr_x - 0.5 * pred_w
pred_boxes_y1 = pred_ctr_y - 0.5 * pred_h
pred_boxes_x2 = pred_ctr_x + 0.5 * pred_w
pred_boxes_y2 = pred_ctr_y + 0.5 * pred_h
pred_boxes = torch.stack([pred_boxes_x1, pred_boxes_y1, pred_boxes_x2, pred_boxes_y2], dim=2)
return pred_boxes
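# Note (editor's comment): BBoxTransform applies the usual R-CNN box decoding: the
# (dx, dy, dw, dh) deltas are un-normalized with the stored mean/std, anchor
# centers are shifted by (dx*width, dy*height), sizes are scaled by exp(dw) and
# exp(dh), and the result is converted back to (x1, y1, x2, y2) corners.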
class ClipBoxes(nn.Module):
def __init__(self, width=None, height=None):
super(ClipBoxes, self).__init__()
def forward(self, boxes, img):
batch_size, num_channels, height, width = img.shape
boxes[:, :, 0] = torch.clamp(boxes[:, :, 0], min=0)
boxes[:, :, 1] = torch.clamp(boxes[:, :, 1], min=0)
boxes[:, :, 2] = torch.clamp(boxes[:, :, 2], max=width)
boxes[:, :, 3] = torch.clamp(boxes[:, :, 3], max=height)
return boxes
class Resizer(object):
    """Resize the image so its smallest side is min_side and pad both dimensions to multiples of 32."""
def __call__(self, image, annots, min_side=608, max_side=1024):
image = np.array(image)
annots = np.array([[*annot['bbox'], annot['category_id']] for annot in annots])
rows, cols, cns = image.shape
smallest_side = min(rows, cols)
# rescale the image so the smallest side is min_side
scale = min_side / smallest_side
# check if the largest side is now greater than max_side, which can happen
# when images have a large aspect ratio
largest_side = max(rows, cols)
if largest_side * scale > max_side:
scale = max_side / largest_side
# resize the image with the computed scale
image = skimage.transform.resize(image, (int(round(rows * scale)), int(round((cols * scale)))))
rows, cols, cns = image.shape
pad_w = 32 - rows % 32
pad_h = 32 - cols % 32
new_image = np.zeros((rows + pad_w, cols + pad_h, cns)).astype(np.float32)
new_image[:rows, :cols, :] = image.astype(np.float32)
        # scale the bounding-box coordinates (columns 0-3); column 4 holds the category id
        annots[:, :4] = annots[:, :4] * scale
return Image.fromarray(np.uint8(new_image)), torch.from_numpy(annots), scale
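# Note (editor's comment): Resizer first rescales the image so its smallest side
# is min_side (capping the scale so the largest side stays within max_side), then
# zero-pads the bottom/right so both spatial dimensions are multiples of 32, and
# rescales the bounding-box coordinates by the same factor.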
class Augmenter(object):
    """Randomly flip the image and its annotations horizontally."""
def __call__(self, sample, flip_x=0.5):
if np.random.rand() < flip_x:
image, annots = sample['img'], sample['annot']
image = image[:, ::-1, :]
rows, cols, channels = image.shape
x1 = annots[:, 0].copy()
x2 = annots[:, 2].copy()
x_tmp = x1.copy()
annots[:, 0] = cols - x2
annots[:, 2] = cols - x_tmp
sample = {'img': image, 'annot': annots}
return sample
class Normalizer(object):
def __init__(self):
self.mean = np.array([[[0.485, 0.456, 0.406]]])
self.std = np.array([[[0.229, 0.224, 0.225]]])
def __call__(self, sample):
image, annots = sample['img'], sample['annot']
return {'img': ((image.astype(np.float32) - self.mean) / self.std), 'annot': annots}
class UnNormalizer(object):
def __init__(self, mean=None, std=None):
if mean == None:
self.mean = [0.485, 0.456, 0.406]
else:
self.mean = mean
if std == None:
self.std = [0.229, 0.224, 0.225]
else:
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
return tensor
|
sebastiani/pytorch-attention-augmented-convolution
|
utils/utils.py
|
utils.py
|
py
| 7,137 |
python
|
en
|
code
| 18 |
github-code
|
6
|
27016970830
|
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
from django.utils.translation import ugettext_lazy as _
from GameFeb19_intro.models import add_currency, add_tokens, TRNSL_ERR_MSG, translated_languages
import csv
import random
author = 'Tatiana Mayskaya'
doc = """
Cognitive Reflection Test & IQ Test & GRE-based Test :: whatever counts as cognitive test
"""
class Constants(BaseConstants):
name_in_url = 'GameFeb19_questions_cognitive'
players_per_group = None
# this is done only to count the number of questions in the quiz
# (assuming Russian and English versions have the same number)
with open('GameFeb19_questions_cognitive/cognitive_en.csv') as file:
questions = list(csv.DictReader(file))
num_rounds = len(questions)
class Subsession(BaseSubsession):
def creating_session(self):
assert self.session.config['language'] in translated_languages, TRNSL_ERR_MSG
if self.round_number == 1:
if self.session.config['language'] == 'en':
with open('GameFeb19_questions_cognitive/cognitive_en.csv', encoding='utf-8-sig') as test_file:
self.session.vars['test_file_list'] = list(csv.DictReader(test_file))
else:
with open('GameFeb19_questions_cognitive/cognitive_ru.csv', encoding='utf-8-sig') as test_file:
self.session.vars['test_file_list'] = list(csv.DictReader(test_file))
for p in self.get_players():
p.random_questions()
self.session.vars['num_questions_CT'] = Constants.num_rounds
for p in self.get_players():
question_data = p.current_question()
p.question_id = question_data['id']
p.question = question_data['question']
p.solution = int(question_data['solution'])
if int(question_data['n_choices']) == 0:
p.solution_text = question_data['solution']
else:
p.solution_text = question_data['choice{}'.format(p.solution)]
p.participant.vars['questions_CT'] = []
def vars_for_admin_report(self):
players = []
for p in self.get_players():
players.append((p.participant.label, p.question, p.submitted_answer_text, p.solution_text,
p.get_is_correct_display()))
return {'players': players}
class Group(BaseGroup):
pass
class Player(BasePlayer):
question_id = models.IntegerField()
question = models.StringField()
solution = models.IntegerField()
solution_text = models.StringField()
submitted_answer = models.IntegerField()
submitted_answer_options = models.IntegerField(widget=widgets.RadioSelect)
submitted_answer_text = models.StringField()
is_correct = models.BooleanField(initial=False, choices=[[True, _('Yes')], [False, _('No')]])
def random_questions(self):
randomized_questions = random.sample(range(1, Constants.num_rounds + 1, 1), Constants.num_rounds)
self.participant.vars['questions_order_CT'] = randomized_questions
def current_question(self):
num = self.participant.vars['questions_order_CT'][self.round_number - 1]
return self.session.vars['test_file_list'][num - 1]
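    # Note (editor's comment): random_questions() shuffles the question order once
    # per participant and stores it in participant.vars; current_question() then
    # maps the 1-based round number onto that order and fetches the corresponding
    # row from session.vars['test_file_list'].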
def check_correct(self):
question_data = self.current_question()
if int(question_data['n_choices']) > 0:
self.submitted_answer = self.submitted_answer_options
self.is_correct = (self.submitted_answer == self.solution)
if int(question_data['n_choices']) == 0:
self.submitted_answer_text = str(self.submitted_answer)
else:
self.submitted_answer_text = question_data['choice{}'.format(self.submitted_answer)]
self.participant.vars['questions_CT'].append(
(self.round_number, self.question, self.submitted_answer_text, self.solution_text,
self.get_is_correct_display()))
if self.is_correct:
self.payoff = self.session.vars['rate_CT']
def set_payoffs(self):
self.participant.vars['questions_correct_CT'] = sum([int(p.is_correct) for p in self.in_all_rounds()])
self.participant.vars['payment_formula'] = \
self.participant.vars['payment_formula'] + \
' + ' + str(self.participant.vars['questions_correct_CT']) + '*' + \
add_currency(self.session.config['currency_used'],
self.session.vars['rate_CT'] * self.session.config['real_world_currency_per_point'])
|
TatianaMayskaya/oTree
|
GameFeb19_questions_cognitive/models.py
|
models.py
|
py
| 4,609 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71669765628
|
cricketer = {
"VinayKumar": [102, 5],
"Yuzvendra Chahal": [89, 10],
"Sandeep Sharma": [95, 8],
"Umesh Yadav": [85, 6],
"BhuvaneswarKumar": [106, 10],
"Basil Thampi": [70, 5]
}
for player, stats in cricketer.items():
runs_conceded, wickets_taken = stats
bowling_average = runs_conceded / wickets_taken
cricketer[player] = [round(bowling_average, 2)]
sorted_cricketer = dict(sorted(cricketer.items(), key=lambda x: x[1]))
print(sorted_cricketer)
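# Note (editor's comment): bowling average = runs conceded / wickets taken, so
# sorting the dictionary by value in ascending order lists the most economical
# bowler (lowest average) first.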
|
Rjeyyy/PythonProgramming
|
Dictionaries/program5.py
|
program5.py
|
py
| 483 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17270713471
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_regression
from mpl_toolkits import mplot3d
# Training phase/ Training the LR model/ Find optimal weights
def fit(X, y):
"""
X: Feature matrix: (n_samples, n_features)
y: y_true: (n_samples,1)
Returns: weights
weights: optimal weights (n_features, 1)
"""
X = X.copy()
ones_column = np.ones((len(X),1))
X = np.concatenate([ones_column, X], axis=1)
w = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)
return w
# prediction
def predict(X, w):
"""
X: Feature matrix: (n_samples, n_features)
w: weight vector: (n_fetures, 1)
Returns:
y: y_pred = X.w (n_samples,1)
"""
X = X.copy()
ones_column = np.ones((len(X),1))
X = np.concatenate([ones_column, X], axis=1)
return X.dot(w)
# r_squared
def r_squared(ytrue, ypred):
e_method = ((ytrue-ypred)**2).sum() # sum of squares of residuals
e_baseline = ((ytrue-ytrue.mean())**2).sum() # total sum of squares
return 1 - e_method/e_baseline
# loss function
def loss(ytrue, ypred):
return ((ytrue-ypred)**2).sum()
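# --- Editor's illustrative sanity check (not part of the original script) ---
# A minimal sketch: on noiseless data y = 3 + 2*x, the closed-form fit above
# (normal equation with a prepended bias column) recovers the bias (~3) and the
# slope (~2) exactly, and the corresponding r_squared is 1.
_x_demo = np.arange(5, dtype=float).reshape(-1, 1)
_y_demo = 3 + 2 * _x_demo.ravel()
_w_demo = fit(_x_demo, _y_demo)
assert np.allclose(_w_demo, [3.0, 2.0])
assert np.isclose(r_squared(_y_demo, predict(_x_demo, _w_demo)), 1.0)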
X, y, coeff = make_regression(n_samples=100, n_features=2, coef=True, noise=0.5, bias=3, random_state=70)
# print(X.shape, y.shape)
# Train the model/ learn the optimal weights
w = fit(X, y)
####################################################
fig = plt.figure(figsize=(8,8))
ax = plt.axes(projection='3d')
ax.scatter(X[:,0], X[:,1], y, c=y, cmap='seismic')
f1 = np.linspace(X[:,0].min(), X[:,0].max(), 50)
f2 = np.linspace(X[:,1].min(), X[:,1].max(), 50)
f1, f2 = np.meshgrid(f1, f2)
# prediction plane
X_ = np.concatenate([f1.reshape(-1,1), f2.reshape(-1,1)], axis=1)
pred = predict(X_, w).reshape(f1.shape)
ax.plot_surface(f1, f2, pred, alpha=0.5, cmap='seismic')
ax.set_xlabel("Feature 1")
ax.set_ylabel("Feature 2")
ax.set_zlabel("Output (y)")
plt.show()
|
princeyyadav/CB-DS-LV-May21
|
DS/S13-linear-regression/viz.py
|
viz.py
|
py
| 1,905 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26459920205
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ..nnutils import geom_utils
from ..nnutils import loss_utils
from ..nnutils import train_utils
from ..nnutils import discriminators
from ..nnutils.smr import SoftRenderer
from ..nnutils import cub_mesh_s1 as mesh_net
from ..nnutils.nmr_pytorch import NeuralRenderer
from ..data import cub as cub_data
from ..utils import image as image_utils
from ..utils import tf_visualizer
from ..utils.tf_visualizer import Visualizer as TfVisualizer
import os
import time
import copy
import numpy as np
import os.path as osp
from absl import app, flags
from collections import OrderedDict
import torch
import torchvision
import soft_renderer as sr
import torchvision.utils as vutils
# Weights:
flags.DEFINE_float('mask_loss_wt', 3.0, 'mask loss weight')
flags.DEFINE_float('grl_wt', .2, 'gradient reversal layer weight')
flags.DEFINE_float('gan_loss_wt', 1., 'adversarial training weight')
flags.DEFINE_float('triangle_reg_wt', 0.15, 'weights to triangle smoothness prior')
flags.DEFINE_float('flatten_reg_wt', 0.0004, 'weights to flatten smoothness prior')
flags.DEFINE_float('deform_reg_wt', 5., 'reg to deformation')
flags.DEFINE_float('ori_reg_wt', 0.4, 'reg to orientation')
flags.DEFINE_float('stop_ori_epoch', 3., 'when to stop using this constraint')
flags.DEFINE_float('tex_loss_wt', 3.0, 'weights to tex loss')
flags.DEFINE_float('tex_dt_loss_wt', 3.0, 'weights to tex dt loss')
flags.DEFINE_float('tex_cycle_loss_wt', .5, 'weights to tex cycle loss')
# Data:
flags.DEFINE_integer('image_size', 256, 'training image size')
# Model:
flags.DEFINE_string('renderer_type', 'softmax', 'choices are [hard, softmax]')
flags.DEFINE_boolean('use_gan', True, 'If true uses GAN training')
flags.DEFINE_boolean('pred_cam', True, 'If true predicts camera')
flags.DEFINE_boolean('detach_shape', True, 'If true detach shape from the texture branch.')
flags.DEFINE_boolean('detach_cam', True, 'If true detach camera from the texture branch.')
flags.DEFINE_boolean('use_scops', False, 'If true read part segmentations in the loader.')
flags.DEFINE_integer('update_template_freq', 5, 'template update frequency')
flags.DEFINE_integer('axis', 1, 'symmetric axis')
opts = flags.FLAGS
curr_path = osp.dirname(osp.abspath(__file__))
cache_path = osp.join(curr_path, '..', 'cachedir')
class ShapenetTrainer(train_utils.Trainer):
def define_model(self):
opts = self.opts
# define model
self.symmetric = opts.symmetric
img_size = (opts.img_size, opts.img_size)
self.model = mesh_net.MeshNet(
img_size, opts, nz_feat=opts.nz_feat,
axis = opts.axis)
self.model = self.model.cuda()
if(opts.multi_gpu):
self.model = torch.nn.DataParallel(self.model)
if(opts.use_gan):
self.discriminator = discriminators.Discriminator(lambda_ = opts.grl_wt,
img_size = opts.image_size)
self.discriminator = self.discriminator.cuda()
if(opts.multi_gpu):
self.discriminator = torch.nn.DataParallel(self.discriminator)
if(opts.multi_gpu):
faces = self.model.module.faces.view(1, -1, 3)
else:
faces = self.model.faces.view(1, -1, 3)
self.faces = faces.repeat(opts.batch_size, 1, 1)
# define renderers
self.renderer = SoftRenderer(opts.image_size, opts.renderer_type)
self.dis_renderer = SoftRenderer(opts.image_size, opts.renderer_type)
self.hard_renderer = SoftRenderer(opts.image_size, "hard")
if opts.use_texture:
self.tex_renderer = SoftRenderer(opts.image_size, opts.renderer_type)
self.tex_renderer.ambient_light_only()
self.vis_renderer = NeuralRenderer(opts.image_size)
self.vis_renderer.ambient_light_only()
self.vis_renderer.set_bgcolor([1, 1, 1])
self.vis_renderer.set_light_dir([0, 1, -1], 0.4)
self.iter_time = 0
return
def init_dataset(self):
opts = self.opts
self.data_module = cub_data
self.dataloader = self.data_module.data_loader(opts)
self.resnet_transform = torchvision.transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
def define_criterion(self):
# shape objectives
self.mask_loss_fn = loss_utils.neg_iou_loss
if(opts.multi_gpu):
verts = self.model.module.get_mean_shape().cpu()
faces = self.model.module.faces.cpu()
else:
verts = self.model.get_mean_shape().cpu()
faces = self.model.faces.cpu()
self.laplacian_loss_fn = sr.LaplacianLoss(verts, faces).cuda()
self.flatten_loss_fn = sr.FlattenLoss(faces).cuda()
if(opts.multi_gpu):
self.laplacian_loss_fn = torch.nn.DataParallel(self.laplacian_loss_fn)
self.flatten_loss_fn = torch.nn.DataParallel(self.flatten_loss_fn)
# shape constraints
self.deform_reg_fn = loss_utils.deform_l2reg
self.ori_reg_fn = loss_utils.sym_reg
self.gan_loss_fn = torch.nn.functional.binary_cross_entropy_with_logits
# texture objectives
if self.opts.use_texture:
self.texture_loss = loss_utils.PerceptualTextureLoss()
self.texture_dt_loss_fn = loss_utils.texture_dt_loss
self.texture_cycle_fn = loss_utils.TexCycle(int(opts.batch_size/opts.gpu_num))
self.texture_cycle_fn = self.texture_cycle_fn.cuda()
if(opts.multi_gpu):
self.texture_cycle_fn = torch.nn.DataParallel(self.texture_cycle_fn)
def set_input(self, batch):
opts = self.opts
input_img_tensor = batch['img'].type(torch.FloatTensor)
for b in range(input_img_tensor.size(0)):
input_img_tensor[b] = self.resnet_transform(input_img_tensor[b])
img_tensor = batch['img'].type(torch.FloatTensor)
mask_tensor = batch['mask'].type(torch.FloatTensor)
self.input_imgs = input_img_tensor.cuda()
self.imgs = img_tensor.cuda()
self.masks = mask_tensor.cuda()
if(opts.use_texture):
# Compute barrier distance transform.
mask_dts = np.stack([image_utils.compute_dt_barrier(m) for m in mask_tensor])
dt_tensor = torch.FloatTensor(mask_dts).cuda()
self.dts_barrier = dt_tensor.unsqueeze(1)
def forward(self):
opts = self.opts
outputs = self.model.forward(self.input_imgs)
# shape
self.delta_v = outputs['delta_v']
if(opts.symmetric):
if(opts.multi_gpu):
delta_v = self.model.module.symmetrize(self.delta_v)
self.mean_shape = self.model.module.get_mean_shape()
else:
delta_v = self.model.symmetrize(self.delta_v)
self.mean_shape = self.model.get_mean_shape()
else:
delta_v = self.delta_v
self.pred_vs = self.mean_shape + delta_v
# camera
proj_cam = outputs['cam']
self.proj_cam = proj_cam
# shape losses
self.pred_seen, _, _ = self.renderer.forward(self.pred_vs, self.faces, proj_cam)
self.mask_pred_seen = self.pred_seen[:,3,:,:]
self.mask_loss = self.mask_loss_fn(self.mask_pred_seen, self.masks)
self.triangle_loss = self.laplacian_loss_fn(self.pred_vs).mean()
self.flatten_loss = self.flatten_loss_fn(self.pred_vs).mean()
self.deform_loss = self.deform_reg_fn(self.delta_v)
self.ori_loss = self.ori_reg_fn(self.pred_vs)
# texture losses
if(opts.use_texture):
self.tex_flow = outputs['tex_flow']
self.uvimage_pred = outputs['uvimage_pred']
self.tex = geom_utils.sample_textures(self.tex_flow, self.imgs)
self.tex = self.tex.contiguous()
bs, fs, ts, _, _ = self.tex.size()
self.tex = self.tex.view(bs, fs, -1, 3)
texture_rgba, p2f_info, _ = self.tex_renderer.forward(self.pred_vs.detach(), self.faces, proj_cam.detach(), self.tex)
self.texture_pred = texture_rgba[:,0:3,:,:]
self.tex_loss = self.texture_loss(self.texture_pred, self.imgs, self.masks, self.mask_pred_seen)
self.tex_dt_loss = self.texture_dt_loss_fn(self.tex_flow, self.dts_barrier)
# texture cycle loss
_, _, aggr_info = self.hard_renderer(self.pred_vs.detach(), self.faces, proj_cam.detach())
aggr_info = aggr_info[:, 1, :, :].view(bs, -1)
tex_cycle_loss, self.avg_flow = self.texture_cycle_fn(self.tex_flow, p2f_info.detach(), aggr_info.detach())
# The mean is used to collect loss from different GPUs
self.tex_cycle_loss = torch.mean(tex_cycle_loss)
self.p2f_info = p2f_info
if(opts.use_gan):
# render at unobserved view
angles = np.random.randint(0, 180, size=bs)
random_cams = geom_utils.rotate_cam(proj_cam.detach(), angles)
pred_unseen, _, _ = self.dis_renderer.forward(self.pred_vs, self.faces, random_cams)
self.mask_pred_unseen = pred_unseen[:,3,:,:]
pred = torch.cat((self.pred_seen.detach(), pred_unseen))
gan_labels = torch.cat((torch.ones(self.pred_seen.shape[0]),
torch.zeros(pred_unseen.shape[0])), dim = 0)
gan_labels = gan_labels.cuda()
gan_preds = self.discriminator(pred[:,3,:,:].unsqueeze(1))
self.gan_loss = self.gan_loss_fn(gan_preds.squeeze(), gan_labels)
# add up all losses
# shape
self.total_loss = self.mask_loss * opts.mask_loss_wt
self.total_loss += self.triangle_loss * opts.triangle_reg_wt
self.total_loss += self.flatten_loss * opts.flatten_reg_wt
if(self.curr_epoch < opts.stop_ori_epoch):
# constrain prediction to be symmetric on the given axis
self.total_loss += self.ori_loss * opts.ori_reg_wt
if(self.curr_epoch > opts.update_template_freq):
# constrain prediction from deviating from template
self.total_loss += self.deform_loss * opts.deform_reg_wt
# texture
if(opts.use_texture):
self.total_loss += self.tex_loss * opts.tex_loss_wt
self.total_loss += self.tex_dt_loss * opts.tex_dt_loss_wt
self.total_loss += self.tex_cycle_loss * opts.tex_cycle_loss_wt
# GAN
if(opts.use_gan):
self.total_loss += self.gan_loss * opts.gan_loss_wt
def get_current_visuals(self):
vis_dict = {}
# UV maps
if self.opts.use_texture:
uv_flows = self.uvimage_pred
uv_flows = uv_flows.permute(0, 2, 3, 1)
uv_images = torch.nn.functional.grid_sample(self.imgs, uv_flows)
vis_dict['uv_images'] = uv_images
# mask
vis_dict['mask_pred'] = self.mask_pred_seen.unsqueeze(1)
nb, nf, _, nc = self.tex.size()
tex = self.tex.detach().view(nb, nf, opts.tex_size, opts.tex_size, nc).unsqueeze(4).repeat(1, 1, 1, 1, opts.tex_size, 1)
vis_dict['mask_gt'] = self.masks.unsqueeze(1)
# image
vis_dict['image_pred'] = self.vis_renderer(self.pred_vs.detach(), self.faces, self.proj_cam.detach(), tex)
vis_dict['image_gt'] = self.imgs * self.masks.unsqueeze(1).repeat(1, 3, 1, 1)
# instance mesh
if(self.opts.use_texture):
mesh_ = sr.Mesh(self.pred_vs[0], self.faces[0], self.tex[0].view(self.faces.size(1),-1,3))
else:
mesh_ = sr.Mesh(self.pred_vs[0], self.faces[0])
vis_dict['mesh'] = mesh_
# template mesh
if(opts.multi_gpu):
template_mesh_ = sr.Mesh(self.model.module.get_mean_shape(), self.faces[0])
else:
template_mesh_ = sr.Mesh(self.model.get_mean_shape(), self.faces[0])
vis_dict['template_mesh'] = template_mesh_
return vis_dict
def get_current_scalars(self):
opts = self.opts
sc_dict = OrderedDict([
('smoothed_total_loss', self.smoothed_total_loss),
('total_loss', self.total_loss),
('mask_loss', self.mask_loss),
('tri_loss', self.triangle_loss),
('flatten_loss', self.flatten_loss),
('deform_loss', self.deform_loss),
('ori_loss', self.ori_loss),
('lr', self.optimizer.param_groups[0]['lr']),
('iter_time', self.iter_time),
])
if opts.use_texture:
sc_dict['tex_loss'] = self.tex_loss
sc_dict['tex_dt_loss'] = self.tex_dt_loss
sc_dict['tex_cycle_loss'] = self.tex_cycle_loss
return sc_dict
'''Overwrite train function for template update.'''
def train(self):
opts = self.opts
self.visualizer = TfVisualizer(opts)
self.smoothed_total_loss = 0
visualizer = self.visualizer
total_steps = 0
optim_steps = 0
dataset_size = len(self.dataloader)
for epoch in range(opts.num_pretrain_epochs, opts.num_epochs):
epoch_iter = 0
self.curr_epoch = epoch
for i, batch in enumerate(self.dataloader):
self.iteration_num += 1
self.adjust_learning_rate(self.optimizer)
t_init = time.time()
self.set_input(batch)
t_batch = time.time()
if not self.invalid_batch:
optim_steps += 1
self.optimizer.zero_grad()
start_time = time.time()
self.forward()
self.smoothed_total_loss = self.smoothed_total_loss*0.99 + 0.01*self.total_loss
t_forw = time.time()
self.total_loss.backward()
t_backw = time.time()
if optim_steps % opts.optim_bs == 0:
self.optimizer.step()
end_time = time.time()
self.iter_time = end_time - start_time
t_opt = time.time()
total_steps += 1
epoch_iter += 1
if opts.display_visuals and (total_steps % opts.display_freq == 0):
iter_end_time = time.time()
vis_dict = self.get_current_visuals()
for k,v in vis_dict.items():
if('mesh' in k):
v.save_obj(os.path.join(self.vis_dir,'{}.obj'.format(k)), save_texture=True)
else:
vutils.save_image(v, os.path.join(self.vis_dir, k + '.png'))
print(tf_visualizer.green("Visualization saved at {}.".format(self.vis_dir)))
if opts.print_scalars and (total_steps % opts.print_freq == 0):
scalars = self.get_current_scalars()
visualizer.print_current_scalars(epoch, epoch_iter, scalars)
if total_steps % opts.save_latest_freq == 0:
print(tf_visualizer.green('saving the model at the end of epoch {:d}, iters {:d}'.format(epoch, total_steps)))
self.save('latest')
if total_steps == opts.num_iter:
return
# update template
if((epoch+1) % opts.update_template_freq == 0):
print(tf_visualizer.green('Updating template...'))
self.feat = torch.zeros(opts.batch_size, opts.z_dim)
self.feat = self.feat.cuda()
# compute average encoder features
for i, batch in enumerate(self.dataloader):
self.set_input(batch)
with torch.no_grad():
outputs = self.model(self.input_imgs)
self.feat += outputs['feat']
self.feat = self.feat / (i + 1)
self.feat = torch.mean(self.feat, dim=0).unsqueeze(0)
# feed averaged features into the shape decoder
if(opts.multi_gpu):
with torch.no_grad():
delta_v = self.model.module.shape_predictor(self.feat)
self.model.module.mean_v += delta_v.squeeze()
else:
with torch.no_grad():
delta_v = self.model.shape_predictor(self.feat)
self.model.mean_v += delta_v.squeeze()
print(tf_visualizer.green('Template updated.'))
if (epoch+1) % opts.save_epoch_freq == 0:
print(tf_visualizer.green('saving the model at the end of epoch {:d}, iters {:d}'.format(epoch, total_steps)))
self.save('latest')
self.save(epoch+1)
def main(_):
torch.manual_seed(0)
trainer = ShapenetTrainer(opts)
trainer.init_training()
trainer.train()
if __name__ == '__main__':
app.run(main)
|
NVlabs/UMR
|
experiments/train_s1.py
|
train_s1.py
|
py
| 17,158 |
python
|
en
|
code
| 223 |
github-code
|
6
|
43077970864
|
from typing import Any, Callable, Dict, Optional, Type, Union
from fugue.execution.execution_engine import ExecutionEngine, SQLEngine
from fugue.execution.native_execution_engine import NativeExecutionEngine
from triad.utils.convert import to_instance
from triad import assert_or_throw, ParamDict
class _ExecutionEngineFactory(object):
def __init__(self):
self._funcs: Dict[str, Callable] = {}
self._type_funcs: Dict[Type, Callable] = {}
self._sql_funcs: Dict[str, Callable] = {}
self.register_default(lambda conf, **kwargs: NativeExecutionEngine(conf=conf))
self.register_default_sql_engine(lambda engine, **kwargs: engine.sql_engine)
def register(
self, name_or_type: Union[str, Type], func: Callable, on_dup="overwrite"
) -> None:
if isinstance(name_or_type, str):
self._register(self._funcs, name=name_or_type, func=func, on_dup=on_dup)
else:
self._register(
self._type_funcs, name=name_or_type, func=func, on_dup=on_dup
)
def register_default(self, func: Callable, on_dup="overwrite") -> None:
self.register("", func, on_dup)
def register_sql_engine(
self, name: str, func: Callable, on_dup="overwrite"
) -> None:
self._register(self._sql_funcs, name=name, func=func, on_dup=on_dup)
def register_default_sql_engine(self, func: Callable, on_dup="overwrite") -> None:
self.register_sql_engine("", func, on_dup)
def make(
self, engine: Any = None, conf: Any = None, **kwargs: Any
) -> ExecutionEngine:
if isinstance(engine, tuple):
execution_engine = self.make_execution_engine(
engine[0], conf=conf, **kwargs
)
sql_engine = self.make_sql_engine(engine[1], execution_engine)
execution_engine.set_sql_engine(sql_engine)
return execution_engine
else:
return self.make((engine, None), conf=conf, **kwargs)
def make_execution_engine(
self, engine: Any = None, conf: Any = None, **kwargs: Any
) -> ExecutionEngine:
# Apply this function to an Execution Engine instance can
# make sure the compile conf is a superset of conf
# TODO: it's a mess here, can we make the logic more intuitive?
def make_engine(engine: Any) -> ExecutionEngine:
if isinstance(engine, str) and engine in self._funcs:
return self._funcs[engine](conf, **kwargs)
for k, f in self._type_funcs.items():
if isinstance(engine, k):
return f(engine, conf, **kwargs)
if isinstance(engine, ExecutionEngine):
if conf is not None:
engine.compile_conf.update(conf)
engine.compile_conf.update(kwargs)
return engine
return to_instance(
engine, ExecutionEngine, kwargs=dict(conf=conf, **kwargs)
)
result = make_engine(engine or "")
result.compile_conf.update(result.conf, on_dup=ParamDict.IGNORE)
result.compile_conf.update(conf, on_dup=ParamDict.OVERWRITE)
result.compile_conf.update(kwargs, on_dup=ParamDict.OVERWRITE)
return result
def make_sql_engine(
self,
engine: Any = None,
execution_engine: Optional[ExecutionEngine] = None,
**kwargs: Any,
) -> SQLEngine:
if engine is None:
engine = ""
if isinstance(engine, str) and engine in self._sql_funcs:
return self._sql_funcs[engine](execution_engine, **kwargs)
if isinstance(engine, SQLEngine):
assert_or_throw(
execution_engine is None and len(kwargs) == 0,
lambda: ValueError(
f"{engine} is an instance, can't take arguments "
f"execution_engine={execution_engine}, kwargs={kwargs}"
),
)
return engine
return to_instance(
engine, SQLEngine, kwargs=dict(execution_engine=execution_engine, **kwargs)
)
def _register(
self,
callables: Dict[Any, Callable],
name: Any,
func: Callable,
on_dup="overwrite",
) -> None:
        if name not in callables:
            callables[name] = func
            return
if on_dup in ["raise", "throw"]:
raise KeyError(f"{name} is already registered")
if on_dup == "overwrite":
callables[name] = func
return
if on_dup == "ignore":
return
raise ValueError(on_dup)
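# Note (editor's comment): _register resolves duplicate names according to on_dup:
# a brand-new name is always stored, while an existing name is overwritten
# ("overwrite"), kept ("ignore"), or rejected with KeyError ("raise"/"throw");
# any other on_dup value raises ValueError.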
_EXECUTION_ENGINE_FACTORY = _ExecutionEngineFactory()
def register_execution_engine(
name_or_type: Union[str, Type], func: Callable, on_dup="overwrite"
) -> None:
"""Register :class:`~fugue.execution.execution_engine.ExecutionEngine` with
a given name.
:param name_or_type: alias of the execution engine, or type of an object that
can be converted to an execution engine
:param func: a callable taking |ParamsLikeObject| and ``**kwargs`` and returning an
:class:`~fugue.execution.execution_engine.ExecutionEngine` instance
:param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore"
(not overwriting) or "throw" (throw exception), defaults to "overwrite".
:raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists
.. admonition:: Examples
Alias registration examples:
.. code-block:: python
# create a new engine with name my (overwrites if existed)
register_execution_engine("my", lambda conf: MyExecutionEngine(conf))
# 0
make_execution_engine("my")
            make_execution_engine("my", {"myconfig":"value"})
# 1
with FugueWorkflow("my") as dag:
dag.create([[0]],"a:int").show()
# 2
dag = FugueWorkflow()
dag.create([[0]],"a:int").show()
            dag.run("my", {"myconfig":"value"})
# 3
fsql('''
CREATE [[0]] SCHEMA a:int
PRINT
''').run("my")
Type registration examples:
.. code-block:: python
from pyspark.sql import SparkSession
from fugue_spark import SparkExecutionEngine
from fugue_sql import fsql
register_execution_engine(
SparkSession,
lambda session, conf: SparkExecutionEngine(session, conf))
spark_session = SparkSession.builder.getOrCreate()
fsql('''
CREATE [[0]] SCHEMA a:int
PRINT
''').run(spark_session)
"""
_EXECUTION_ENGINE_FACTORY.register(name_or_type, func, on_dup)
def register_default_execution_engine(func: Callable, on_dup="overwrite") -> None:
"""Register :class:`~fugue.execution.execution_engine.ExecutionEngine` as the
default engine.
:param func: a callable taking |ParamsLikeObject| and ``**kwargs`` and returning an
:class:`~fugue.execution.execution_engine.ExecutionEngine` instance
:param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore"
(not overwriting) or "throw" (throw exception), defaults to "overwrite".
:raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists
.. admonition:: Examples
.. code-block:: python
# create a new engine with name my (overwrites if existed)
register_default_execution_engine(lambda conf: MyExecutionEngine(conf))
# the following examples will use MyExecutionEngine
# 0
make_execution_engine()
            make_execution_engine(None, {"myconfig":"value"})
# 1
with FugueWorkflow() as dag:
dag.create([[0]],"a:int").show()
# 2
dag = FugueWorkflow()
dag.create([[0]],"a:int").show()
            dag.run(None, {"myconfig":"value"})
# 3
fsql('''
CREATE [[0]] SCHEMA a:int
PRINT
            ''').run("", {"myconfig":"value"})
"""
_EXECUTION_ENGINE_FACTORY.register_default(func, on_dup)
def register_sql_engine(name: str, func: Callable, on_dup="overwrite") -> None:
"""Register :class:`~fugue.execution.execution_engine.SQLEngine` with
a given name.
:param name: name of the SQL engine
:param func: a callable taking
:class:`~fugue.execution.execution_engine.ExecutionEngine`
and ``**kwargs`` and returning a
:class:`~fugue.execution.execution_engine.SQLEngine` instance
:param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore"
(not overwriting) or "throw" (throw exception), defaults to "overwrite".
:raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists
.. admonition:: Examples
.. code-block:: python
# create a new engine with name my (overwrites if existed)
register_sql_engine("mysql", lambda engine: MySQLEngine(engine))
# create execution engine with MySQLEngine as the default
make_execution_engine(("", "mysql"))
# create DaskExecutionEngine with MySQLEngine as the default
make_execution_engine(("dask", "mysql"))
# default execution engine + MySQLEngine
with FugueWorkflow(("","mysql")) as dag:
dag.create([[0]],"a:int").show()
"""
_EXECUTION_ENGINE_FACTORY.register_sql_engine(name, func, on_dup)
def register_default_sql_engine(func: Callable, on_dup="overwrite") -> None:
"""Register :class:`~fugue.execution.execution_engine.SQLEngine` as the
default engine
:param func: a callable taking
:class:`~fugue.execution.execution_engine.ExecutionEngine`
and ``**kwargs`` and returning a
:class:`~fugue.execution.execution_engine.SQLEngine` instance
:param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore"
(not overwriting) or "throw" (throw exception), defaults to "overwrite".
:raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists
.. note::
You should be careful to use this function, because when you set a custom
SQL engine as default, all execution engines you create will use this SQL
engine unless you are explicit. For example if you set the default SQL engine
to be a Spark specific one, then if you start a NativeExecutionEngine, it will
try to use it and will throw exceptions.
So it's always a better idea to use ``register_sql_engine`` instead
.. admonition:: Examples
.. code-block:: python
# create a new engine with name my (overwrites if existed)
register_default_sql_engine(lambda engine: MySQLEngine(engine))
# create NativeExecutionEngine with MySQLEngine as the default
make_execution_engine()
# create SparkExecutionEngine with MySQLEngine instead of SparkSQLEngine
make_execution_engine("spark")
# NativeExecutionEngine with MySQLEngine
with FugueWorkflow() as dag:
dag.create([[0]],"a:int").show()
"""
_EXECUTION_ENGINE_FACTORY.register_default_sql_engine(func, on_dup)
def make_execution_engine(
engine: Any = None, conf: Any = None, **kwargs: Any
) -> ExecutionEngine:
"""Create :class:`~fugue.execution.execution_engine.ExecutionEngine`
with specified ``engine``
:param engine: it can be empty string or null (use the default execution
engine), a string (use the registered execution engine), an
:class:`~fugue.execution.execution_engine.ExecutionEngine` type, or
the :class:`~fugue.execution.execution_engine.ExecutionEngine` instance
, or a tuple of two values where the first value represents execution
engine and the second value represents the sql engine (you can use ``None``
for either of them to use the default one), defaults to None
:param conf: |ParamsLikeObject|, defaults to None
:param kwargs: additional parameters to initialize the execution engine
:return: the :class:`~fugue.execution.execution_engine.ExecutionEngine`
instance
.. admonition:: Examples
.. code-block:: python
register_default_execution_engine(lambda conf: E1(conf))
register_execution_engine("e2", lambda conf, **kwargs: E2(conf, **kwargs))
register_sql_engine("s", lambda conf: S2(conf))
# E1 + E1.default_sql_engine
make_execution_engine()
# E2 + E2.default_sql_engine
            make_execution_engine("e2")
# E1 + S2
make_execution_engine((None, "s"))
# E2(conf, a=1, b=2) + S2
make_execution_engine(("e2", "s"), conf, a=1, b=2)
# SparkExecutionEngine + SparkSQLEngine
make_execution_engine(SparkExecutionEngine)
make_execution_engine(SparkExecutionEngine(spark_session, conf))
# SparkExecutionEngine + S2
make_execution_engine((SparkExecutionEngine, "s"))
"""
import fugue._utils.register # pylint: disable=W0611 # noqa: F401
return _EXECUTION_ENGINE_FACTORY.make(engine, conf, **kwargs)
def make_sql_engine(
engine: Any = None,
execution_engine: Optional[ExecutionEngine] = None,
**kwargs: Any,
) -> SQLEngine:
"""Create :class:`~fugue.execution.execution_engine.SQLEngine`
with specified ``engine``
:param engine: it can be empty string or null (use the default SQL
engine), a string (use the registered SQL engine), an
:class:`~fugue.execution.execution_engine.SQLEngine` type, or
the :class:`~fugue.execution.execution_engine.SQLEngine` instance
(you can use ``None`` to use the default one), defaults to None
:param execution_engine: the
:class:`~fugue.execution.execution_engine.ExecutionEngine` instance
to create
the :class:`~fugue.execution.execution_engine.SQLEngine`. Normally you
should always provide this value.
:param kwargs: additional parameters to initialize the sql engine
:return: the :class:`~fugue.execution.execution_engine.SQLEngine`
instance
.. note::
For users, you normally don't need to call this function directly.
Use ``make_execution_engine`` instead
.. admonition:: Examples
.. code-block:: python
register_default_sql_engine(lambda conf: S1(conf))
register_sql_engine("s2", lambda conf: S2(conf))
engine = NativeExecutionEngine()
# S1(engine)
make_sql_engine(None, engine)
# S1(engine, a=1)
make_sql_engine(None, engine, a=1)
# S2(engine)
make_sql_engine("s2", engine)
# SqliteEngine(engine)
make_sql_engine(SqliteEngine)
"""
import fugue._utils.register # pylint: disable=W0611 # noqa: F401
return _EXECUTION_ENGINE_FACTORY.make_sql_engine(engine, execution_engine, **kwargs)
|
ofili/Wrangle-and-Analyze-Data
|
venv/Lib/site-packages/fugue/execution/factory.py
|
factory.py
|
py
| 15,192 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70037278908
|
import math
from cmath import exp
import numpy as np
import pandas as pd
from Operators import Operator, Density_Matrix, Observable
from Many_Body import tensor_product
from Nuclear_Spin import Nuclear_Spin, Many_Spins
def h_zeeman(spin, theta_z, phi_z, B_0):
"""
Computes the term of the Hamiltonian associated with the Zeeman interaction between the nuclear spin and the external static field.
Parameters
----------
- spin: Nuclear_Spin
Spin under study;
- theta_z: float
Polar angle of the magnetic field in the laboratory coordinate system (expressed in radians);
- phi_z: float
Azimuthal angle of the magnetic field in the laboratory coordinate system (expressed in radians);
- B_0: non-negative float
Magnitude of the external magnetic field (expressed in tesla).
Returns
-------
An Observable object which represents the Zeeman Hamiltonian in the laboratory reference frame (expressed in MHz).
Raises
------
ValueError, when the passed B_0 is a negative number.
"""
if B_0<0: raise ValueError("The modulus of the magnetic field must be a non-negative quantity")
h_z = -spin.gyro_ratio_over_2pi*B_0* \
(math.sin(theta_z)*math.cos(phi_z)*spin.I['x'] + \
math.sin(theta_z)*math.sin(phi_z)*spin.I['y'] + \
math.cos(theta_z)*spin.I['z'])
return Observable(h_z.matrix)
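# Note (editor's comment): h_zeeman implements H_Z = -(gamma/2pi) * B_0 * (n . I),
# where n = (sin(theta_z)cos(phi_z), sin(theta_z)sin(phi_z), cos(theta_z)) is the
# unit vector along the static field, expressed in the laboratory frame.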
def h_quadrupole(spin, e2qQ, eta, alpha_q, beta_q, gamma_q):
"""
Computes the term of the Hamiltonian associated with the quadrupolar interaction.
Parameters
----------
- spin: Nuclear_Spin
Spin under study;
- e2qQ: float
Product of the quadrupole moment constant, eQ, and the eigenvalue of the EFG tensor which is greatest in absolute value, eq. e2qQ is measured in MHz;
- eta: float in the interval [0, 1]
Asymmetry parameter of the EFG;
- alpha_q, beta_q, gamma_q: float
Euler angles for the conversion from the system of the principal axes of the EFG tensor (PAS) to the lab system (LAB) (expressed in radians).
Returns
-------
If the quantum number of the spin is 1/2, the whole calculation is skipped and a null Observable object is returned.
Otherwise, the function returns the Observable object which correctly represents the quadrupolar Hamiltonian in the laboratory reference frame (expressed in MHz).
"""
if math.isclose(spin.quantum_number, 1/2, rel_tol=1e-10):
return Observable(spin.d)*0
I = spin.quantum_number
h_q = (e2qQ/(I*(2*I-1)))* \
((1/2)*(3*(spin.I['z']**2) - Operator(spin.d)*I*(I+1))*v0_EFG(eta, alpha_q, beta_q, gamma_q)+\
(math.sqrt(6)/4)*
((spin.I['z']*spin.I['+'] + spin.I['+']*spin.I['z'])*\
v1_EFG(-1, eta, alpha_q, beta_q, gamma_q) + \
(spin.I['z']*spin.I['-'] + spin.I['-']*spin.I['z'])*\
v1_EFG(+1, eta, alpha_q, beta_q, gamma_q) + \
(spin.I['+']**2)*\
v2_EFG(-2, eta, alpha_q, beta_q, gamma_q) + \
(spin.I['-']**2)*\
v2_EFG(2, eta, alpha_q, beta_q, gamma_q)))
return Observable(h_q.matrix)
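# Note (editor's comment): h_quadrupole builds the lab-frame quadrupolar Hamiltonian
# from the EFG components v0, v(+/-1) and v(+/-2) returned by the helper functions
# below, which carry the dependence on the Euler angles (alpha_q, beta_q, gamma_q)
# and on the asymmetry parameter eta.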
def v0_EFG(eta, alpha_q, beta_q, gamma_q):
"""
Returns the component V0 of the EFG tensor (divided by eq) as seen in the LAB system. This quantity is expressed in terms of the Euler angles which relate PAS and LAB systems and the parameter eta.
Parameters
----------
- eta: float in the interval [0, 1]
Asymmetry parameter of the EFG;
- alpha_q, beta_q, gamma_q: float
Euler angles connecting the system of the principal axes of the EFG tensor (PAS) to the lab system (LAB) (expressed in radians).
Returns
-------
A float representing the component V0 (divided by eq) of the EFG tensor evaluated in the LAB system.
    Raises
    ------
    ValueError, when the passed eta is not in the interval [0, 1].
"""
if eta<0 or eta>1: raise ValueError("The asymmetry parameter must fall in the interval [0, 1]")
v0 = (1/2)*(((3*(math.cos(beta_q))**2-1)/2) - (eta*(math.sin(beta_q))**2)*(math.cos(2*gamma_q))/2)
return v0
def v1_EFG(sign, eta, alpha_q, beta_q, gamma_q):
"""
Returns the components V+/-1 of the EFG tensor (divided by eq) as seen in the LAB system. These quantities are expressed in terms of the Euler angles which relate PAS and LAB systems and the parameter eta.
Parameters
----------
- sign: float
        Specifies whether the V+1 or the V-1 component is to be computed;
- eta: float in the interval [0, 1]
Asymmetry parameter of the EFG;
- alpha_q, beta_q, gamma_q: float
Euler angles connecting the system of the principal axes of the EFG tensor (PAS) to the lab system (LAB) (expressed in radians).
Returns
-------
A complex number representing the component:
- V<sup>+1</sup>, if sign is positive;
- V<sup>-1</sup>, if sign is negative.
of the EFG tensor (divided by eq).
Raises
------
ValueError, when the passed eta is not in the interval [0, 1].
"""
if eta<0 or eta>1: raise ValueError("The asymmetry parameter must fall within the interval [0, 1]")
sign = np.sign(sign)
v1 = (1/2)*\
(
-1j*sign*math.sqrt(3/8)*math.sin(2*beta_q)*exp(sign*1j*alpha_q)+\
1j*(eta/(math.sqrt(6)))*math.sin(beta_q)*\
(
((1+sign*math.cos(beta_q))/2)*exp(1j*(sign*alpha_q+2*gamma_q))-\
((1-sign*math.cos(beta_q))/2)*exp(1j*(sign*alpha_q-2*gamma_q))
)
)
return v1
def v2_EFG(sign, eta, alpha_q, beta_q, gamma_q):
"""
Returns the components V+/-2 of the EFG tensor (divided by eq) as seen in the LAB system. These quantities are expressed in terms of the Euler angles which relate PAS and LAB systems and the parameter eta.
Parameters
----------
- sign: float
        Specifies whether the V+2 or the V-2 component is to be returned;
- eta: float in the interval [0, 1]
Asymmetry parameter of the EFG tensor;
- alpha_q, beta_q, gamma_q: float
Euler angles connecting the system of the principal axes of the EFG tensor (PAS) to the lab system (LAB) (expressed in radians).
Returns
-------
A float representing the component:
- V+2, if sign is positive;
- V-2, if sign is negative.
of the EFG tensor (divided by eq).
Raises
------
ValueError, when the passed eta is not in the interval [0, 1].
"""
if eta<0 or eta>1: raise ValueError("The asymmetry parameter must fall in the interval [0, 1]")
sign = np.sign(sign)
v2 = (1/2)*\
(math.sqrt(3/8)*((math.sin(beta_q))**2)*exp(sign*2j*alpha_q)+\
(eta/math.sqrt(6))*exp(sign*2j*alpha_q)*\
(
exp(2j*gamma_q)*((1+sign*math.cos(beta_q))**2)/4 +\
exp(-2j*gamma_q)*((1-sign*math.cos(beta_q))**2)/4
)
)
return v2
def h_single_mode_pulse(spin, frequency, B_1, phase, theta_1, phi_1, t):
"""
Computes the term of the Hamiltonian describing the interaction with a monochromatic and linearly polarized electromagnetic pulse.
Parameters
----------
- spin: Nuclear_Spin
Spin under study.
- frequency: non-negative float
Frequency of the monochromatic wave (expressed in MHz).
- phase: float
    Initial phase of the wave (at t=0) (expressed in radians).
- B_1: non-negative float
Maximum amplitude of the oscillating magnetic field (expressed in tesla).
- theta_1, phi_1: float
Polar and azimuthal angles of the direction of polarization of the magnetic wave in the LAB frame (expressed in radians);
- t: float
Time of evaluation of the Hamiltonian (expressed in microseconds).
Returns
-------
An Observable object which represents the Hamiltonian of the coupling with the electromagnetic pulse evaluated at time t (expressed in MHz).
Raises
------
ValueError, in two distinct cases:
1. When the passed frequency parameter is a negative quantity;
2. When the passed B_1 parameter is a negative quantity.
"""
    if frequency < 0: raise ValueError("The modulus of the angular frequency of the electromagnetic wave must be a non-negative quantity")
    if B_1 < 0: raise ValueError("The amplitude of the electromagnetic wave must be a non-negative quantity")
h_pulse = -spin.gyro_ratio_over_2pi*B_1*\
(math.sin(theta_1)*math.cos(phi_1)*spin.I['x'] +\
math.sin(theta_1)*math.sin(phi_1)*spin.I['y'] +\
math.cos(theta_1)*spin.I['z']
)*\
math.cos(2*math.pi*frequency*t-phase)
return Observable(h_pulse.matrix)
def h_multiple_mode_pulse(spin, mode, t):
"""
Computes the term of the Hamiltonian describing the interaction with a superposition of single-mode electromagnetic pulses. If the passed argument spin is a Nuclear_Spin object, the returned Hamiltonian will describe the interaction between the pulse of radiation and the single spin; if it is a Many_Spins object, it will represent the interaction with the whole system of many spins.
Parameters
----------
- spin: Nuclear_Spin or Many_Spins
Spin or spin system under study;
- mode: pandas.DataFrame
Table of the parameters of each electromagnetic mode in the superposition. It is organised according to the following template:
| index | 'frequency' | 'amplitude' | 'phase' | 'theta_p' | 'phi_p' |
| ----- | ------------- | ------------- | --------- | ----------- | --------- |
| | (MHz) | (T) | (rad) | (rad) | (rad) |
| 0 | omega_0 | B_0 | phase_0 | theta_0 | phi_0 |
| 1 | omega_1 | B_1 | phase_1 | theta_1 | phi_1 |
| ... | ... | ... | ... | ... | ... |
| N | omega_N | B_N | phase_N | theta_N | phi_N |
where the meaning of each column is analogous to the corresponding parameters in h_single_mode_pulse.
- t: float
Time of evaluation of the Hamiltonian (expressed in microseconds).
Returns
-------
An Observable object which represents the Hamiltonian of the coupling with the superposition of the given modes evaluated at time t (expressed in MHz).
"""
h_pulse = Operator(spin.d)*0
omega = mode['frequency']
B = mode['amplitude']
phase = mode['phase']
theta = mode['theta_p']
phi = mode['phi_p']
if isinstance(spin, Many_Spins):
for i in mode.index:
for n in range(spin.n_spins):
term_n = h_single_mode_pulse(spin.spin[n], omega[i], B[i], phase[i], theta[i], phi[i], t)
for m in range(spin.n_spins)[:n]:
term_n = tensor_product(Operator(spin.spin[m].d), term_n)
for l in range(spin.n_spins)[n+1:]:
term_n = tensor_product(term_n, Operator(spin.spin[l].d))
h_pulse = h_pulse + term_n
elif isinstance(spin, Nuclear_Spin):
for i in mode.index:
h_pulse = h_pulse + h_single_mode_pulse(spin, omega[i], B[i], phase[i], theta[i], phi[i], t)
return Observable(h_pulse.matrix)
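# Illustrative usage (numeric values assumed): the mode table is an ordinary pandas
# DataFrame with the columns listed in the docstring above, e.g.
#   import pandas as pd
#   mode = pd.DataFrame({'frequency': [10.], 'amplitude': [1e-2], 'phase': [0.],
#                        'theta_p': [math.pi/2], 'phi_p': [0.]})
#   h_p = h_multiple_mode_pulse(spin, mode, t=0.)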
# Global Hamiltonian of the system (stationary term + pulse term) cast in the picture generated by
# the Operator h_change_of_picture
def h_changed_picture(spin, mode, h_unperturbed, h_change_of_picture, t):
"""
Returns the global Hamiltonian of the system, made up of the time-dependent term h_multiple_mode_pulse(spin, mode, t) and the stationary term h_unperturbed, cast in the picture generated by h_change_of_picture.
Parameters
----------
- spin, mode, t: same meaning as the corresponding arguments of h_multiple_mode_pulse;
- h_unperturbed: Operator
Stationary term of the global Hamiltonian (in MHz);
- h_change_of_picture: Operator
Operator which generates the new picture (in MHz).
Returns
-------
Observable object representing the Hamiltonian of the pulse evaluated at time t in the new picture (in MHz).
"""
h_cp = (h_unperturbed + h_multiple_mode_pulse(spin, mode, t) - \
h_change_of_picture).changed_picture(h_change_of_picture, t)
return Observable(h_cp.matrix)
def h_j_coupling(spins, j_matrix):
"""
Returns the term of the Hamiltonian describing the J-coupling between the spins of a system of many nuclei.
Parameters
----------
- spins: Many_Spins
Spins' system under study;
- j_matrix: np.ndarray
Array storing the coefficients Jmn which enter the formula for the computation of the Hamiltonian for the j-coupling.
Remark: j_matrix doesn't have to be symmetric, since the function reads only those elements located in the upper half with respect to the diagonal. This means that the elements j_matrix[m, n] which matter are those for which m<n.
Returns
-------
Observable object acting on the full Hilbert space of the spins' system representing the Hamiltonian of the J-coupling between the spins.
"""
h_j = Operator(spins.d)*0
for m in range(j_matrix.shape[0]):
for n in range(m):
term_nm = j_matrix[n, m]*spins.spin[n].I['z']
for l in range(n):
term_nm = tensor_product(Operator(spins.spin[l].d), term_nm)
for k in range(m)[n+1:]:
term_nm = tensor_product(term_nm, Operator(spins.spin[k].d))
term_nm = tensor_product(term_nm, spins.spin[m].I['z'])
for j in range(spins.n_spins)[m+1:]:
term_nm = tensor_product(term_nm, Operator(spins.spin[j].d))
h_j = h_j + term_nm
return h_j.cast_to_observable()
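# Illustrative sketch (coupling value assumed): for a two-spin system only the upper
# triangle of j_matrix is read, so
#   j_matrix = np.array([[0., 0.1],
#                        [0., 0. ]])
#   h_j = h_j_coupling(spins, j_matrix)
# builds 0.1 * Iz(spin 0) (x) Iz(spin 1) on the full Hilbert space of the system.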
|
DavideCandoli/PULSEE
|
Code/Hamiltonians.py
|
Hamiltonians.py
|
py
| 14,405 |
python
|
en
|
code
| 1 |
github-code
|
6
|
1130853243
|
# Get all positions around given point within 2 km circle.
# ZHOU Kunpeng, 14 Dec 2018
from model import models
from controller import utils
# Get all positions around 2km range centered at given point
class GetPositionsAround():
# params: longitude (float), latitude(float)
# returns: a list of positions with their item information
def getPositionsAround(self, longitude, latitude):
# Range of the circle (only get positions within range)
RANGE = 2
positionsAround = []
positions = models.Position.objects.all()
for position in positions:
# within 2 km range
dist = utils.getDistance(longitude, latitude, position.longitude, position.latitude)
if dist <= RANGE:
# positionsAround.append([position.id, position.longitude, position.latitude, position])
positionsAround.append(position)
return positionsAround
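# Illustrative usage (coordinates assumed):
#   nearby = GetPositionsAround().getPositionsAround(116.40, 39.90)
# returns every Position within RANGE (2 km) of the given point.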
# Get all positions on map
class GetAllPositions():
    # params: none
    # returns: a list of all positions with their item information
def getAllPositions(self):
positionsRet = []
positions = models.Position.objects.all()
for position in positions:
positionsRet.append(position)
return positionsRet
# update last location of a user
class UpdateUserLocation():
# params: user(string), longitude(float), latitude(float)
def updateUserLocation(self, userId, longitude, latitude):
user = models.User.objects.get(wechatId = userId)
user.lastLongitude = longitude
user.lastLatitude = latitude
user.save()
from controller.pet import petDAOs
# A user checks in a given position
class CheckIn():
# params: wechatId (string), positionId (int)
# returns: a dictionary that shows total effect on user's pet.
def checkIn(self, wechatId, positionId):
# get user and position
        user = models.User.objects.filter(wechatId = wechatId).first()
        position = models.Position.objects.filter(id = positionId).first()
        if user is None or position is None:
return None
# check if this place (position) has been checked in by this user
checkInQuery = models.CheckInRecord.objects.filter(user=user, point=position)
if len(checkInQuery) != 0:
return None
# Create a check-in record
checkInRecord = models.CheckInRecord.objects.create(user=user, point=position)
# Upgrade pet's ability
pet = user.pets.all()[0]
item = position.itemLinked
# pet.experience += item.addExp
pet.health += item.addHealth
pet.attack += item.addAttack
pet.defend += item.addDefend
pet.speed += item.addSpeed
pet.dodgeRate += item.addDodgeRate
pet.save()
# update pet's experience (if leveled-up, ability will be updated accordingly)
petDAOs.UpdateExperience().updateExperience(pet.id, pet.experience + item.addExp)
return item
|
wangtong2015/EnjoyWideWorld
|
back-end/EnjoyWideWorld/controller/map/mapDAOs.py
|
mapDAOs.py
|
py
| 3,048 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18823308841
|
import random
DESCRIPTION = "Find the greatest common divisor of given numbers."
def find_gcd(x, y):
while x and y:
if x > y:
x = x % y
else:
y = y % x
    return x + y
def get_question_and_answer():
a = random.randint(1, 100)
b = random.randint(1, 100)
question = f"{a} {b}"
correct_answer = str(find_gcd(a, b))
return question, correct_answer
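# Example (illustrative): find_gcd(12, 18) evaluates to 6, so get_question_and_answer()
# could yield the pair ("12 18", "6").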
|
QQpy3ko/project-lvl1-s566
|
brain_games/games/brain_gcd.py
|
brain_gcd.py
|
py
| 436 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39201428004
|
import cv2
import time
import os
import HandTrackingModule as htm
from dronekit import connect, VehicleMode, LocationGlobalRelative, APIException
import time
import socket
import math
import argparse
from pymavlink import mavutil
from time import sleep
import numpy as np
###################################################################################
def connectMyCopter():
parser=argparse.ArgumentParser(description='commands')
parser.add_argument('--connect', default='127.0.0.1:14550')
args=parser.parse_args()
connection_string=args.connect
baud_rate=921600
vehicle=connect(connection_string, baud=baud_rate, wait_ready=True)
return vehicle
###################################################################################
# Function to arm and takeoff
def arm_and_takeoff(TargetAltitude):
# Switch vehicle to Guided Mode
vehicle.mode = VehicleMode("GUIDED")
while vehicle.mode!="GUIDED":
print("Waiting for guided mode")
time.sleep(1)
# Arming the Vehicle
vehicle.armed = True
while vehicle.armed == False:
print("Waiting for the vehicle to be armed")
time.sleep(1)
vehicle.simple_takeoff(TargetAltitude)
while True:
print("Current Altitude: %d" , vehicle.location.global_relative_frame.alt)
if vehicle.location.global_relative_frame.alt >= TargetAltitude*.95:
break
time.sleep(1)
print("Target Altitude reached")
return None
##################################################################
#-- Define the function for sending mavlink velocity command in body frame
def set_velocity_body(vehicle, vx, vy, vz):
""" Remember: vz is positive downward!!!
http://ardupilot.org/dev/docs/copter-commands-in-guided-mode.html
Bitmask to indicate which dimensions should be ignored by the vehicle
(a value of 0b0000000000000000 or 0b0000001000000000 indicates that
none of the setpoint dimensions should be ignored). Mapping:
bit 1: x, bit 2: y, bit 3: z,
bit 4: vx, bit 5: vy, bit 6: vz,
    bit 7: ax, bit 8: ay, bit 9: az
"""
msg = vehicle.message_factory.set_position_target_local_ned_encode(
0,
0, 0,
mavutil.mavlink.MAV_FRAME_BODY_NED,
0b0000111111000111, #-- BITMASK -> Consider only the velocities
0, 0, 0, #-- POSITION
vx, vy, vz, #-- VELOCITY
0, 0, 0, #-- ACCELERATIONS
0, 0)
vehicle.send_mavlink(msg)
vehicle.flush()
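# Illustrative calls (velocities in m/s, body frame; values assumed):
#   set_velocity_body(vehicle, 1.0, 0, 0)    # forward
#   set_velocity_body(vehicle, 0, 0, -0.5)   # up (vz is positive downward)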
###################################################################
vehicle = connectMyCopter()
wCam, hCam = 640, 480
deadZone = 100
pTime = 0
cap = cv2.VideoCapture(0)
cap.set(3, wCam)
cap.set(4, hCam)
detector = htm.handDetector(detectionCon=0.8, maxHands=1)
x = [300, 245, 200, 170, 145, 130, 112, 103, 93, 87, 80, 75, 70, 67, 62, 59, 57]
y = [20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100]
coff = np.polyfit(x, y, 2) # y = AX^2 + BX + C
c = []
i = 0
tipIds = [4, 8, 12, 16, 20]
while True:
success, img = cap.read()
img = detector.findHands(img)
lmList = detector.findPosition(img, draw=False)
#print(lmList)
if len(lmList) !=0:
fingers = []
# Thumb . Here the x value of thumb tip is compared with the x value of mid thumb
if lmList[tipIds[0]][1] > lmList[tipIds[0] - 1][1]:
fingers.append(1)
else:
fingers.append(0)
# Other Fingers
for id in range(1,5):
if lmList[tipIds[id]][2] < lmList[tipIds[id]-2][2]:
fingers.append(1)
else:
fingers.append(0)
#print(sum(fingers))
x1, y1 = lmList[5][1], lmList[5][2]
x2, y2 = lmList[17][1], lmList[17][2]
cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
ty = lmList[4][2]
#print(cx, cy)
cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)
#length = int(math.hypot(x2 - x1, y2 - y1))
#A, B, C = coff
#distanceCM = A*length**2 + B*length + C
#print(distanceCM)
if sum(fingers) == 0:
print(" Arm and Takeoff ")
arm_and_takeoff(2)
if sum(fingers) == 5:
if ((cx < int(wCam/2) + deadZone) and (cx > int(wCam/2) - deadZone)):
print("Hold Position")
set_velocity_body(vehicle, 0, 0, 0)
if (cx < int(wCam/2) - deadZone):
print("Moving Right")
set_velocity_body(vehicle, 0, 0.5, 0)
if (cx > int(wCam/2) + deadZone):
print("Moving Left")
set_velocity_body(vehicle, 0, -0.5, 0)
if sum(fingers) == 1:
if ((ty < int(hCam/2) + deadZone) and (ty > int(hCam/2) - deadZone)):
print("Hold Position")
set_velocity_body(vehicle, 0, 0, 0)
if (ty < int(hCam/2) - deadZone):
print("Moving Up")
set_velocity_body(vehicle, 0, 0, -1)
if (ty > int(hCam/2) + deadZone):
print("Moving Down")
set_velocity_body(vehicle, 0, 0, 1)
#if sum(fingers) == 5:
# c.append(cx)
# if len(c)!=0:
# for i in range(len(c)):
# difference = c[i]-c[i-1]
#print(difference)
# if difference > 0:
# print("Moving Left")
# set_velocity_body(vehicle, 0, -3, 0)
# elif difference < 0:
# print("Moving Right")
# set_velocity_body(vehicle, 0, 3, 0)
# elif difference == 0:
# print("Hold Position")
# set_velocity_body(vehicle, 0, 0, 0)
#
#print(" Moving Right ")
#set_velocity_body(vehicle, distanceCM*0.05, 0, 0)
cTime = time.time()
fps = 1 / (cTime - pTime)
pTime = cTime
cv2.putText(img, f'FPS: {int(fps)}', (40, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 3)
cv2.imshow("Image", img)
cv2.waitKey(1)
|
2ashishmohan/Hand-Gesture-Controlled-Quadcopter-UAV
|
HandTrackingDemo.py
|
HandTrackingDemo.py
|
py
| 6,355 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70096829629
|
import sys
"""
์ด๋ถ ํ์ ๊ธฐ๋ฐ ํ์ด
์์ ์ง์ ์ด end๋ณด๋ค ์๊ฑฐ๋ ๊ฐ์์ง ๋ ๊น์ง ์งํ
์์์ 1, ๋์ ๋ - ์์์ผ๋ก ์งํ.
๋ ๊ฐ์ ๊ฑฐ๋ฆฌ๋ฅผ ์ต๋๋ก ํ๊ธฐ ์ํด์๋ middle๋ณด๋ค ์ปค์ผํ๋ค.
x๋ฅผ ๊ฐฑ์ ํด์ ๊ณต์ ๊ธฐ ์์น๋ฅผ ์กฐ์ ํ๋ค.
cnt๊ฐ c๋ณด๋ค ํฌ๊ฑฐ๋ ๊ฐ์ ๊ฒฝ์ฐ -> ๊ณต์ ๊ธฐ๋ฅผ ๋ง์ด ์ค์นํ ์ผ์ด์ค -> start ์
๋ฐ์ดํธ ํ๊ณ , ans๋ฅผ middle๋ก ์ค์
cnt๊ฐ c๋ณด๋ค ์ ์ ๊ฒฝ์ฐ -> ๊ณต์ ๊ธฐ๋ฅผ ์ ๊ฒ ์ค์นํ ์ผ์ด์ค -> end ์
๋ฐ์ดํธ
"""
n, c = map(int, sys.stdin.readline().split(" "))
house = []
for _ in range(n):
house.append(int(sys.stdin.readline()))
house.sort()
start = 1
end = house[-1] - house[0]
while start <= end:
middle = (start + end) // 2
x = house[0]
cnt = 1
for i in range(len(house)):
if house[i] - x >= middle:
x = house[i]
cnt += 1
if cnt >= c:
start = middle + 1
ans = middle
elif cnt < c:
end = middle - 1
print(ans)
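# Example (illustrative): n=5, c=3 with houses 1 2 8 4 9 -> the maximum possible
# minimum distance between routers is 3 (e.g. routers at positions 1, 4 and 8).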
|
YooGunWook/coding_test
|
백준/백준_2110번.py
|
백준_2110번.py
|
py
| 1,031 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
10702973519
|
'''from math import prod
a=int(input())
for i in range(10,10001):
if prod(list(map(int,str(i))))==a:
print(i)
break
else:
print("Not Possible")
'''
n=int(input())
a=[]
for i in range(n):
a+=[int(input())]
if n<2:
print("Invalid Input")
else:
a.sort()
if n == a.count(a[0]):
print("Equal")
else:
print(a[0],a[1],sep=" ")
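# Example run (illustrative): n=3 with inputs 5, 2, 9 prints "2 5";
# n=2 with inputs 7 and 7 prints "Equal".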
|
hariss0411/Python-Codes
|
tcs_digital.py
|
tcs_digital.py
|
py
| 408 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33145366381
|
def read():
    # read one integer per line from the puzzle input
    file = open("PuzzleInput_9.txt", "r")
    numbers = []
    for x in file.readlines():
        x = x.strip()
        x = int(x)
        numbers.append(x)
    file.close()
    return numbers
numbers = read()
# Part 1: find the number that is not the sum of two of the 25 numbers before it
for x in range(25, len(numbers)):
    check = False
    for y in numbers[x-25: x]:
        if numbers[x] - y in numbers[x-25: x]:
            check = True
    if not check:
        invalid_number = numbers[x]
print(f"part 1:{invalid_number}")
# Part 2: find a contiguous range that sums to the invalid number
index_number = 0
index_check = 0
answer = []
answer_fnd = True
while answer_fnd:
    check_sum = 0
    index_check = index_number
    while True:
        if check_sum == invalid_number:
            for x in numbers[index_number:index_check]:
                answer.append(x)
            answer_fnd = False
            break
        if check_sum > invalid_number:
            break
        if index_check == len(numbers):
            break
        check_sum += numbers[index_check]
        index_check += 1
    index_number += 1
print(f"part 2:{max(answer) + min(answer)}")
|
ArviWasTaken/AdventOfCode
|
src/main/python/2020/DAY_09/Code_9.py
|
Code_9.py
|
py
| 1,063 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18405151031
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 17:56:03 2020
@author: mints
"""
import logging
import itertools
import joblib
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from pandas.core.common import SettingWithCopyWarning
from semiphore_public.cuda.cudaprocessor import CudaProcessor
from semiphore_public.utils import interpolate
warnings.filterwarnings('ignore', category=AstropyUserWarning, append=True)
warnings.filterwarnings('ignore', category=SettingWithCopyWarning, append=True)
def distance(w1, w2, sed1, sed2, err1, err2):
"""Calculate distance between two SED templates
Args:
w1 (float): weight of the first SED
w2 (float): weight of the second SED
sed1 (float[]): magnitudes of the first SED
sed2 (float[]): magnitudes of the second SED
err1 (float[]): width of the first SED
err2 (float[]): width of the second SED
    Returns:
        float: the weighted mismatch ("distance") between the two SED templates.
"""
d = (w1 * (sed1 - sed2)**2 / (err1**2 + 1e-2),
w2 * (sed1 - sed2)**2 / (err2**2 + 1e-2))
return np.sum(np.sqrt(d))
def get_order(w1, w2, sed1, sed2, err1, err2):
"""Reorder SEDs. Here all parameters are arrays along the redshift.
Args:
w1 (float[]): weight of the first SED
w2 (float[]): weight of the second SED
sed1 (float[][]): magnitudes of the first SED
sed2 (float[][]): magnitudes of the second SED
err1 (float[][]): width of the first SED
err2 (float[][]): width of the second SED
Returns:
        tuple of int: the permutation of the second set's SED indices that best matches the first set.
"""
nn = len(w1)
d = np.zeros((nn, nn))
for i in range(nn):
for j in range(nn):
d[i, j] = distance(w1[i], w2[j],
sed1[i], sed2[j],
err1[i], err2[j])
smin = np.inf
tOk = None
for t in itertools.permutations(np.arange(nn, dtype=int), nn):
s = 0
for i in range(nn):
s += d[i, t[i]]
if s < smin:
smin = s
tOk = t
return tOk
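# Illustrative sketch (array shapes assumed): align the SEDs of one redshift bin with
# the previous one, as done in the reordering loop of the main block below:
#   order = list(get_order(w[i], w[i + 1], sed[i], sed[i + 1], err[i], err[i + 1]))
#   w[i + 1], sed[i + 1], err[i + 1] = w[i + 1][order], sed[i + 1][order], err[i + 1][order]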
if __name__ == '__main__':
import argparse
import logging
logger = logging.getLogger("FIT")
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
parser = argparse.ArgumentParser(description="""
Perform a full CUDA-based SED-PhotoZ fit.
""", formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-i', '--input', type=str, default=None,
help='Input filename')
parser.add_argument('-c', '--catalogs', type=str,
default=None, required=True,
help='Input catalogs to use (comma separated)')
parser.add_argument('-o', '--output', type=str, default=None,
help='Output filename')
parser.add_argument('-n', '--nsed', type=int, default=1,
help='Number of SEDs to fit')
parser.add_argument('-V', '--verbose', action="store_true",
default=False,
help='Be verbose')
args = parser.parse_args()
processor = CudaProcessor(args.catalogs.split(','), args.nsed)
results = []
sizes = []
logger.info("Load data from %s", args.catalogs)
processor.load_data(filename=args.input)
z_len = len(processor.z)
is_ok = []
izs = []
# Forward run
for z0, mags, errs in processor.iterate_data(size=1000):
logger.info("Forward run, redshift=%.2f", processor.z[int(z0)])
if len(results) > 0:
output = processor.run_on_data(mags, errs,
custom_params=results[-1][0])
else:
output = processor.run_on_data(mags, errs)
if output is not None:
res, size, _ = output
if res[1] >= processor.MAX_ITERATIONS * args.nsed:
                logger.warning(f'Iteration count exceeded for z nr {z0}')
is_ok.append(False)
else:
is_ok.append(True)
results.append(res)
sizes.append(size)
izs.append(z0)
# Backward run:
for ii in range(len(izs)-2, 0, -1):
if not is_ok[ii + 1] or not is_ok[ii]:
continue
old_norm = results[ii][2] / sizes[ii]
if results[ii + 1][2] / sizes[ii + 1] > old_norm:
logger.info("Backward run, redshift=%.2f",
processor.z[int(izs[ii])])
mags, errs = processor.get_data_for_zs(izs[ii])
output = processor.run_on_data(mags, errs,
custom_params=results[ii+1][0])
if output is not None:
res, size, _ = output
if res[2] / size >= results[ii][2] / sizes[ii]:
logger.debug(f'...new l_norm={res[2] / size} is better')
results[ii] = res
sizes[ii] = size
else:
logger.debug(f'...new l_norm={res[2] / size} is lower, rejecting')
iz_min = int(np.ceil(np.min(izs)))
iz_max = int(np.ceil(np.max(izs)))
izs = processor.z[0] + np.array(izs) * 0.02
sed_shape = (z_len, processor.n_seds, len(processor.columns))
output = {'z': processor.z,
'names': processor.names,
'weights': np.zeros((z_len, processor.n_seds)),
'sed': np.zeros(sed_shape),
'err': np.zeros(sed_shape),
'l_values': np.zeros(len(izs)),
'iterations': np.zeros(len(izs)),
'sizes': sizes,
}
w = np.array([results[ii][0][0] for ii in range(len(results))])
sed = np.array([results[ii][0][1] for ii in range(len(results))])
err = np.array([results[ii][0][2] for ii in range(len(results))])
output['iterations'] = np.array([results[ii][1]
for ii in range(len(results))])
output['l_values'] = np.array([results[ii][2]
for ii in range(len(results))])
ind = np.argsort(w)
logger.info("Reordering...")
# Reordering
output['weights00'] = w
output['sed00'] = sed
output['err00'] = err
w_order = [w[0]]
sed_order = [sed[0]]
err_order = [err[0]]
for i in range(0, len(w)-1):
new_order = list(get_order(w_order[i], w[i+1],
sed_order[i], sed[i+1],
err_order[i], err[i+1]))
w_order.append(w[i + 1][new_order])
sed_order.append(sed[i + 1][new_order])
err_order.append(err[i + 1][new_order])
logger.info("Interpolating...")
# Interpolation
output['weights0'] = w_order
output['sed0'] = sed_order
output['err0'] = err_order
output['weights'] = interpolate.curve_processor(izs, np.array(w_order),
processor.z, is_log=True)
output['sed'] = interpolate.curve_processor(izs, np.array(sed_order),
processor.z, is_log=False)
output['err'] = interpolate.curve_processor(izs, np.array(err_order),
processor.z,
is_log=True, bounded=True)
output['weights'] = output['weights'] / \
output['weights'].sum(axis=1)[:, np.newaxis]
output['z_base'] = izs
output['input_file'] = args.input
if args.output is None:
names = '_'.join(processor.names)
outname = f'../calibrations/seds/{names}_{processor.n_seds}seds.joblib'
else:
outname = args.output
logger.info('Saving calibration to %s', outname)
joblib.dump(output, outname)
logger.info("Finished")
|
minzastro/semiphore_public
|
fit/complete_fit.py
|
complete_fit.py
|
py
| 7,837 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7165513164
|
# Answer to almostIncreasingSequence
# https://app.codesignal.com/arcade/intro/level-2/2mxbGwLzvkTCKAJMG
def almostIncreasingSequence(sequence):
droppped = False
last = prev = min(sequence) - 1
for elm in sequence:
if elm <= last:
if droppped:
return False
else:
droppped = True
if elm <= prev:
prev = last
elif elm >= prev:
prev = last = elm
else:
prev, last = last, elm
return True
# I had to take help to solve this.
# And this is taken from a user pharfenmeister
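# Examples: almostIncreasingSequence([1, 3, 2]) -> True (drop the 3 or the 2),
#           almostIncreasingSequence([1, 3, 2, 1]) -> False.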
|
CompetitiveCode/CodeSignal
|
Arcade/Intro/Edge of the Ocean/almostIncreasingSequence.py
|
almostIncreasingSequence.py
|
py
| 648 |
python
|
en
|
code
| 1 |
github-code
|
6
|
72579481788
|
from flask import Flask, request, redirect, url_for
from flask_jsonpify import jsonify
from flask import render_template
from flask import abort
from flask import Response
from flask_api import status
import json
from flaskext.mysql import MySQL
import pandas as pd
import requests
from datetime import datetime, timedelta
import matplotlib as plt
import base64
import io
app = Flask(__name__)
mysql = MySQL()
# MySQL configurations
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = ''
app.config['MYSQL_DATABASE_DB'] = 'cloud'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
mysql.init_app(app)
db = mysql.connect()
cursor = db.cursor()
@app.route('/')
def homepage():
return render_template('forecast.html')
@app.route('/historical/', methods=['GET','POST']) #lists all the dates
def historical():
if(request.method=='GET'):
dates_list = []
cursor.execute("select DATE from dailyweather")
query=cursor.fetchall()
my_hist = [i[0] for i in query]
for item in my_hist:
a = {"DATE":str(item)}
dates_list.append(a)
js = json.dumps(dates_list)
return js, 200
else:
l=request.get_json()
d=l['DATE']
tmax=l['TMAX']
tmin=l['TMIN']
obj = {}
cursor.execute("select DATE from dailyweather")
q=cursor.fetchall()
list=[i[0] for i in q]
x=0
for item in list:
if(int(d)==item):
x=1
if(x==1):
cursor.execute("update dailyweather set TMAX=%f, TMIN=%f where DATE=%d" %(float(tmax),float(tmin),int(d)))
else:
cursor.execute("insert into dailyweather values(%d,%f,%f)" % (int(d),float(tmax),float(tmin)))
db.commit()
obj={"DATE":str(d)}
return jsonify(obj), 201
@app.route('/historical/<string:DATE>', methods=['GET']) #gets the weather info of a particular day
def get_info(DATE):
obj = {}
l=[]
cursor.execute("select DATE,TMAX,TMIN from dailyweather where DATE =%d" % int(DATE))
q=cursor.fetchall()
if(len(q)>0):
for i in range(3):
l.append(q[0][i])
obj = {
"DATE": str(l[0]),
"TMAX": l[1],
"TMIN": l[2]
}
return jsonify(obj), 200
else:
return '', 404
@app.route('/historical/<int:DATE>', methods=['DELETE'])
def del_info(DATE):
obj={}
l=[]
cursor.execute("select DATE,TMAX,TMIN from dailyweather where DATE=%d" % int(DATE))
query=cursor.fetchall()
cursor.execute("delete from dailyweather where DATE=%d" % int(DATE))
db.commit()
if(len(query)>0):
for i in range(3):
l.append(str(query[0][i]))
obj = {
"DATE": l[0],
"TMAX": l[1],
"TMIN": l[2]
}
return jsonify(obj), 200
else:
return '', 204
@app.route('/forecast/<DATE>', methods=['GET']) #forecasts weather info of the next 7days
def forecast(DATE):
lst_dates = []
lst_obj = []
current_date = pd.to_datetime(DATE,format='%Y%m%d')
stop_date = current_date+timedelta(days=7)
while current_date<stop_date:
lst_dates.append(str(pd.to_datetime(current_date)).split(' ')[0].replace("-",""))
current_date = current_date+timedelta(days=1)
for curr_date in lst_dates:
cursor.execute("select DATE,TMAX,TMIN from dailyweather where DATE =%d" % int(curr_date))
query=cursor.fetchall()
if (len(query) > 0):
obj = {
"DATE": curr_date,
"TMAX": query[0][1],
"TMIN": query[0][2]
}
lst_obj.append(obj)
else:
cursor.execute("select ROUND(RAND()*(80-75+1),1)+75")
q=cursor.fetchall()
cursor.execute("select ROUND(RAND()*(50-45+1),1)+45")
q1=cursor.fetchall()
obj = {
"DATE": curr_date,
"TMAX": q[0][0],
"TMIN": q1[0][0]
}
lst_obj.append(obj)
return jsonify(lst_obj), 200
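# Illustrative requests (host/port taken from app.run below, dates assumed):
#   curl http://localhost:80/historical/          -> list of stored dates
#   curl http://localhost:80/historical/20200101  -> record for that date, or 404
#   curl http://localhost:80/forecast/20200101    -> 7-day forecast starting at that date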
if __name__ == '__main__':
app.run(host='0.0.0.0',debug=True,port=80)
|
cotraak/weather-app-flask
|
app.py
|
app.py
|
py
| 4,277 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70373498108
|
from typing import Optional, Any, Union, Callable
import torch
from torch import Tensor
import torch.nn.functional as F
from torch.nn.modules import Module
from .linear import Linear
from .normalization import LayerNorm
from .activation import MultiheadAttention
from .dropout import Dropout
class TransformerEncoderLayer(Module):
r"""Pytorch 2.0
TransformerEncoderLayer is made up of self-attn and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
in a different way during application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of the intermediate layer, can be a string
("relu" or "gelu") or a unary callable. Default: relu
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
norm_first: if ``True``, layer norm is done prior to attention and feedforward
operations, respectively. Otherwise it's done after. Default: ``False`` (after).
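        B: extra width forwarded to the fused Linear, LayerNorm and MultiheadAttention
            sub-modules used by Hydro (default=1).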
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
Alternatively, when ``batch_first`` is ``True``:
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)
>>> src = torch.rand(32, 10, 512)
>>> out = encoder_layer(src)
Fast path:
forward() will use a special optimized implementation described in
`FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`_ if all of the following
conditions are met:
- Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor
argument ``requires_grad``
- training is disabled (using ``.eval()``)
- batch_first is ``True`` and the input is batched (i.e., ``src.dim() == 3``)
- activation is one of: ``"relu"``, ``"gelu"``, ``torch.functional.relu``, or ``torch.functional.gelu``
- at most one of ``src_mask`` and ``src_key_padding_mask`` is passed
- if src is a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_, neither ``src_mask``
nor ``src_key_padding_mask`` is passed
- the two ``LayerNorm`` instances have a consistent ``eps`` value (this will naturally be the case
unless the caller has manually modified one without modifying the other)
If the optimized implementation is in use, a
`NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be
passed for ``src`` to represent padding more efficiently than using a padding
mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ will be
returned, and an additional speedup proportional to the fraction of the input that
is padding can be expected.
.. _`FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`:
https://arxiv.org/abs/2205.14135
"""
__constants__ = ["batch_first", "norm_first"]
def __init__(
self,
d_model: int,
nhead: int,
dim_feedforward: int = 2048,
dropout: float = 0.1,
activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
layer_norm_eps: float = 1e-5,
batch_first: bool = False,
norm_first: bool = False,
device=None,
dtype=None,
B: int = 1,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
self.B = B
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first, B=B, **factory_kwargs)
# Implementation of Feedforward model
self.linear1 = Linear(d_model, dim_feedforward, B=B, **factory_kwargs)
self.dropout = Dropout(dropout)
self.linear2 = Linear(dim_feedforward, d_model, B=B, **factory_kwargs)
self.norm_first = norm_first
self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, B=B, **factory_kwargs)
self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, B=B, **factory_kwargs)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
# For Hydro scaling
self.d_model = d_model
self.nhead = nhead
self.dim_feedforward = dim_feedforward
self.dropout_value = dropout
self.layer_norm_eps = layer_norm_eps
self.batch_first = batch_first
# Legacy string support for activation function.
if isinstance(activation, str):
self.activation = _get_activation_fn(activation)
else:
self.activation = activation
# We can't test self.activation in forward() in TorchScript,
# so stash some information about it instead.
if activation is F.relu or isinstance(activation, torch.nn.ReLU):
self.activation_relu_or_gelu = 1
elif activation is F.gelu or isinstance(activation, torch.nn.GELU):
self.activation_relu_or_gelu = 2
else:
self.activation_relu_or_gelu = 0
def __setstate__(self, state):
super().__setstate__(state)
if not hasattr(self, "activation"):
self.activation = F.relu
def forward(
self,
src: Tensor,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
is_causal: bool = False,
) -> Tensor:
r"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
is_causal: If specified, applies a causal mask as src_mask.
Default: ``False``.
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
src_key_padding_mask = F._canonical_mask(
mask=src_key_padding_mask,
mask_name="src_key_padding_mask",
other_type=F._none_or_dtype(src_mask),
other_name="src_mask",
target_type=src.dtype,
)
# Fast path NOT support training
# see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
why_not_sparsity_fast_path = ""
if not src.dim() == 3:
why_not_sparsity_fast_path = f"input not batched; expected src.dim() of 3 but got {src.dim()}"
elif self.training:
why_not_sparsity_fast_path = "training is enabled"
elif not self.self_attn.batch_first:
why_not_sparsity_fast_path = "self_attn.batch_first was not True"
elif not self.self_attn._qkv_same_embed_dim:
why_not_sparsity_fast_path = "self_attn._qkv_same_embed_dim was not True"
elif not self.activation_relu_or_gelu:
why_not_sparsity_fast_path = "activation_relu_or_gelu was not True"
elif not (self.norm1.eps == self.norm2.eps):
why_not_sparsity_fast_path = "norm1.eps is not equal to norm2.eps"
elif src.is_nested and (src_key_padding_mask is not None or src_mask is not None):
            why_not_sparsity_fast_path = "src_key_padding_mask and src_mask are not supported with NestedTensor input"
elif self.self_attn.num_heads % 2 == 1:
why_not_sparsity_fast_path = "num_head is odd"
elif torch.is_autocast_enabled():
why_not_sparsity_fast_path = "autocast is enabled"
if not why_not_sparsity_fast_path:
tensor_args = (
src,
self.self_attn.in_proj_weight,
self.self_attn.in_proj_bias,
self.self_attn.out_proj.weight,
self.self_attn.out_proj.bias,
self.norm1.weight,
self.norm1.bias,
self.norm2.weight,
self.norm2.bias,
self.linear1.weight,
self.linear1.bias,
self.linear2.weight,
self.linear2.bias,
)
# We have to use list comprehensions below because TorchScript does not support
# generator expressions.
if torch.overrides.has_torch_function(tensor_args):
why_not_sparsity_fast_path = "some Tensor argument has_torch_function"
elif not all((x.is_cuda or "cpu" in str(x.device)) for x in tensor_args):
why_not_sparsity_fast_path = "some Tensor argument is neither CUDA nor CPU"
elif torch.is_grad_enabled() and any(x.requires_grad for x in tensor_args):
why_not_sparsity_fast_path = (
"grad is enabled and at least one of query or the "
"input/output projection weights or biases requires_grad"
)
if not why_not_sparsity_fast_path:
merged_mask, mask_type = self.self_attn.merge_masks(src_mask, src_key_padding_mask, src)
return torch._transformer_encoder_layer_fwd(
src,
self.self_attn.embed_dim,
self.self_attn.num_heads,
self.self_attn.in_proj_weight,
self.self_attn.in_proj_bias,
self.self_attn.out_proj.weight,
self.self_attn.out_proj.bias,
self.activation_relu_or_gelu == 2,
self.norm_first,
self.norm1.eps,
self.norm1.weight,
self.norm1.bias,
self.norm2.weight,
self.norm2.bias,
self.linear1.weight,
self.linear1.bias,
self.linear2.weight,
self.linear2.bias,
merged_mask,
mask_type,
)
x = src
if self.norm_first:
x = x + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)
x = x + self._ff_block(self.norm2(x))
else:
x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask))
x = self.norm2(x + self._ff_block(x))
return x
# self-attention block
def _sa_block(self, x: Tensor, attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:
x = self.self_attn(x, x, x, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False)[0]
return self.dropout1(x)
# feed forward block
def _ff_block(self, x: Tensor) -> Tensor:
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
return self.dropout2(x)
def extra_repr(self) -> str:
s = "{d_model}, {nhead}, dim_feedforward={dim_feedforward}, dropout={dropout_value}, layer_norm_eps={layer_norm_eps}, B={B}"
if self.activation != F.relu:
if isinstance(self.activation, str):
s += ", activation={activation}"
else:
s += ", activation={activation.__name__}"
if self.batch_first:
s += ", batch_first=True"
if self.norm_first:
s += ", norm_first=True"
return s.format(**self.__dict__)
def _get_activation_fn(activation: str) -> Callable[[Tensor], Tensor]:
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
raise RuntimeError("activation should be relu/gelu, not {}".format(activation))
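# Minimal usage sketch (illustrative; mirrors the docstring example, with Hydro's
# default fusion width B=1):
#   layer = TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True, B=1)
#   src = torch.rand(32, 10, 512)
#   out = layer(src)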
|
S-Lab-System-Group/Hydro
|
hydro/fuse_ops/transformer.py
|
transformer.py
|
py
| 12,254 |
python
|
en
|
code
| 18 |
github-code
|
6
|
2929688634
|
"""
Python script, Python v 2.7
written 1/30/17 by James Novakowski
This script was developed to summarize anonymized claims data for Radial
Analytics as part of a coding assessment.
To run the script, the script file should be in the same folder as the
data file, called "data.csv".
The script will generate an output csv file called "State_Level_Summary.csv",
which will provide a summary of claims data by state, gender, and age.
Note: The current implementation of this script leaves out data points
where the state is undefined, the gender is undefined, or the age is undefined.
The script will also generate an output file called
"Claims_Utilization_Summary.csv", which will provide a summary of claims data
by the Utilization Range, providing the counts of claims and the percentage
of claims that fall into each range bucket.
"""
import csv
#Create data dictionary for state summary data
data = {}
state_code = range(1,67)
for item in range(97,100):
state_code.append(item)
for state in state_code:
data[state] = {"male":0,
"female":0,
"age_under_65":0,
"age_65_to_74":0,
"age_over_74":0,
"state":str(state)
}
#Create data dictionary for Utilization Range data
util_days = {}
util_code = range(0,6)
for item in util_code:
util_days[str(item)] = 0
util_days["6_to_10"] = 0
util_days["11_to_30"] = 0
util_days["over_30"] = 0
"""
Read the data from the csv file to a dictionary.
Note: Current implementation ignores values 0: Unknown
in Gender and Age fields.
Then: Summarize the data.
Data fields coded as follows:
Gender Code from Claim
0 = Unknown
1 = Male
2 = Female
LDS Age Category
0 = Unknown
1 = <65
2 = 65 Thru 69
3 = 70 Thru 74
4 = 75 Thru 79
5 = 80 Thru 84
6 = >84
And want the following fields in the final tabulation:
State
Gender (male)
Gender (female)
Age (under 65)
Age (65-74)
Age (75 +)
"""
data_file = "data.csv"
f = open(data_file)
d = csv.DictReader(f)
for row in d:
#print row
age = int(row["LDS Age Category"])
gender = int(row["Gender Code from Claim"])
state = int(row["State Code from Claim (SSA)"])
day_count = int(row["Claim Utilization Day Count"])
#Read the data into the data nested dictionary
if gender == 1:
data[state]["male"] += 1
elif gender == 2:
data[state]["female"] += 1
if age == 1:
data[state]["age_under_65"] += 1
elif age > 1 and age < 4:
data[state]["age_65_to_74"] += 1
elif age >= 4:
data[state]["age_over_74"] += 1
if day_count < 6:
util_days[str(day_count)] += 1
elif day_count >= 6 and day_count <= 10:
util_days["6_to_10"] += 1
elif day_count >= 11 and day_count <= 30:
util_days["11_to_30"] += 1
elif day_count > 30:
util_days["over_30"] += 1
f.close()
"""
Generate an output csv file for the state claim summary data.
"""
with open("State_Level_Summary.csv", 'w') as csvfile:
fieldnames = ['state',
'female',
'male',
'age_under_65',
'age_65_to_74',
'age_over_74']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow({'state':'State',
'female':'Female',
'male':'Male',
'age_under_65':'Ages < 65',
'age_65_to_74':'Ages 65-74',
                     'age_over_74':'Ages 75+'
})
for state in data:
writer.writerow(data[state])
"""
Generate an output csv file for the utilization days summary data.
Also use this step to calculate the total claims, and the percentage
of claims falling into each utilization range bucket.
"""
total_claims = 0
for key, value in util_days.iteritems():
total_claims += value
with open("Claims_Utilization_Summary.csv", 'w') as csvfile:
fieldnames = ['Utilization Range',
'Counts',
'Percentages']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for key, value in util_days.iteritems():
if value > 0:
percent = (value / float(total_claims)) * 100
percent = round(percent, 2)
percent = str(percent) + "%"
else:
percent = "0.00%"
new_row = {'Utilization Range':key,
'Counts':str(value),
'Percentages':percent}
writer.writerow(new_row)
|
jamesnovakowski/python_examples_rsg
|
State_Summary_and_Utilization_Range_Summary.py
|
State_Summary_and_Utilization_Range_Summary.py
|
py
| 4,569 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15426226201
|
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), 'classes'))
from classes import data_manager
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import StackingClassifier, VotingClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Dense, Embedding, GlobalMaxPool1D, Conv1D
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.models import Sequential, load_model
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import autokeras as ak
from xgboost import XGBRegressor
from xgboost import XGBClassifier
# Prediction models:
# regressors and classifiers that take a positinal embedding vector as input,
# output storypoints (or other impact related value)
seed=42
class regressors:
@staticmethod
def get_autokeras_paraphrase5():
model = load_model("regression_models/autokeras5_desc_paraphrase_rmse", custom_objects=ak.CUSTOM_OBJECTS)
return model
@staticmethod
def get_autokeras_roberta3_mae():
model = load_model("regression_models/autokeras3_roberta_mae", custom_objects=ak.CUSTOM_OBJECTS)
return model
@staticmethod
def keras_convolutional(X_train, y_train, X_test, y_test, vocab_size, max_len):
#https://realpython.com/python-keras-text-classification/
callbacks = [
EarlyStopping(patience=5, restore_best_weights=True, mode='min')
]
model = Sequential()
model.add(Embedding(input_dim=vocab_size+1,
output_dim=50,
input_length=max_len))
model.add(Conv1D(50, 5, activation='relu'))
model.add(GlobalMaxPool1D())
model.add(Dense(units=25, activation='relu'))
model.add(Dense(units=1, activation='relu'))
model.compile(optimizer=Adam(learning_rate=0.0001),
loss='mae',
metrics=['mse'],
run_eagerly=True)
history = model.fit(X_train, y_train,
epochs=15,
verbose=True,
validation_data=(X_test, y_test),
batch_size=50,
callbacks=callbacks)
return model
@staticmethod
def create_MLP(X, y):
model = MLPRegressor(random_state=seed)
model = model.fit(X, y)
pipe = Pipeline([('mlp', model)])
param_grid = {
'mlp__solver': ['sgd'],
'mlp__alpha': [0.01],
'mlp__learning_rate_init': [0.0001],
'mlp__max_iter': [300]
}
gs = gridsearch(pipe, param_grid, 'neg_mean_squared_error')
gs.fit(X, y)
data_manager.print_gridsearch_best_stats(gs)
return model
@staticmethod
def create_SVR(X, y):
model = svm.SVR()
pipe = Pipeline([('standardize', StandardScaler()),
('svr', model)])
param_grid = {
'svr__C': [1.75], #1.957,1.8,2,1.7 #multi lang 1.75
'svr__gamma': ['scale'],
'svr__kernel': ['rbf'],
'svr__epsilon': [0.01], #0.1,0.01 #multi lang 0.01
'svr__degree': [2] #2,3,4
}
gs = gridsearch(pipe, param_grid, 'neg_mean_absolute_error') #neg_mean_squared_error
gs.fit(X, y)
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod
def create_Randomforest(X, y):
model = RandomForestRegressor(random_state=seed, n_estimators=300, min_samples_leaf=4, max_depth=20)
pipe = Pipeline([('rtree', model)])
param_grid = {
# 'rtree__n_estimators': [300],
# 'rtree__min_samples_leaf': [4],
# 'rtree__max_depth': [20]
}
gs = gridsearch(pipe, param_grid, 'neg_mean_absolute_error')
gs.fit(X, y)
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod
def create_XGBregressor(X_train, y_train):
model = XGBRegressor(learning_rate=0.001,
n_estimators=400,
n_jobs=5,
random_state=seed)
pipe = Pipeline([('XGB', model)])
param_grid = {
}
gs = gridsearch(pipe, param_grid, 'neg_mean_squared_error')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
        return gs
@staticmethod
def keras_sequential_network(X_train, y_train, X_test, y_test, lr=0.001):
input_dim = len(X_train[0])
callbacks = [
EarlyStopping(patience=5, restore_best_weights=True, mode='min')
]
model = Sequential()
model.add(Dense(100, input_dim=input_dim, kernel_initializer='normal', activation='relu'))
model.add(Dense(20, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mse', optimizer=Adam(learning_rate=lr), metrics=['mse', 'mae'], run_eagerly=True)
model.fit(X_train, y_train,
epochs=15,
verbose=True,
validation_data=(X_test, y_test),
batch_size=50,
callbacks=callbacks)
pipe = Pipeline([('nn', model)])
param_grid = {}
gs = gridsearch(pipe, param_grid, 'neg_mean_squared_error')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
return model
class classifiers(object):
@staticmethod #0.7133 - F 0.7220 - H 0.7281 - H2 0.73152
def create_mlpclassifier(X_train, y_train):
model = MLPClassifier(random_state=seed)
        pipe = Pipeline([('standardize', StandardScaler()),
                         ('mlp', model)])
param_grid = {
'mlp__max_iter':[200], #200, 400, 600, 800 | 200
'mlp__solver':['adam'], #'adam', 'lbfgs' | 'adam'
'mlp__alpha':[0.001], #0.0001, 0.001 | 0.001
'mlp__batch_size':[50], #100, 150, 200, 400 | 50
'mlp__learning_rate_init':[0.0001] #0.01, 0.001, 0.0001 | 0.0001
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod #0.6709 - F 0.6817
def create_Randomforest(X_train, y_train):
model = RandomForestClassifier(random_state=seed)
        pipe = Pipeline([('standardize', StandardScaler()),
                         ('rtree', model)])
param_grid = {
# 'rtree__n_estimators': [700], #best from range 150 - 700
# 'rtree__min_samples_leaf': [2], #best from range 1 - 7
# 'rtree__max_depth': [20]
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod #0.7147 - F 0.7206 - H 0.7256 - H2 0.72656
def create_XGB(X_train, y_train):
model = XGBClassifier(seed=seed,
use_label_encoder=False)
pipe = Pipeline([('standardize', StandardScaler()),
('xgb', model)])
param_grid = {
'xgb__learning_rate':[0.05, 0.03], #0.2, 0.1, 0.15, 0.01 | 0.05
'xgb__n_estimators':[600, 800], #100, 300, 400, 500 | 600
'xgb__max_depth':[7], #4, 5, 6, 7, 8 | 7
'xgb__colsample_bytree':[0.2], #0.1, 0.2 | 0.2
'xgb__reg_lambda':[4, 6, 8] #1, 2, 3, 4 | 4
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod #0.6750 - F 0.6885
def create_GB(X_train, y_train):
#max_depth=6, n_estimators=500, random_state=42))])
# best parms: {'gb__learning_rate': 0.1, 'gb__max_depth': 6, 'gb__n_estimators': 500}
model = GradientBoostingClassifier(random_state=seed)
pipe = Pipeline([('standardize', StandardScaler()),
('gb', model)])
param_grid = {
# 'gb__n_estimators': [500], #50 - 600
# 'gb__learning_rate': [0.1], #0.2 - 0.01
# 'gb__max_depth': [6], #1-7
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod #0.7152 - F 0.7195 H 0.73417
def create_SVC(X_train, y_train):
model = svm.SVC(random_state=seed,
probability=True)
pipe = Pipeline([('standardize', StandardScaler()),
('svc', model)])
param_grid = {
'svc__kernel': ['rbf'], #'rbf', 'linear' | rbf
'svc__degree': [2], #2,3,4 | 2
'svc__gamma': ['scale'], #'auto', 'scale' | 'scale'
'svc__C': [1.95] #1, 1.95 | 1.95
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod #0.6670 - F 0.6735
def create_KNN(X_train, y_train):
model = KNeighborsClassifier()
pipe = Pipeline([('standardize', StandardScaler()),
('KNN', model)])
param_grid = {
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod #0.6764 - F 0.667
def create_SGD(X_train, y_train):
model = SGDClassifier(random_state=seed)
pipe = Pipeline([('standardize', StandardScaler()),
('sgd', model)])
param_grid = {
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod #F - 0.7311 - H2 0.73587
def create_voting(X_train, y_train):
SVC = svm.SVC(random_state=seed,
probability=True,
kernel='rbf',
degree=2,
gamma='scale',
C=1.95)
XGB = XGBClassifier(seed=seed,
learning_rate=0.05,
n_estimators=600,
max_depth=7,
reg_lambda=4,
colsample_bytree=0.2,
use_label_encoder=False)
MLP = MLPClassifier(random_state=seed,
max_iter=200,
solver='adam',
alpha=0.001,
batch_size=50,
learning_rate_init=0.0001)
estimators = [
('svc', SVC),
('xgb', XGB),
('mlp', MLP)
]
model = VotingClassifier(
estimators=estimators,
voting='soft',
weights=[1,1,1],
n_jobs=-1,
verbose=True)
pipe = Pipeline([('standardize', StandardScaler()),
('vc', model)])
param_grid = {
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
print('voting done')
return gs
@staticmethod #F - 0.72848 - H2 0.7373
def create_stacking(X_train, y_train):
SVC = svm.SVC(random_state=seed,
probability=True,
kernel='rbf',
degree=2,
gamma='scale',
C=1.95)
XGB = XGBClassifier(seed=seed,
learning_rate=0.05,
n_estimators=600,
max_depth=7,
reg_lambda=4,
colsample_bytree=0.2,
use_label_encoder=False)
MLP = MLPClassifier(random_state=seed,
max_iter=200,
solver='adam',
alpha=0.001,
batch_size=50,
learning_rate_init=0.0001)
estimators = [
('svc', SVC),
('xgb', XGB),
('mlp', MLP)
]
model = StackingClassifier(
estimators=estimators,
final_estimator=LogisticRegression(random_state=42)
)
pipe = Pipeline([('standardize', StandardScaler()),
('stack', model)])
param_grid = {
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
print('stacking done')
data_manager.print_gridsearch_best_stats(gs)
return gs
@staticmethod
def create_logisticregression(X_train, y_train):
model = LogisticRegression(random_state=42)
pipe = Pipeline([('standardize', StandardScaler()),
('lg', model)])
param_grid = {
'lg__max_iter':[600]
}
gs = gridsearch(pipe, param_grid, 'recall_macro')
gs.fit(X_train, y_train)
data_manager.print_gridsearch_best_stats(gs)
return gs
def gridsearch(pipe, param_grid, metric):
gs = GridSearchCV(pipe,
param_grid,
verbose=0,
cv=5,
scoring=metric,
n_jobs=4,
return_train_score=True)
return gs
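# Illustrative usage (X_train/y_train assumed to be embedding vectors and class labels):
#   gs = classifiers.create_SVC(X_train, y_train)   # returns a fitted GridSearchCV
#   y_pred = gs.predict(X_test)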
|
JaapvDijk/PredictTaskImpactNLP
|
classes/prediction_models.py
|
prediction_models.py
|
py
| 14,556 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28428365301
|
import os
from app import create_app
if __name__ == '__main__':
verify_token = os.getenv("VERIFY_TOKEN", None)
access_token = os.getenv("ACCESS_TOKEN", None)
url = os.getenv("URL", None)
if not verify_token:
raise Exception("verify_token not set")
if not access_token:
raise Exception("access_token not set")
env = {
"VERIFY_TOKEN": verify_token,
"ACCESS_TOKEN": access_token,
"URL": url
}
app = create_app.create_app(env=env)
app.logger.info("Initializing")
app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
|
lolney/messenger-gpt2-chatbot
|
server/app.py
|
app.py
|
py
| 619 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18834111771
|
# -*- coding: utf-8 -*-
# Author: sen
# Date: 9090/3/94 10:48
from typing import List
from heapq import *
class Solution:
def majorityElement(self, nums: List[int]) -> int:
from collections import Counter
counter = Counter(nums)
        for item in counter.items():  # item: (element, count)
if item[1] > (len(nums) / 2.0):
return item[0]
class Solution2:
def majorityElement(self, nums: List[int]) -> int:
        # without using the built-in Counter
counter = {}
for num in nums:
counter[num] = counter.get(num, 0) + 1
for item in counter.items():
if item[1] > (len(nums) / 2.0):
return item[0]
if __name__ == '__main__':
nums = [9,9,8,8,8,9,9]
so = Solution()
print(so.majorityElement(nums))
so = Solution2()
print(so.majorityElement(nums))
|
PandoraLS/CodingInterview
|
ProgrammingOJ/LeetCode_python/169_多数元素.py
|
169_多数元素.py
|
py
| 892 |
python
|
en
|
code
| 2 |
github-code
|
6
|
22248410521
|
"""
Very simple HTTP server in python for logging requests
Usage::
./server.py [<port>]
"""
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib import parse
import os
import logging
class S(BaseHTTPRequestHandler):
def _set_response(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_POST(self):
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
# logging.info("POST request,\nPath: %s\nHeaders:\n%s\n\nBody:\n%s\n",
# str(self.path), str(self.headers), post_data.decode('utf-8'))
standard = post_data.decode("utf-8")
dump = parse.parse_qs(standard)
if "type" in dump.keys():
if dump["type"][0] == "vote":
writeVoteToFile(dump["titleID"][0])
if dump["type"][0] == "chat":
writeChatToFile(dump)
self._set_response()
def run(server_class=HTTPServer, handler_class=S, port=3000):
# logging.basicConfig(level=logging.INFO)
server_address = ('', port)
httpd = server_class(server_address, handler_class)
# logging.info('Starting httpd...\n')
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
# logging.info('Stopping httpd...\n')
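# Illustrative requests (default port 3000; field values assumed):
#   curl -d "type=vote&titleID=0005000012345678" http://localhost:3000/
#   curl -d "type=chat&author=abc&time=12:00&message=hello" http://localhost:3000/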
# Safety operations
def doesIDExist(titleID):
with open("/resources/text/titleinfo.txt", "r") as f:
for line in f:
if line[8:16] == titleID[8:16]:
return True
return False
# Saving operations
def writeVoteToFile(titleID):
if doesIDExist(titleID):
with open("/resources/text/vote.txt", "a") as f:
f.write(titleID + "\r")
os.system("echo \"$(tail -n 200 /resources/text/vote.txt)\" > /resources/text/vote.txt")
else:
print("Could not write vote for: " + titleID)
def writeChatToFile(details):
with open("/resources/text/msg.txt", "a") as f:
f.write(details["author"][0] + ";;" + details["time"][0] + ";;" + details["message"][0] +"\r")
os.system("echo \"$(tail -n 200 /resources/text/msg.txt)\" > /resources/text/msg.txt")
# Main function
if __name__ == '__main__':
from sys import argv
if len(argv) == 2:
run(port=int(argv[1]))
else:
run()
|
jacquesCedric/newWWP-server
|
listener/Listener.py
|
Listener.py
|
py
| 2,426 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4072987057
|
# Final project For3
# Author: Jón Benediktsson
# Date: 27.12.2017
from ctypes import *
from random import randint
import color_console as cons
import sys
# links
# the print-at command:
#https://rosettacode.org/wiki/Terminal_control/Cursor_positioning#Python
# colors:
#https://www.burgaud.com/bring-colors-to-the-windows-console-with-python/
#"bitwise or"
#https://wiki.python.org/moin/BitwiseOperators
default_colors = cons.get_text_attr()
default_bg = default_colors & 0x0070
default_fg = default_colors & 0x0007
class COORD(Structure):
pass
COORD._fields_ = [("X", c_short), ("Y", c_short)]
def print_at(r, c, s):
h = windll.kernel32.GetStdHandle(-11)
windll.kernel32.SetConsoleCursorPosition(h, COORD(c, r))
c = s.encode("UTF-8")
windll.kernel32.WriteConsoleA(h, c_char_p(c), len(c), None, None)
#=================================================================
# my code
#=================================================================
# color format so certain colors can be kept for certain things
class Color_Form():
def __init__(self, *args):
        TempBin=0x0000 # I tried to learn a bit about this and I just don't get what it means
        for x in args: # it's supposed to be some kind of binary number thing or something
            TempBin=(TempBin|x) # and this here is supposed to do a "bitwise or"; I got it to work anyway
self.__Color__=TempBin
def GetColor(self):
return self.__Color__
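# --- Added illustration; not part of the original game code. Color_Form simply ORs the
# --- console attribute flags it is given into one value, for example:
_ExampleColor = Color_Form(cons.FOREGROUND_BLACK, cons.BACKGROUND_GREY)
# _ExampleColor.GetColor() == (cons.FOREGROUND_BLACK | cons.BACKGROUND_GREY)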
#Standard for the pattern on the border
#0-1-2
#| |
#7 8 3
#| |
#6-5-4
#so a 9-character pattern string (corners, edges, fill) like "โโโโโโโโ "
#draws this:
#โโโ
#โ โ
#โโโ
RandomEn=[["Ananas",5,1,-10,5],["Snakur",10,3,1,20],["Dvergur",30,5,-1,100]]
class kassi():
def __init__(self,LiturBorder,LiturInni,X,Y,Breydd,Haed,Munstur):
self.LiturBorder=LiturBorder
self.LiturInni=LiturInni
self.X=Y
self.Y=X
self.Breydd=Haed #there was some bug in this for me so I just swapped them
self.Haed=Breydd #print_at takes y then x instead of x,y; that's the problem
self.Munstur=Munstur
def teikna(self):
PH="0" #holds the pattern character to print, i.e. Print Holder
Litur=0x0000 #holds the color to use
for x in range(self.Breydd):
for y in range(self.Haed):
if x+y==0: #0
PH=self.Munstur[0]
Litur=self.LiturBorder
elif y == self.Haed-1 and x ==0: #2
PH = self.Munstur[2]
Litur = self.LiturBorder
elif x == self.Breydd-1 and y ==0: #4
PH = self.Munstur[4]
Litur = self.LiturBorder
elif x == self.Breydd-1 and y == self.Haed-1: #6
PH = self.Munstur[6]
Litur = self.LiturBorder
elif y==0: #7
PH = self.Munstur[7]
Litur = self.LiturBorder
elif x==self.Breydd-1: #5
PH = self.Munstur[5]
Litur = self.LiturBorder
elif y==self.Haed-1: #3
PH = self.Munstur[3]
Litur = self.LiturBorder
elif x==0: #1
PH = self.Munstur[1]
Litur = self.LiturBorder
else: #8
PH = self.Munstur[8]
Litur = self.LiturInni
cons.set_text_attr(Litur)
print_at((self.X+x),(self.Y+y),PH)
#colors used for the boxes on the screen and more
Grunnur= Color_Form(default_bg,default_fg)
Border= Color_Form(cons.FOREGROUND_BLACK,cons.BACKGROUND_GREY)
StatsColor=Color_Form(default_bg,cons.FOREGROUND_MAGENTA)
CommandColor=Color_Form(default_bg,cons.FOREGROUND_RED)
#assembling and drawing the menu
MainBack=kassi(Border.GetColor(),Border.GetColor(),0,0,120,47,"0-0|0-0| ")
MainBack.teikna()
MainPlay=kassi(Border.GetColor(),Grunnur.GetColor(),5,2,61,32,"0-0|0-0| ")
MainPlay.teikna()
StatsBox=kassi(StatsColor.GetColor(),Grunnur.GetColor(),80,3,20,30,"0-0|0-0| ")
StatsBox.teikna()
CommandBox=kassi(CommandColor.GetColor(),Grunnur.GetColor(),7,35,59,8,"0-0|0-0| ")
CommandBox.teikna()
CTLoc=[37,9] #command return location
print_at(0,0,"")
input()
class Character():
def __init__(self,Nafn,MaxHp,Str,Dex,Vopn,Def,Agi,Int):
self._Nafn=Nafn
self._MaxHp=MaxHp
self._Str=Str
self._Dex=Dex
self._Vopn=Vopn
self._Def=Def
self._Agi=Agi
self._Int=Int
self._Peningar=0
self._lookrange=5
self._Hp=MaxHp
def AddMoney(self,Ammount):
self._Peningar=self._Peningar+Ammount
def Money(self):
return self._Peningar
def Look(self):
return self._lookrange
def Attack(self):
return self._Vopn.Dmg()
def Recive_Damage(self,Damage):
if Damage > self._Def:
pass
else:
self._Hp=self._Hp-Damage+self._Def
def Print_Stats(self):
cons.set_text_attr(default_colors)
print_at(4,89-(len("Hp: "+str(self._Hp)+"/"+str(self._MaxHp))//2),"Hp: "+str(self._Hp)+"/"+str(self._MaxHp))
health_bar="<|==============|>"
cons.set_text_attr(default_bg|cons.FOREGROUND_GREEN)
print_at(5, 81 ,health_bar)
if self._Hp<self._MaxHp:
cons.set_text_attr(default_bg|cons.BACKGROUND_RED)
print_at(4, 90 ,"|>")
class items():
def __init__(self,verd,typa):
self._verd=verd
self._typa=typa
def GetType(self):
return self._typa
def GetWorth(self):
return self._verd
class Vopn(items):
def __init__(self,Drif,Dmg,Virdi):
items.__init__(self,Virdi,"Vopn")
self._Drif=Drif
self._Dmg=Dmg
def Drif(self):
return self._Drif
def Dmg(self):
return self._Dmg
class Enemy():
def __init__(self,Nafn,Hp,Dmg,Agi,Gold):
self._Nafn=Nafn
self._Stafur=Nafn[0]
self._Hp=Hp
self._Dmg=Dmg
self._Agi=Agi
self._Gold=Gold
self._seen=False
self._Dead=False
def GetAgi(self):
return self._Agi
def GetStafur(self):
return self._Stafur
def IsDead(self):
return self._Dead
def Loot(self):
goldcar=int(self._Gold)
self._Gold=0
return goldcar
def Recive_Damage(self,Damage):
self._Hp=self._Hp-Damage
if self._Hp<1:
self._Dead=True
CTL("Thu drapst eithvad sem bar nafnid "+self._Nafn)
else:
CTL("Thu gerdir aras a eithvad sem ber nafnid " + self._Nafn)
def Attack(self):
return self._Dmg
Grasyda=Vopn(1,4,70)
Anton=Character("Anton",20,2,2,Grasyda,1,1,0)
class Map():
def __init__(self,File):
self._enemypos=[]
self._itempos=[]
self._Characterpos=[]
self._Enemies=[]
#need to fix this later so it isn't such a clusterfuck
skra=open(File,"r",encoding="UTF-8")
tempCopy=skra.read()
skra.close()
tempcopyx=tempCopy.split("\n")
self._Holder=[]#this holds the map
for y in range(len(tempcopyx)):
self._Holder.append([])
for x in range(len(tempcopyx[y])):
self._Holder[y].append(tempcopyx[y][x])
if tempcopyx[y][x]=="E":
self._enemypos.append([x,y])
RaChoice=RandomEn[randint(0,len(RandomEn)-1)]#picks a random enemy from the list
self._Enemies.append(Enemy(RaChoice[0],RaChoice[1],RaChoice[2],RaChoice[3],RaChoice[4]))
self._Holder[y][x]="."
if tempcopyx[y][x]=="I":
self._itempos.append([x,y])
self._Holder[y][x] = "."
if tempcopyx[y][x]=="S":
self._Characterpos=[x,y]
self._Holder[y][x] = "."
def searchOnscreen(self,dist,listi):
outp=[]
for x in range(len(listi)):
if listi[x][0] in range(self._Characterpos[0]-dist,self._Characterpos[0]+dist) and listi[x][1] in range(self._Characterpos[1]-dist,self._Characterpos[1]+dist):
outp.append(x)
return outp
def draw(self):
WallColor=Color_Form(cons.FOREGROUND_INTENSITY,cons.FOREGROUND_GREY,cons.BACKGROUND_INTENSITY,cons.BACKGROUND_RED)
EnemyColor=Color_Form(cons.FOREGROUND_RED,default_bg)
ItemColor = Color_Form(cons.FOREGROUND_YELLOW, default_bg)
CharacterColor = Color_Form(cons.FOREGROUND_CYAN, default_bg)
for x in range(0,30):
for y in range(0,30):
if self._Holder[y+self._Characterpos[1]-15][x+self._Characterpos[0]-15]=="#":
cons.set_text_attr(WallColor.GetColor())
else:
cons.set_text_attr(default_colors)
print_at(3+y,6+(x*2),self._Holder[y+self._Characterpos[1]-15][x+self._Characterpos[0]-15])
EnemyOnscreen=self.searchOnscreen(15,self._enemypos)
ItemOnscreen = self.searchOnscreen(15, self._itempos)
cons.set_text_attr(EnemyColor.GetColor())
for x in EnemyOnscreen:
print_at(18-(self._Characterpos[1]-self._enemypos[x][1]),36-(2*(self._Characterpos[0]-self._enemypos[x][0])),self._Enemies[x].GetStafur())
cons.set_text_attr(ItemColor.GetColor())
for x in ItemOnscreen:
print_at(18-(self._Characterpos[1]-self._itempos[x][1]),36-(2*(self._Characterpos[0]-self._itempos[x][0])),"I")
cons.set_text_attr(CharacterColor.GetColor())
print_at(18,36,"@")
def Action(self,command):
testloc=list(self._Characterpos)
if command== "w":
testloc[1]=testloc[1]-1
elif command== "s":
testloc[1]=testloc[1]+1
elif command== "a":
testloc[0]=testloc[0]-1
elif command== "d":
testloc[0]=testloc[0]+1
if self._Holder[testloc[1]][testloc[0]]=="#":
CTL("thad er eithvad fyrir ther")
return False
elif testloc in self._enemypos:
EnemyId=self._enemypos.index(testloc)
if self._Enemies[EnemyId].IsDead():
Gold=int(self._Enemies[EnemyId].Loot())
if Gold==0:
self._Characterpos=testloc
else:
Anton.AddMoney(Gold)
CTL("thu fanst "+str(Gold)+" kronur a likinu")
else:
self._Enemies[EnemyId].Recive_Damage(Anton.Attack())
return True
else:
self._Characterpos=testloc
return True
def CTL(message):
cons.set_text_attr(CommandColor.GetColor())
print_at(CTLoc[0], CTLoc[1], " ")
print_at(CTLoc[0], CTLoc[1], message)
cons.set_text_attr(cons.FOREGROUND_CYAN | default_bg)
print_at(CTLoc[0] + 1, CTLoc[1], " ")
print_at(CTLoc[0] + 1, CTLoc[1], "")
Leikur=Map("Test_2.txt")
def CTI(Message):
cons.set_text_attr(cons.FOREGROUND_YELLOW | default_bg)
print_at(CTLoc[0] -1, CTLoc[1], " ")
print_at(CTLoc[0] -1, CTLoc[1], "")
class Turns():
def __init__(self,EnemyList):
self._ind = 0
self._Turnlist=[]
for x in range(21):
self._Turnlist.append(["filler"])
self._Turnlist[0].append("C")
for x in range(len(EnemyList)):
self._Turnlist[randint(1,20)].append(x)
def GetTurn(self):
if len(self._Turnlist[0])<=self._ind:
self._ind = 0
for x in range(20):
self._Turnlist[x]=self._Turnlist[x+1]
self._Turnlist[20] = ["filler"]
self._ind = self._ind +1
return self._Turnlist[0][self._ind-1]
def SetTurn(self,hlutur,Agi):
self._Turnlist[10-Agi].append(hlutur)
Rodinn=Turns(Leikur._Enemies)
Trust=False
#here the game starts doing things
while True:
while not Trust:
Anton.Print_Stats()
Leikur.draw()
print_at(CTLoc[0] + 1, CTLoc[1], " ")
print_at(CTLoc[0] + 1, CTLoc[1], "")
inp=input()
if "/" in inp:
pass
elif inp in "asdw":
Trust=Leikur.Action(inp)
Rodinn.SetTurn("C",Anton._Agi)
while Trust:
Onscreen=Leikur.searchOnscreen(10,Leikur._enemypos)
Engaged=Leikur.searchOnscreen(2,Leikur._enemypos)
Gera=Rodinn.GetTurn()
if Gera=="C":
Trust=False
elif Gera=="filler":
pass
elif Leikur._Enemies[Gera].IsDead():
pass
else:
if Gera in Onscreen:
if Gera in Engaged:
Anton.Recive_Damage(Leikur._Enemies[Gera].Attack())
else:
if Leikur._enemypos[Gera][1]==Leikur._Characterpos[1]:
if Leikur._enemypos[Gera][0]>Leikur._Characterpos[0]:
tempdir=-1
else:
tempdir=1
Leikur._enemypos[Gera][0]=Leikur._enemypos[Gera][0]+tempdir
if Leikur._enemypos[Gera][0]==Leikur._Characterpos[0]:
if Leikur._enemypos[Gera][1]>Leikur._Characterpos[1]:
tempdir=-1
else:
tempdir=1
Leikur._enemypos[Gera][1]=Leikur._enemypos[Gera][1]+tempdir
else:
randdir=randint(0,1)
if randdir==0:
if Leikur._enemypos[Gera][0] > Leikur._Characterpos[0]:
tempdir = -1
else:
tempdir = 1
else:
if Leikur._enemypos[Gera][1] > Leikur._Characterpos[1]:
tempdir = -1
else:
tempdir = 1
Leikur._enemypos[Gera][randdir] = Leikur._enemypos[Gera][randdir] + tempdir
else:
randdir = randint(0, 1)
randdir2= randint(0, 1)
Leikur._enemypos[Gera][randdir] = Leikur._enemypos[Gera][randdir] + [-1,1][randdir2]
Rodinn.SetTurn(Gera,Leikur._Enemies[Gera].GetAgi())
|
Mergjunarhola/TextBasedDungeonCrawler-1
|
Dungeoncrawler/GamePlayerV1.py
|
GamePlayerV1.py
|
py
| 15,523 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35412611474
|
# These programs are under the CeCILL-B V1 license.
# Provisional: we should create in Python the equivalent of the Isn class
# for handling the graphics. My graphics knowledge is fairly limited, so
# for now I settled for something simple.
#
# Guillaume
from tkinter import Tk,Frame,Button,Canvas,LEFT,RIGHT,TOP
root = None
application = None
canvas = None
btnQuitter = None
def initDrawing(titre,x,y,largeur,hauteur):
global root,application,canvas,btnQuitter
root = Tk()
application = Frame(root)
application.pack()
application.master.title(titre)
canvas = Canvas(application, width=largeur, height=hauteur)
canvas.pack(side=TOP)
btnQuitter = Button(application, text="Quitter", command=application.quit)
btnQuitter.pack(side=RIGHT)
def drawRectangle(x1,y1,x2,y2,rouge,vert,bleu):
global canvas
couleur = "#%02x%02x%02x" % (rouge,vert,bleu)
canvas.create_rectangle(x1,y1,x2,y2,outline=couleur,fill="white")
def drawCircle(x,y,rayon,rouge,vert,bleu):
global canvas
couleur = "#%02x%02x%02x" % (rouge,vert,bleu)
canvas.create_oval(x-rayon,y-rayon,x+rayon,y+rayon,outline=couleur,fill="white")
def drawPixel(x,y,rouge,vert,bleu):
global canvas
couleur = "#%02x%02x%02x" % (rouge,vert,bleu)
canvas.create_rectangle(x,y,x,y,outline=couleur)
def drawLine(x1,y1,x2,y2,rouge,vert,bleu):
global canvas
couleur = "#%02x%02x%02x" % (rouge,vert,bleu)
canvas.create_line(x1,y1,x2,y2,fill=couleur)
def showDrawing():
global root
root.mainloop()
gauche = 0
droite = 1
haut = 2
bas = 3
aucun = 4
def dessiner (x,y,rayon,interdit):
drawCircle(x,y,rayon,0,0,0)
if rayon > 1:
if interdit != droite:
dessiner(x + 3 * rayon // 2,y,rayon // 2,gauche)
if interdit != gauche:
dessiner(x - 3 * rayon // 2,y,rayon // 2,droite)
if interdit != haut:
dessiner(x,y - 3 * rayon // 2,rayon // 2,bas)
if interdit != bas:
dessiner(x,y + 3 * rayon // 2,rayon // 2,haut)
initDrawing("DessinRรฉcursif",10,10,400,400)
dessiner(200,200,64,aucun)
showDrawing()
|
OCamlPro/ISN-OCaml
|
chap5/DessinRecursif.py
|
DessinRecursif.py
|
py
| 2,138 |
python
|
fr
|
code
| 4 |
github-code
|
6
|
43317278578
|
# Scrapy settings for sci_abs project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from shutil import which
BOT_NAME = 'sci_abs'
SPIDER_MODULES = ['sci_abs.spiders']
NEWSPIDER_MODULE = 'sci_abs.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'sci_abs (+http://www.yourdomain.com)'
# Obey robots.txt rules
# ROBOTSTXT_OBEY = True
DOWNLOAD_DELAY = 2
RANDOMIZE_DOWNLOAD_DELAY = True
SELENIUM_DRIVER_NAME = 'firefox'
SELENIUM_DRIVER_EXECUTABLE_PATH = which('geckodriver')
SELENIUM_DRIVER_ARGUMENTS = ['-headless'] # '--headless' if using chrome instead of firefox
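# Added illustrative note, not part of the original settings: with scrapy-selenium configured
# as above, a spider would typically yield a SeleniumRequest instead of a plain Request,
# e.g. (spider-side code; the url and callback name are placeholders):
# from scrapy_selenium import SeleniumRequest
# yield SeleniumRequest(url=url, callback=self.parse_result)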
RETRY_TIMES = 3
# Retry on most error codes since proxies fail for different reasons
RETRY_HTTP_CODES = [500, 503, 504, 400, 403, 404, 408]
PROXY_LIST = 'sci_abs/spiders/proxies_round2'
PROXY_MODE = 0 # different proxy for each request
RANDOM_UA_PER_PROXY = True
FAKEUSERAGENT_FALLBACK = 'Mozilla'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'sci_abs.middlewares.SciAbsSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'sci_abs.middlewares.SciAbsDownloaderMiddleware': 543,
#}
DOWNLOADER_MIDDLEWARES = {
# 'news_oil_gas.middlewares.NewsOilGasDownloaderMiddleware': 543,
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
'scrapy_user_agents.middlewares.RandomUserAgentMiddleware': 400,
'scrapy.downloadermiddlewares.retry.RetryMiddleware': 900,
# 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
'scrapy_proxies.RandomProxy': 700,
'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 710,
'scrapy_selenium.SeleniumMiddleware': 750,
# 'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': None,
# 'scrapy.downloadermiddlewares.cookies.PersistentCookiesMiddleware': 751,
'scrapy_splash.SplashCookiesMiddleware': 650,
'scrapy_splash.SplashMiddleware': 652,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
ITEM_PIPELINES = {
'sci_abs.pipelines.SciAbsPipeline': 300,
}
MONGO_URI= 'mongodb://root:[email protected]:27017/'
MONGO_DATABASE='abstracts'
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
SCHEDULER_PERSIST = True
SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue'
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# Specify the host and port to use when connecting to Redis (optional).
REDIS_HOST = '139.198.191.224'
REDIS_PORT = 6379
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# SCHEDULER_PERSIST = True
# SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue'
# # STATS_CLASS = "scrapy_redis.stats.RedisStatsCollector"
# # STATS_CLASS = "scrapy_redis.stats.RedisStatsCollector"
#
# # SCHEDULER_PERSIST = True
# DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# # Specify the host and port to use when connecting to Redis (optional).
# REDIS_HOST = '139.198.191.224'
# # REDIS_HOST='localhost'
# REDIS_PORT = 6379
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'sci_abs.pipelines.SciAbsPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
RayFromUiS/sci_abs_scraper
|
sci_abs/sci_abs/settings.py
|
settings.py
|
py
| 5,479 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22218777486
|
import director.tasks.robottasks as rt
taskLibrary = [
['utils', [
[rt.PrintTask, {}],
[rt.UserPromptTask, {}],
[rt.DelayTask, {}],
[rt.PauseTask, {}],
[rt.QuitTask, {}],
]],
['perception sensors', [
[rt.WaitForMultisenseLidar, {}],
[rt.SnapshotMultisensePointcloud, {}],
[rt.SnapshotSelectedPointcloud, {}],
[rt.SnapshotStereoPointcloud, {}],
[rt.FindHorizontalSurfaces, {}],
]],
['fitting', [
[rt.UserAnnotatePointCloud, {}],
[rt.UserSelectAffordanceCandidate, {}],
[rt.ProjectAffordanceToGround, {}],
[rt.FindHorizontalSurfaces, {}],
[rt.FitWallFrameFromAnnotation, {}],
[rt.FitShelfItem, {}],
[rt.FindRotaryDrillByAnnotation, {}],
[rt.ComputeRobotFootFrame, {}],
[rt.TransformFrame, {}],
]],
['spawn affordances', [
[rt.SpawnDrillBarrelAffordance, {}],
[rt.SpawnDrillRotaryAffordance, {}],
[rt.SpawnValveAffordance, {}],
]],
['planning', [
[rt.RequestFootstepPlan, {}],
[rt.RequestWalkingPlan, {}],
[rt.PlanPostureGoal, {}],
[rt.PlanReachToFrame, {}],
[rt.PlanGazeTrajectory, {}],
[rt.PlanStandPosture, {}],
[rt.PlanNominalPosture, {}],
]],
['execution', [
[rt.CommitManipulationPlan, {}],
[rt.CommitFootstepPlan, {}],
[rt.WaitForManipulationPlanExecution, {}],
[rt.WaitForWalkExecution, {}],
[rt.WaitForAtlasBehavior, {}],
]],
['hand control', [
[rt.CloseHand, {}],
[rt.OpenHand, {}],
]],
]
|
RobotLocomotion/director
|
src/python/director/tasks/descriptions/taskLibrary.py
|
taskLibrary.py
|
py
| 1,505 |
python
|
en
|
code
| 176 |
github-code
|
6
|
30503616911
|
# Author: Ron Jones
# Date Created: 7-3-17
# Date Last Modified: 7-4-17
# Purpose: Check CDS Overlay Excel Sheet with Master Data Sheet
# Status: Working perfectly with MDS and CDS_Overlay_Final2.xlsx as of July 4, 2017
'''Note: The "compare_dicts" function iterates through every
correct combination of entries from the overlay and data files to check
for any discrepancies, then checks every entry from the overlay against
the data to see if there are any entire records erroneously absent from
the MDS. For more detailed instructions, check FM_Overlay_Script, the
structure is basically the same'''
# Import openpyxl module to allow python to access data from Excel documents
import openpyxl as xl, sys
def main():
# Pull data from workbooks
data = xl.load_workbook(sys.argv[1])
overlay = xl.load_workbook(sys.argv[2])
# Pull worksheets from workbooks
data_sheet = data.get_sheet_by_name('Data')
overlay_sheet = overlay.get_sheet_by_name('Table 1')
# Open output file (validation comments) for writing
comments = open('Classified_Information_Comments', 'w')
#Write heading to output file
comments.write("Inconsistencies:" + "\n" + "\n")
# Open empty dictionary for overlay info
overlay_dict = {}
# Open empty dictionary for master info
data_dict = {}
populate_overlay_dict(overlay_sheet, overlay_dict)
populate_data_dict(data_sheet, data_dict)
compare_dicts(data_dict, overlay_dict, comments)
def populate_overlay_dict(sheet, inp_dict):
titles = ['CONTROL', 'CLASSIFIED INFORMATION OVERLAY']
for i in range(60, 157):
if not sheet.cell(row=i, column=1).value in titles:
inp_dict[sheet.cell(row=i, column=1).value] = sheet.cell(row=i, column=2).value
#print("Overlay dictionary: ", inp_dict)
def populate_data_dict(worksheet, inp):
for i in range(4, worksheet.max_row + 1):
if not worksheet.cell(row=i, column=3).value in inp:
inp[worksheet.cell(row=i, column=3).value] = [worksheet.cell(row=i, column=50).value]
else:
inp[worksheet.cell(row=i, column=3).value].append(worksheet.cell(row=i, column=50).value)
#print("Data Dict: ", inp)
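# Added illustration, not part of the original script: example shapes of the two dicts built
# above (control names and values are made up). The overlay dict maps each control to the
# single expected value from column 2, while the data dict maps each control to the list of
# values found in column 50 of the MDS.
# overlay_dict = {"AC-1": "X", "AC-2": "X"}
# data_dict = {"AC-1": ["X", "X"], "AC-2": [None]}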
def compare_dicts(data, overlay, outfile):
switch = 0
#For loop to check for incorrect/missing entries
for key in data:
for key2 in overlay:
if key == key2:
for elt in data[key]:
if elt == overlay[key2]:
#Can uncomment for visual evidence that loop executed
#print("Data validated " + str(key) + " " + str(key2))
continue
else:
outfile.write("Discrepancy with control " + str(key) + "\n" + "\n")
switch = 1
break
continue
#For loop to check for missing records
for key2 in overlay:
if not key2 in data:
outfile.write(((str(key2) + " should include a " + str(overlay[key2]) + " in the overlay column of MDS, but the record itself does not exist" + "\n" + "\n")))
switch = 1
if switch == 0:
print("No discrepancies found")
else:
print("There were some discrepancies. Check 'Classified_Information_Comments' for more information")
main()
|
NISTBoard/data_validation
|
Classified_Info_Script.py
|
Classified_Info_Script.py
|
py
| 3,365 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3238678172
|
"""Contains the class system_objects.
Used to compute systems of thermal objects.
"""
import copy
from .. import solvers
from . import Object
class SystemObjects:
"""System_objects class.
This class creates a system of unidimensional thermal objects, establishes
contact between them and computes the respective thermal processes.
"""
def __init__(self, number_objects=2, materials=('Cu', 'Cu'),
objects_length=(10, 10), amb_temperature=293, dx=0.01, dt=0.1,
file_name=None, initial_state=False,
boundaries=((2, 0), (3, 0)), materials_path=False):
"""System object initialization.
`number_objects` is the integer number of thermal objects. `materials`
is the list of strings of all the used materials present in
`material_path`. `amb_temperature` is the ambient temperature of the
whole system. `object_length` is the list of thermal object lengths
(spacial steps). `dx` and `dt` are the space and time steps,
respectively. `file_name` is the file name where the temperature is
saved. `boundaries` is a list of tuples of length two that define each
boundary condition for temperature. If 0 the boundary condition is
insulation. `materials_path` is absolute path of the materials
database. If false, then the materials database is the standard
heatrapy database.
"""
# check the validity of inputs
materials = tuple(materials)
objects_length = tuple(objects_length)
boundaries = tuple(boundaries)
cond01 = isinstance(amb_temperature, float)
cond01 = cond01 or isinstance(amb_temperature, int)
cond02 = isinstance(materials, tuple)
cond03 = isinstance(number_objects, int)
cond04 = isinstance(objects_length, tuple)
cond05 = isinstance(dx, int) or isinstance(dx, float)
cond06 = isinstance(dt, int) or isinstance(dt, float)
cond07 = isinstance(file_name, str)
cond07 = cond07 or (file_name is None)
cond08 = isinstance(boundaries, tuple)
cond09 = isinstance(initial_state, bool)
condition = cond01 and cond02 and cond03 and cond04 and cond05
condition = condition and cond06 and cond07 and cond08 and cond09
if not condition:
raise ValueError
# initial definitions
self.objects = []
for i in range(number_objects):
if file_name:
file_name = file_name + '_' + str(i) + '.txt'
self.objects.append(Object(amb_temperature,
materials=(materials[i],),
borders=(1, objects_length[i]+1),
materials_order=(0,), dx=dx, dt=dt,
file_name=file_name, boundaries=(0, 0),
Q=[], Q0=[], initial_state=initial_state,
materials_path=materials_path))
self.contacts = set()
self.boundaries = boundaries
self.dt = dt
self.q1 = 0.
self.q2 = 0.
for i in boundaries:
if i[1] != 0:
for j in range(len(self.objects[i[0]].temperature)):
self.objects[i[0]].temperature[j] = [i[1], i[1]]
def contact_filter(self, object):
"""Filter self.contacts by thermal object id.
object: thermal object id
"""
# check the validity of inputs
condition = object in range(len(self.objects))
if not condition:
raise ValueError
filtered = [x for x in
self.contacts if (x[0][0] == object or x[1][0] == object)]
return set(filtered)
def contact_add(self, contact):
"""Add contact to self.contacts.
The `contact` parameter is a tuple of length 3 (one element for thermal
object A, one for thermal object B, and one for the heat transfer
coefficient). Each thermal object element is a tuple of length 2 where
the first element is the index of the thermal object and the second is
the spatial point index.
"""
# check the validity of inputs
if isinstance(contact, list) or isinstance(contact, tuple):
if len(contact) == 3:
condition = True
else:
condition = False
else:
condition = False
if not condition:
raise ValueError
self.contacts.add(contact)
def contact_remove(self, object_one, object_two):
"""Contact removal.
Removes all contacts between `object_one` id and `object_two` id.
"""
# check the validity of inputs
condition = isinstance(object_one, int)
condition = condition and isinstance(object_two, int)
if not condition:
raise ValueError
contact_list = list(self.contacts)
for i in range(len(contact_list)):
cond_1 = contact_list[i][0][0] == object_one
cond_1 = cond_1 and contact_list[i][1][0] == object_two
cond_2 = contact_list[i][0][0] == object_two
cond_2 = cond_2 and contact_list[i][1][0] == object_one
if cond_1 or cond_2:
self.contacts.remove(contact_list[i])
def change_boundaries(self, object_id, boundaries):
"""Change boundaries.
Changes the `boundaries` of `object_id`.
"""
# check the validity of inputs
condition = isinstance(object_id, int)
condition = condition and isinstance(boundaries, tuple)
if condition:
if len(boundaries) == 2:
condition = True
else:
condition = False
if not condition:
raise ValueError
self.objects[object_id].boundaries = boundaries
def compute(self, time_interval, write_interval, solver='implicit_k(x)',
verbose=True):
"""Compute the thermal process.
Computes the system for `time_interval`, and writes into the
`file_name` file every `write_interval` time steps. Four different
solvers can be used: `'explicit_general'`, `'explicit_k(x)'`,
`'implicit_general'`, and `'implicit_k(x)'`. If `verbose = True`, then
the progress of the computation is shown.
"""
# check the validity of inputs
cond1 = isinstance(time_interval, float)
cond1 = cond1 or isinstance(time_interval, int)
cond2 = isinstance(write_interval, int)
cond3 = isinstance(solver, str)
cond4 = isinstance(verbose, bool)
condition = cond1 and cond2 and cond3 and cond4
if not condition:
raise ValueError
# number of time steps for the given timeInterval
nt = int(time_interval / self.dt)
# number of time steps counting from the last writing process
nw = 0
# computes
for j in range(nt):
for obj in self.objects:
obj.Q0 = copy.copy(obj.Q0_ref)
for contact in self.contacts:
ind1 = int(contact[1][1])
ind2 = int(contact[0][1])
td1 = self.objects[contact[1][0]].temperature[ind1][0]
td2 = self.objects[contact[0][0]].temperature[ind2][0]
heat_contact_1 = contact[2] * (td1 - td2)
heat_contact_2 = contact[2] * (td2 - td1)
self.objects[contact[0][0]].Q0[ind2] = heat_contact_1
self.objects[contact[1][0]].Q0[ind1] = heat_contact_2
object_number = -1
for obj in self.objects:
object_number = object_number + 1
obj.time_passed = obj.time_passed + obj.dt
cond1 = object_number not in [l[0] for l in self.boundaries]
if cond1 or (object_number, 0) in self.boundaries:
# defines the material properties
for i in range(1, obj.num_points - 1):
if obj.state[i] is True:
ind = obj.materials_index[i]
obj.rho[i] = obj.materials[ind].rhoa(
obj.temperature[i][0])
obj.Cp[i] = obj.materials[ind].cpa(
obj.temperature[i][0])
obj.k[i] = obj.materials[ind].ka(
obj.temperature[i][0])
if obj.state[i] is False:
ind = obj.materials_index[i]
obj.rho[i] = obj.materials[ind].rho0(
obj.temperature[i][0])
obj.Cp[i] = obj.materials[ind].cp0(
obj.temperature[i][0])
obj.k[i] = obj.materials[ind].k0(
obj.temperature[i][0])
# SOLVERS
# implicit k constant
if solver == 'implicit_general':
value = solvers.implicit_general(obj)
obj.temperature, obj.lheat = value
# implicit k dependent on x
if solver == 'implicit_k(x)':
obj.temperature, obj.lheat = solvers.implicit_k(obj)
# explicit k constant
if solver == 'explicit_general':
value = solvers.explicit_general(obj)
obj.temperature, obj.lheat = value
# explicit k dependent on x
if solver == 'explicit_k(x)':
obj.temperature, obj.lheat = solvers.explicit_k(obj)
# writes the temperature to file_name file ...
# if the number of time steps is verified
if obj.file_name:
if nw + 1 == write_interval or j == 0 or j == nt - 1:
line = '%f' % obj.time_passed
for i in obj.temperature:
new_line = ',%f' % i[1]
line = line + new_line
f = open(obj.file_name, 'a')
f.write(line+'\n')
f.close()
else:
heat = [p*self.dt*obj.dx for p in obj.Q0 if p is not None]
heat = sum(heat)/(len(heat)*obj.dx)
if object_number == self.boundaries[0][0]:
self.q1 = self.q1 + heat
q = self.q1
else:
self.q2 = self.q2 + heat
q = self.q2
# writes the temperature to file_name file ...
# if the number of time steps is verified
if obj.file_name:
if nw + 1 == write_interval or j == 0 or j == nt - 1:
line = '%f' % obj.time_passed
for i in obj.temperature:
new_line = ',%f' % i[1]
line = line + new_line
new_line = ',%f' % q
line = line + new_line
f = open(obj.file_name, 'a')
f.write(line+'\n')
f.close()
if nw == write_interval:
nw = 0
if verbose:
print('progress:', int(100*j/nt), '%', end='\r')
else:
nw = nw + 1
if verbose:
print('Finished simulation')
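# --- Added usage sketch; not part of the original heatrapy module. The contact point indices
# --- and the heat transfer coefficient below are illustrative assumptions; the constructor
# --- arguments mirror the defaults documented in __init__.
def _example_system_objects():  # never called by the library; illustration only
    system = SystemObjects(number_objects=2, materials=('Cu', 'Cu'),
                           objects_length=(10, 10), dx=0.01, dt=0.1)
    # thermal contact: point 5 of object 0 touches point 5 of object 1,
    # with a heat transfer coefficient of 300
    system.contact_add(((0, 5), (1, 5), 300))
    system.compute(time_interval=10, write_interval=100, solver='implicit_k(x)')
    return system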
|
djsilva99/heatrapy
|
heatrapy/dimension_1/objects/system.py
|
system.py
|
py
| 11,858 |
python
|
en
|
code
| 51 |
github-code
|
6
|
39654407914
|
import os
import logging
import yaml
from typing import Dict, Any
from yacs.config import CfgNode as _CfgNode
BASE_KEY = "__BASE__"
class CfgNode(_CfgNode):
@staticmethod
def load_yaml_with_base(filename: str, allow_unsafe: bool = False):
with open(filename, 'r') as file:
try:
cfg = yaml.safe_load(file)
except:
logger = logging.getLogger(__name__)
logger.warning(
"Loading config {} with yaml.unsafe_load. Your machine may "
"be at risk if the file contains malicious content.".format(
filename
)
)
file.close()
with open(filename, "r") as file:
cfg = yaml.unsafe_load(file)
def merge_a_into_b(a: Dict[Any, Any], b: Dict[Any, Any]) -> None:
# merge dict a into dict b. values in a will overwrite b.
for k, v in a.items():
if isinstance(v, dict) and k in b:
assert isinstance( b[k], dict ), "Cannot inherit key '{}' from base!".format(k)
merge_a_into_b(v, b[k])
else:
b[k] = v
if BASE_KEY in cfg:
base_cfg_file = cfg[BASE_KEY]
if base_cfg_file.startswith("~"):
base_cfg_file = os.path.expanduser(base_cfg_file)
if not any(
map(base_cfg_file.startswith, ["/", "https://", "http://"])
):
# the path to base cfg is relative to the config file itself
base_cfg_file = os.path.join(os.path.dirname(filename), base_cfg_file)
base_cfg = CfgNode.load_yaml_with_base(
base_cfg_file, allow_unsafe=allow_unsafe
)
del cfg[BASE_KEY]
merge_a_into_b(cfg, base_cfg)
return base_cfg
return cfg
def merge_from_file(self, cfg_filename: str, allow_unsave: bool=False) -> None:
loaded_cfg = CfgNode.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsave)
loaded_cfg = type(self)(loaded_cfg)
self.merge_from_other_cfg(loaded_cfg)
def merge_from_other_cfg(self, cfg_other):
assert (
BASE_KEY not in cfg_other
), "The reserved key '{}' can only be used in files!".format(BASE_KEY)
return super(CfgNode, self).merge_from_other_cfg(cfg_other)
def merge_from_list(self, cfg_list):
keys = set(cfg_list[0::2])
assert (
BASE_KEY not in keys
), "The reserved key '{}' can only be used in files!".format(BASE_KEY)
return super(CfgNode, self).merge_from_list(cfg_list)
def __setattr__(self, name: str, value: Any) -> None:
if name.startswith("COMPUTED_"):
if name in self:
old_val = self[name]
if old_val == value:
return
raise KeyError(
"Computed attribute '{}' already exists "
"with a different value! old={}, new={}".format(
name, old_val, value
)
)
self[name] = value
else:
super(CfgNode, self).__setattr__(name=name, value=value)
def dump(self, **kwargs):
return super(CfgNode, self).dump()
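# --- Added usage sketch; not part of the original configs/base.py. The file names, keys and
# --- values below are made up; it only shows how "__BASE__" pulls in and overrides a base YAML.
def _example_base_key_usage(tmp_dir="/tmp"):  # illustration only; never called here
    base_path = os.path.join(tmp_dir, "base_example.yaml")
    child_path = os.path.join(tmp_dir, "child_example.yaml")
    with open(base_path, "w") as f:
        f.write("MODEL:\n  NAME: resnet\n  DEPTH: 50\n")
    with open(child_path, "w") as f:
        f.write("__BASE__: base_example.yaml\nMODEL:\n  DEPTH: 101\n")
    cfg = CfgNode(CfgNode.load_yaml_with_base(child_path))
    # cfg.MODEL.NAME == 'resnet' (inherited), cfg.MODEL.DEPTH == 101 (overridden)
    return cfg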
|
lqxisok/llSeg
|
configs/base.py
|
base.py
|
py
| 3,491 |
python
|
en
|
code
| 2 |
github-code
|
6
|
9649531902
|
class Node:
def __init__(self,value):
self.value=value
self.left=None
self.right=None
def find_max(node):
# recursively find the largest value in the binary tree rooted at `node`; returns -1 for an empty tree
max1=max2=0
if not node:
return -1
else:
res=node.value
if node.left:
max1=find_max(node.left)
if node.right:
max2=find_max(node.right)
if res<max1:
res=max1
if res<max2:
res=max2
return res
if __name__=="__main__":
node=Node(12)
node.left=Node(21)
node.right=Node(14)
node.left.left=Node(16)
node.left.right=Node(19)
node.right.left=Node(23)
node.right.right=Node(17)
print(find_max(node))
|
babiswas2020/Python
|
tree_54.py
|
tree_54.py
|
py
| 651 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
71927845947
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
import logging
import sys
import os
import urlparse
import xbmcgui
import xbmcplugin
import xbmcaddon
from resources.lib import loghandler
loghandler.config()
LOG = logging.getLogger()
PLUGIN_PATH = 'plugin://plugin.video.proof-of-concept'
__addon__ = xbmcaddon.Addon()
__addon_path__ = __addon__.getAddonInfo('path').decode('utf-8')
# Dummy video file with a length of 10min, 5s
VIDEO_FILE_PATH = os.path.join(__addon_path__, 'dummy-movie.mkv').encode('utf-8')
TOTAL_LENGTH = 10 * 60 + 5
RESUME = 5 * 60
def directory_item(label, path):
"""
Adds an xbmcplugin.addDirectoryItem() directory listitem
"""
listitem = xbmcgui.ListItem(label, path=path)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
url=path,
listitem=listitem,
isFolder=True)
def main_menu():
xbmcplugin.setContent(int(sys.argv[1]), 'files')
directory_item('Proof of concept',
'%s/?mode=demo' % PLUGIN_PATH)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def show_demo():
xbmcplugin.setContent(int(sys.argv[1]), 'movies')
listitem = xbmcgui.ListItem('Demo video file',
path=VIDEO_FILE_PATH)
# PROOF-OF-CONCEPT: Let's add a resume point
listitem.setProperty("totaltime", str(TOTAL_LENGTH))
listitem.setProperty("resumetime", str(RESUME))
listitem.setProperty("StartOffset", str(RESUME))
# END
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
url=VIDEO_FILE_PATH,
listitem=listitem)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
if __name__ == '__main__':
LOG.info('Full sys.argv received: %s', sys.argv)
args = sys.argv[2][1:].decode('utf-8')
args = dict(urlparse.parse_qsl(args))
mode = args.get('mode')
if mode == 'demo':
show_demo()
else:
main_menu()
|
croneter/plugin.video.proof-of-concept
|
default.py
|
default.py
|
py
| 2,070 |
python
|
en
|
code
| 1 |
github-code
|
6
|
22514895256
|
#!/usr/bin/env python3
from fastapi import APIRouter, Body, Request, Response, HTTPException, status
from bson.objectid import ObjectId
from typing import List
from lib.mongo import insert_one, find_one, find_many, update_one
from models.prescription import Prescription, PrescriptionUpdate
router = APIRouter()
coll = "prescription"
@router.get("/{nss}", response_description="Get all prescriptions for a patient", status_code=status.HTTP_200_OK,
response_model=List[Prescription])
def find_precriptions(request: Request, nss: str):
find_criteria = {"nss": nss}
return find_many(request, find_criteria, coll)
@router.post("/", response_description="Create a new prescription", status_code=status.HTTP_201_CREATED,
response_model=Prescription)
def create_prescription(request: Request, prescription: PrescriptionUpdate = Body(...)):
inserted = insert_one(request, prescription, coll)
return find_one(request, {'_id': inserted.inserted_id}, coll)
@router.post("/associate_checkup",
response_description="Links a checkup to the 'consulta' field of a prescription",
status_code=status.HTTP_200_OK,
response_model=Prescription)
def associate_checkup_with_prescription(request: Request, data=Body(...)):
print(data)
prescription_find_criteria = {"_id": ObjectId(data['prescription_id'])}
update_one(request, prescription_find_criteria, {
"$set": {
"consulta": data['checkup_id']
}
}, coll)
return find_one(request, prescription_find_criteria, coll)
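# Added illustration; not part of the original router. The associate_checkup endpoint above
# expects a JSON body like the following (the ObjectId strings are made up):
# POST /associate_checkup
# {"prescription_id": "64a0f8c2e13f4b2a9c1d0e11", "checkup_id": "64a0f8c2e13f4b2a9c1d0e12"}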
|
Serlych/national-medical-record
|
routes/prescription.py
|
prescription.py
|
py
| 1,582 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13126886716
|
from itertools import combinations
def make_all_cases(user_info_array):
# build every masked key for this user: each of the first four attributes is either kept or replaced by "-"
all_cases_from_user = [];
for i in range(5):
combination_array = combinations([0,1,2,3],i)
for combination in combination_array:
case = "" # e.g. the empty combination gives "----"
for j in range(4):
if j in combination:
case += user_info_array[j]
else :
case += "-"
all_cases_from_user.append(case);
return all_cases_from_user
def get_lower_bound(target,array):
# binary search for the index of the first element in `array` that is >= target
current_min = 0;
current_max = len(array)
while current_min < current_max:
current_guess = (current_min + current_max) // 2;
if array[current_guess] >= target:
current_max = current_guess;
else:
current_min = current_guess +1;
return current_max
def solution(info, query):
answer = [];
all_cases_from_users = {}
for user_info in info:
user_info_array = user_info.split()
all_cases_from_user = make_all_cases(user_info_array);
for case in all_cases_from_user:
if case not in all_cases_from_users.keys():
all_cases_from_users[case] = [int(user_info_array[4])]
else :
all_cases_from_users[case].append(int(user_info_array[4]))
for key in all_cases_from_users.keys():
all_cases_from_users[key].sort()
for query_info in query:
query_info_array = query_info.split()
case = query_info_array[0] + query_info_array[2] +query_info_array[4] + query_info_array[6];
if case in all_cases_from_users.keys():
target_users = all_cases_from_users[case]
answer.append(len(target_users) - get_lower_bound(int(query_info_array[7]), target_users))
else :
answer.append(0)
return answer
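# --- Added illustration; not part of the original solution. The attribute words are made-up
# --- samples in the format the split() calls above imply: info rows have four attributes and
# --- a score, query rows join four attributes (or "-") with "and" and end with a minimum score.
def _example_solution():  # illustration only
    info = ["java backend junior pizza 150", "python frontend senior chicken 210"]
    query = ["java and backend and junior and pizza 100", "- and - and - and - 150"]
    return solution(info, query)  # -> [1, 2]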
|
39world/Today-Algorithm-Study-
|
old_test/al_pg_08.py
|
al_pg_08.py
|
py
| 2,005 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41040069910
|
from tracemalloc import start
import pyaudio
import wave
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.ticker import *
import numpy as np
import struct
import time
from scipy import interpolate
plt.style.use('gray-background')
class Fourier:
def __init__(self, scale, dt):
self.scale = scale
def fourier(self, f): # the human audible range is 20~20,000 Hz
f = np.array([f]).reshape(-1)
len_f = len(f)
# resize
inv_F_ = np.resize(f, int(len_f*self.scale))
# resampling
t = np.arange(0, len(inv_F_))
f_linear = interpolate.interp1d(t, inv_F_, kind='cubic')
t = np.arange(0, len(inv_F_)-1.0, self.scale)
inv_F_ = f_linear(t)
inv_F_ = np.array(inv_F_, dtype='int16')
binv_F = struct.pack('h' * len(inv_F_), *inv_F_) #convert to binary (bytes)
return binv_F
class Audio:
def __init__(self, chunk=2**10, format=pyaudio.paInt16, channels=1, rate=44100,
record_time=50, interval=0.01, output_path="./data/output.wav"):
self.chunk = chunk #buffer size
self.format = format #quantization bit depth (resolution); above 16 bit humans can hardly hear a difference
self.channels = channels #number of microphones used for input
self.rate = rate #sampling frequency
self.record_time = record_time #recording time
self.interval = interval #interval between graph outputs [ms]
self.output_path = output_path #name of the file the data is written to
self.p = pyaudio.PyAudio() #set up the PyAudio instance
self.stream = self.p.open(format=self.format,
channels=self.channels,
rate=self.rate,
input=True, output=True,
frames_per_buffer=self.chunk) #stream parameter setup
def exit(self):
self.stream.stop_stream() # stop playback/recording
self.stream.close() # close the stream
self.p.terminate() # release the PyAudio instance
class Output:
def __init__(self, audio, scale=1):
self.audio = audio
del_x = 1/self.audio.rate
self.end_t = del_x*self.audio.chunk
self.scale = scale
#self.frames = []
def draw_init(self, ax):
ax.set_xlabel('Time')
ax.set_ylabel('Amplitude')
def draw(self):
frames = []
f = Fourier(scale=self.scale, dt=self.audio.interval)
print("Recording ...")
# for i in range(0, int(self.audio.rate / self.audio.chunk * self.audio.record_time)):
while self.audio.stream.is_active():
data = self.audio.stream.read(self.audio.chunk)
wavy_ = np.frombuffer(data, dtype='int16')
binv_F = f.fourier(wavy_)
self.audio.stream.write(binv_F)
# frames.append(binv_F)
print("Done.")
return frames
def write(self, frames): # ใใผใฟใฎๆธใ่พผใฟ
wf = wave.open(self.audio.output_path, 'wb')
wf.setnchannels(self.audio.channels)
wf.setsampwidth(self.audio.p.get_sample_size(self.audio.format))
wf.setframerate(self.audio.rate*self.scale)
wf.writeframes(b''.join(frames))
wf.close()
if __name__=="__main__":
scale = 2.0
audio = Audio()
output=Output(audio, scale=scale)
frames = output.draw()
# output.write(frames)
audio.exit()
|
MoeMatsuda-ai/SWVC
|
test/fft_live_test/inout_live.py
|
inout_live.py
|
py
| 3,531 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31653995407
|
#!/usr/local/bin/python3
from priority_queue import *
from graph_adt import *
def dijkstra(G, s):
""" Performs Dijkstra's algorithm to find the shortest path from a single source
to all other vertices in a weighted graph.
Parameters:
G - Graph represented with an adjacency list mapping the vertices to lists of edges
s - source vertex
Returns:
A list of the settled vertices in the order their shortest distances were
finalized; each vertex's parent link encodes the shortest-path tree edge.
"""
q_cap = G.vertex_count() + G.edge_count() #capacity of the priority queue
S = []
Q = PriorityQueue(q_cap)
s.set_d_val(0) #initialize source's current distance
Q.insert(0, s)
while not Q.is_empty() :
min_element = Q.extract_min()
u = min_element.get_value()
if u not in S:
S.append(u)
for e in G.Adj[u]:
priority, v = relax(u, e.opposite(u), e.get_weight())
if priority and v:
Q.insert(priority, v)
return S
def relax(u, v, w):
""" Performs edge relaxation during Dijkstra's exploration
Parameters:
u - source node
v - destination node
w - weight from u to v
Returns:
tuple: (updated weight, v), if relaxation was performed.
v is updated with its new parent.
"""
if v.get_d_val() > (u.get_d_val() + w):
v.set_d_val(u.get_d_val() + w)
v.set_parent(u) #make u the parent of v
return(v.get_d_val(), v)
else:
return (None, None)
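# Added worked example, not part of the original file: if u.get_d_val() == 5, w == 3 and
# v.get_d_val() is currently 10, then 5 + 3 = 8 < 10, so v's distance becomes 8, u becomes
# v's parent, and (8, v) is returned so the caller can re-insert v into the priority queue.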
def main():
#Instantiate undirected graph
Gr = Graph()
#Create vertices
W = Gr.insert_vertex("w")
P = Gr.insert_vertex("p")
Y = Gr.insert_vertex("y")
R = Gr.insert_vertex("r")
B = Gr.insert_vertex("b")
#Create edges
W_P = Gr.insert_edge(W, P, 7)
W_Y = Gr.insert_edge(W, Y, 19)
P_Y = Gr.insert_edge(P, Y, 11)
P_R = Gr.insert_edge(P, R, 15)
P_B = Gr.insert_edge(P, B, 5)
Y_R = Gr.insert_edge(Y, R, 4)
B_R = Gr.insert_edge(B, R, 13)
print("Number of vertices: ", Gr.vertex_count())
print("Number of edges: ", Gr.edge_count())
paths = dijkstra(Gr, R)
print("Shortest paths (parent, destination):")
for node in paths:
parent = node.get_parent().get_element() if node.get_parent() is not None else None
print(parent, ", ", node.get_element())
if __name__ == '__main__':
main()
|
ilee38/practice-python
|
graphs/dijkstra.py
|
dijkstra.py
|
py
| 2,316 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20250267652
|
from mimetypes import init
import requests
import urllib.parse
import json
class MapBox:
def __init__(self, access_token) -> None:
self.root_url = "https://api.mapbox.com/geocoding/v5/mapbox.places/{}.json?types=place%2Caddress%2Cregion&access_token={}"
self.access_token = access_token
def getCoordinates(self, location_str):
if location_str == "":
return (0,0)
formatted_location = urllib.parse.quote(location_str)
url = self.root_url.format(formatted_location, self.access_token)
response = requests.get(url)
data = json.loads(response.text)
if (len(data["features"]) > 0):
coordinates = data["features"][0]["center"]
if coordinates != None and len(coordinates) == 2:
# Mapbox returns [longitude, latitude]; swap to (latitude, longitude)
return (coordinates[1], coordinates[0])
else:
return (0,0)
return (0,0) # fall through: no features matched the query
mb = MapBox("pk.eyJ1IjoiYW5kcmV3aHVhbmciLCJhIjoiY2t5a3dzbDMxMWdrMTJ4b2wzMjlqNXZvNyJ9.K6nzS4XPLOfQ0srwV3M5rw")
# https://api.mapbox.com/geocoding/v5/mapbox.places/Collegeville%2C%20PA.json?access_token=pk.eyJ1IjoiYW5kcmV3aHVhbmciLCJhIjoiY2t5a3dyZWJvMzBrMTJxcG0xenBtYTdhZiJ9.uFJLIrcDl4OHJu1S-To2xA
# https://api.mapbox.com/geocoding/v5/mapbox.places/Collegeville%2C%20PA..hson?access_token=pk.eyJ1IjoiYW5kcmV3aHVhbmciLCJhIjoiY2t5a3dzbDMxMWdrMTJ4b2wzMjlqNXZvNyJ9.K6nzS4XPLOfQ0srwV3M5rw
|
andrewhuang427/WashU-Athletics-Demographics
|
utils/MapBox.py
|
MapBox.py
|
py
| 1,369 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35041229962
|
from toscaparser.imports import ImportsLoader
from configuration_tool.common import utils
from configuration_tool.common.configuration import Configuration
from configuration_tool.common.tosca_reserved_keys import *
from configuration_tool.providers.common.provider_configuration import ProviderConfiguration
from configuration_tool.providers.common.provider_resource import ProviderResource
import os, copy, logging, sys
SEPARATOR = ':'
class ProviderToscaTemplate(object):
REQUIRED_CONFIG_PARAMS = (TOSCA_ELEMENTS_MAP_FILE, TOSCA_ELEMENTS_DEFINITION_FILE)
DEPENDENCY_FUNCTIONS = (GET_PROPERTY, GET_ATTRIBUTE, GET_OPERATION_OUTPUT)
DEFAULT_ARTIFACTS_DIRECTOR = ARTIFACTS
def __init__(self, template, provider, configuration_tool, cluster_name, host_ip_parameter, is_delete,
grpc_cotea_endpoint):
self.host_ip_parameter = host_ip_parameter
self.provider = provider
self.grpc_cotea_endpoint = grpc_cotea_endpoint
self.is_delete = is_delete
self.configuration_tool = configuration_tool
self.provider_config = ProviderConfiguration(self.provider)
self.base_config = Configuration()
self.cluster_name = cluster_name
self.software_types = set()
for sec in self.REQUIRED_CONFIG_PARAMS:
if not self.provider_config.config[self.provider_config.MAIN_SECTION].get(sec):
logging.error("Provider configuration parameter \'%s\' has missing value" % sec)
logging.error("Translating failed")
raise Exception("Provider configuration parameter \'%s\' has missing value" % sec)
self.definitions = {}
import_definition_file = ImportsLoader([self.definition_file()], None, list(SERVICE_TEMPLATE_KEYS),
template.get(TOPOLOGY_TEMPLATE))
self.definitions.update(import_definition_file.get_custom_defs())
import_definition_file = ImportsLoader(self.base_definition_file(), None, list(SERVICE_TEMPLATE_KEYS),
template.get(TOPOLOGY_TEMPLATE))
self.definitions.update(import_definition_file.get_custom_defs())
self.definitions.update(template.get(NODE_TYPES, {}))
self.definitions.update(template.get(RELATIONSHIP_TYPES, {}))
self.definitions.update(template.get(CAPABILITY_TYPES, {}))
self.definitions.update(template.get(DATA_TYPES, {}))
self.definitions.update(template.get(POLICY_TYPES, {}))
self.definitions.update(template.get(GROUP_TYPES, {}))
self.definitions.update(template.get(INTERFACE_TYPES, {}))
self.fulfil_definitions_with_parents()
self.node_templates = {}
self.relationship_templates = {}
self.inputs = {}
self.outputs = {}
if template.get(TOPOLOGY_TEMPLATE).get(NODE_TEMPLATES):
self.node_templates = template.get(TOPOLOGY_TEMPLATE)[NODE_TEMPLATES]
if template.get(TOPOLOGY_TEMPLATE).get(RELATIONSHIP_TEMPLATES):
self.relationship_templates = template.get(TOPOLOGY_TEMPLATE)[RELATIONSHIP_TEMPLATES]
if template.get(TOPOLOGY_TEMPLATE).get(OUTPUTS):
self.outputs = template.get(TOPOLOGY_TEMPLATE)[OUTPUTS]
if template.get(TOPOLOGY_TEMPLATE).get(INPUTS):
self.inputs = template.get(TOPOLOGY_TEMPLATE)[INPUTS]
self.configuration_content = None
self.configuration_ready = None
self.template_dependencies = dict()
self._relation_target_source = dict()
self.resolve_in_template_dependencies()
# After this step self.node_templates has requirements with node_filter parameter
self.replace_requirements_with_node_filter()
self.provider_nodes = self._provider_nodes()
self.provider_relations = self._provider_relations()
self.provider_operations, self.reversed_provider_operations = self.sort_nodes_and_operations_by_graph_dependency()
def resolve_in_template_dependencies(self):
"""
TODO think through the logic to replace mentions by id
Changes all mentions of node_templates by name in requirements, placing a dictionary with node_filter instead
:return:
"""
for node_name, node in self.node_templates.items():
for req in node.get(REQUIREMENTS, []):
for req_name, req_body in req.items():
# Valid keys are ('node', 'node_filter', 'relationship', 'capability', 'occurrences')
# Only node and relationship might be a template name or a type
req_relationship = req_body.get(RELATIONSHIP)
req_node = req_body.get(NODE)
if req_relationship is not None:
(_, _, type_name) = utils.tosca_type_parse(req_relationship)
if type_name is None:
self.add_template_dependency(node_name, req_relationship)
self._relation_target_source[req_relationship] = {
'source': node_name,
'target': req_node
}
if req_node is not None:
(_, _, type_name) = utils.tosca_type_parse(req_node)
if type_name is None:
self.add_template_dependency(node_name, req_node)
node_types_from_requirements = set()
req_definitions = self.definitions[node[TYPE]].get(REQUIREMENTS, [])
for req in req_definitions:
for req_name, req_def in req.items():
if req_def.get(NODE, None) is not None:
if req_def[NODE] != node[TYPE]:
node_types_from_requirements.add(req_def[NODE])
for req_node_name, req_node_tmpl in self.node_templates.items():
if req_node_tmpl[TYPE] in node_types_from_requirements:
self.add_template_dependency(node_name, req_node_name)
def add_template_dependency(self, node_name, dependency_name):
if not dependency_name == SELF and not node_name == dependency_name:
if self.template_dependencies.get(node_name) is None:
self.template_dependencies[node_name] = {dependency_name}
else:
self.template_dependencies[node_name].add(dependency_name)
def base_definition_file(self):
file_definitions = self.base_config.config['main'][TOSCA_ELEMENTS_DEFINITION_FILE].split(',')
def_list = []
for file_definition in file_definitions:
if not os.path.isabs(file_definition):
file_definition = os.path.join(utils.get_project_root_path(), file_definition)
def_list.append(file_definition)
if not os.path.isfile(file_definition):
logging.error("TOSCA definition file not found: %s" % file_definition)
raise Exception("TOSCA definition file not found: %s" % file_definition)
return def_list
def definition_file(self):
file_definition = self.provider_config.config['main'][TOSCA_ELEMENTS_DEFINITION_FILE]
if not os.path.isabs(file_definition):
file_definition = os.path.join(self.provider_config.config_directory, file_definition)
if not os.path.isfile(file_definition):
logging.error("TOSCA definition file not found: %s" % file_definition)
raise Exception("TOSCA definition file not found: %s" % file_definition)
return file_definition
def replace_requirements_with_node_filter(self):
for node_name, node in self.node_templates.items():
for req in node.get(REQUIREMENTS, []):
for req_name, req_body in req.items():
if req_body.get(NODE):
node_tmpl = self.node_templates.get(req_body[NODE])
node_filter = dict()
properties = node_tmpl.get(PROPERTIES)
props_list = []
if properties:
for prop_name, prop in properties.items():
props_list.append({prop_name: prop})
capabilities = node_tmpl.get(CAPABILITIES)
caps_list = []
if capabilities:
for cap_name, cap in capabilities.items():
cap_props = cap.get(PROPERTIES, {})
cap_props_list = []
for prop_name, prop in cap_props.items():
cap_props_list.append({prop_name, prop})
caps_list.append({PROPERTIES: cap_props_list})
if properties:
node_filter[PROPERTIES] = props_list
if capabilities:
node_filter[CAPABILITIES] = caps_list
req_body[NODE_FILTER] = node_filter
req[req_name] = req_body
def _provider_nodes(self):
"""
Create a list of ProviderResource classes to represent a node in TOSCA
:return: list of class objects inherited from ProviderResource
"""
provider_nodes = dict()
for node_name, node in self.node_templates.items():
(namespace, category, type_name) = utils.tosca_type_parse(node[TYPE])
is_software_component = node[TYPE] in self.software_types
if namespace != self.provider and not is_software_component or category != NODES:
logging.error('Unexpected values: node \'%s\' is not a software component and has provider \'%s\'. '
'Node will be ignored' % (node_name, namespace))
else:
provider_node_instance = ProviderResource(self.provider, self.is_delete, self.grpc_cotea_endpoint, self.configuration_tool, node,
node_name,
self.host_ip_parameter, self.definitions[node[TYPE]],
is_software_component=is_software_component)
provider_nodes[node_name] = provider_node_instance
return provider_nodes
def _provider_relations(self):
provider_relations = dict()
for rel_name, rel_body in self.relationship_templates.items():
provider_rel_instance = ProviderResource(self.provider, self.is_delete, self.grpc_cotea_endpoint, self.configuration_tool, rel_body,
rel_name,
self.host_ip_parameter, self.definitions[rel_body[TYPE]],
is_relationship=True,
relation_target_source=self._relation_target_source)
provider_relations[rel_name] = provider_rel_instance
return provider_relations
def _provider_nodes_by_name(self):
"""
Get provider_nodes_by_name
:return: self.provider_nodes_by_name
"""
provider_nodes_by_name = dict()
for node in self.provider_nodes:
provider_nodes_by_name[node.nodetemplate.name] = node
return provider_nodes_by_name
def sort_nodes_and_operations_by_graph_dependency(self):
"""
This method generates a dict of ProviderTemplates with their operations, sorted by
dependencies from the normative and provider TOSCA templates
"""
nodes = set(self.provider_nodes.keys())
nodes = nodes.union(set(self.provider_relations.keys()))
dependencies = {}
lifecycle = ['configure', 'start', 'stop'] # ['delete'] we can't support deleting while creating for now,
# delete operations execute only when the --delete option is activated
reversed_full_lifecycle = lifecycle[::-1] + ['create']
# generate only dependencies from nodes
for templ_name in nodes:
set_intersection = nodes.intersection(self.template_dependencies.get(templ_name, set()))
templ = self.provider_nodes.get(templ_name, self.provider_relations.get(templ_name))
(_, element_type, _) = utils.tosca_type_parse(templ.type)
if element_type == NODES:
if INTERFACES in templ.tmpl and 'Standard' in templ.tmpl[INTERFACES]:
new_operations = ['create']
# operation create always exists
for elem in lifecycle:
if elem in templ.tmpl[INTERFACES]['Standard']:
new_operations.append(elem)
# if there are any other operations - add them to new_operations and translate to a dict
# in the format {node.op: {node1, node2}}
# meaning node requires node1 and node2
if len(new_operations) == 1:
utils.deep_update_dict(dependencies, {templ_name + SEPARATOR + 'create': set_intersection})
else:
for i in range(1, len(new_operations)):
utils.deep_update_dict(dependencies, {
templ_name + SEPARATOR + new_operations[i]: {
templ_name + SEPARATOR + new_operations[i - 1]}})
utils.deep_update_dict(dependencies,
{templ_name + SEPARATOR + new_operations[0]: set_intersection})
else:
utils.deep_update_dict(dependencies, {templ_name + SEPARATOR + 'create': set_intersection})
new_dependencies = {}
# new_dependencies is needed for updating set operations
# dict must be in format {node.op: {node1, node2}}
for key, value in dependencies.items():
new_set = set()
for elem in value:
for oper in reversed_full_lifecycle:
if elem + SEPARATOR + oper in dependencies:
new_set.add(elem + SEPARATOR + oper)
break
elif elem in dependencies:
new_set.add(elem)
break
new_dependencies[key] = new_set
# Adding relationships operations pre_configure_source after create source node
# pre_configure_target after create target node
# add_source in parallel with pre_configure_source but it will be executed on the target
# post_configure_target after configure of the target node (if there is no configure then after create - in parallel
# with pre_configure_target)
# post_configure_source after configure of the source node (if there is no configure then after create - in parallel
# with pre_configure_source)
# other - not supported!
for templ_name in nodes:
templ = self.provider_nodes.get(templ_name, self.provider_relations.get(templ_name))
(_, element_type, _) = utils.tosca_type_parse(templ.type)
if element_type == RELATIONSHIPS:
if INTERFACES in templ.tmpl and 'Configure' in templ.tmpl[INTERFACES]:
if 'pre_configure_source' in templ.tmpl[INTERFACES]['Configure']:
new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.source,
'pre_configure_source', 'create', ['add_source'])
if 'pre_configure_target' in templ.tmpl[INTERFACES]['Configure']:
new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.target,
'pre_configure_target', 'create')
if 'post_configure_source' in templ.tmpl[INTERFACES]['Configure']:
if templ.source + SEPARATOR + 'configure' in new_dependencies:
new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.source,
'post_configure_source', 'configure')
else:
new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.source,
'post_configure_source', 'create')
if 'post_configure_target' in templ.tmpl[INTERFACES]['Configure']:
if templ.target + SEPARATOR + 'configure' in new_dependencies:
new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.target,
'post_configure_target', 'configure')
else:
new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.target,
'post_configure_target', 'create')
if 'add_source' in templ.tmpl[INTERFACES]['Configure']:
new_dependencies = self.update_relationships(new_dependencies, templ.name, templ.source,
'add_source', 'create', ['pre_configure_source'])
if 'add_target' in templ.tmpl[INTERFACES]['Configure']:
logging.warning('Operation add_target not supported, it will be skipped')
if 'target_changed' in templ.tmpl[INTERFACES]['Configure']:
logging.warning('Operation target_changed not supported, it will be skipped')
if 'remove_target' in templ.tmpl[INTERFACES]['Configure']:
logging.warning('Operation remove_target not supported, it will be skipped')
# mapping strings 'node.op' to provider template of this node with this operation
templ_mappling = {}
for elem in new_dependencies:
templ_name = elem.split(SEPARATOR)[0]
templ = copy.deepcopy(self.provider_nodes.get(templ_name, self.provider_relations.get(templ_name)))
templ.operation = elem.split(SEPARATOR)[1]
if INTERFACES in templ.tmpl:
if 'Configure' in templ.tmpl[INTERFACES]:
templ.tmpl[INTERFACES]['Configure'] = {templ.operation: templ.tmpl[INTERFACES]['Configure'][templ.operation]}
if 'Standard' in templ.tmpl[INTERFACES]:
templ.tmpl[INTERFACES]['Standard'] = {templ.operation: templ.tmpl[INTERFACES]['Standard'][templ.operation]}
templ_mappling[elem] = templ
templ_dependencies = {}
reversed_templ_dependencies = {}
        # create a dict where every element is replaced with its provider template from templ_mappling
        # reversed_templ_dependencies is needed for delete - it is just a reversed version of the graph
for key, value in new_dependencies.items():
new_list = []
for elem in value:
new_list.append(templ_mappling[elem])
if templ_mappling[elem] not in reversed_templ_dependencies:
reversed_templ_dependencies[templ_mappling[elem]] = [templ_mappling[key]]
elif templ_mappling[key] not in reversed_templ_dependencies[templ_mappling[elem]]:
reversed_templ_dependencies[templ_mappling[elem]].append(templ_mappling[key])
templ_dependencies[templ_mappling[key]] = new_list
if len(templ_dependencies) <= 1:
reversed_templ_dependencies = copy.copy(templ_dependencies)
return templ_dependencies, reversed_templ_dependencies
def update_relationships(self, new_dependencies, templ_name, direction, rel_name, post_op, banned_ops=[]):
utils.deep_update_dict(new_dependencies, {
templ_name + SEPARATOR + rel_name: {direction + SEPARATOR + post_op}})
for key, value in new_dependencies.items():
for elem in value:
if elem == direction + SEPARATOR + post_op and key != templ_name + SEPARATOR + rel_name and \
key not in [templ_name + SEPARATOR + x for x in banned_ops]:
utils.deep_update_dict(new_dependencies,
{key: {templ_name + SEPARATOR + rel_name}})
return new_dependencies
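    # Illustrative note (added, not in the original source), assuming SEPARATOR joins names as 'node.op':
    # update_relationships(deps, 'rel1', 'node_a', 'pre_configure_source', 'create') roughly adds
    # {'rel1.pre_configure_source': {'node_a.create'}} and makes every other operation that already
    # depends on 'node_a.create' (except those listed in banned_ops) also wait for 'rel1.pre_configure_source'.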
def _get_full_defintion(self, definition, def_type, ready_set):
if def_type in ready_set:
return definition, def_type in self.software_types
(_, _, def_type_short) = utils.tosca_type_parse(def_type)
is_software_type = def_type_short == 'SoftwareComponent'
is_software_parent = False
parent_def_name = definition.get(DERIVED_FROM, None)
if parent_def_name is not None:
if def_type == parent_def_name:
logging.critical("Invalid type \'%s\' is derived from itself" % def_type)
raise Exception("Invalid type \'%s\' is derived from itself" % def_type)
if parent_def_name in ready_set:
parent_definition = self.definitions[parent_def_name]
is_software_parent = parent_def_name in self.software_types
else:
parent_definition, is_software_parent = \
self._get_full_defintion(self.definitions[parent_def_name], parent_def_name, ready_set)
parent_definition = copy.deepcopy(parent_definition)
definition = utils.deep_update_dict(parent_definition, definition)
if is_software_type or is_software_parent:
self.software_types.add(def_type)
ready_set.add(def_type)
return definition, def_type in self.software_types
def fulfil_definitions_with_parents(self):
ready_definitions = set()
for def_name, definition in self.definitions.items():
self.definitions[def_name], _ = self._get_full_defintion(definition, def_name, ready_definitions)
if self.definitions[def_name].get(DERIVED_FROM):
del self.definitions[def_name][DERIVED_FROM]
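# Minimal illustration (added comment, assumption only): with definitions such as
# {'A': {'derived_from': 'B', 'x': 1}, 'B': {'y': 2}} and DERIVED_FROM == 'derived_from',
# fulfil_definitions_with_parents() leaves 'A' as {'x': 1, 'y': 2}, i.e. the parent's keys
# are merged in and the 'derived_from' marker is dropped.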
|
sadimer/clouni_configuration_tool
|
configuration_tool/providers/common/tosca_template.py
|
tosca_template.py
|
py
| 22,563 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32659197304
|
from werkzeug.exceptions import ClientDisconnected
from flask import Flask, request
from flask import current_app
from flask_cache import Cache
from mongoengine import connect
from flask_superadmin import Admin
from flask_mail import Mail
from flaskext.markdown import Markdown
from flask_restful import Api
from reverse_proxied import ReverseProxied
from assets import assets
import json
class ExtensionAccessObject(object):
def __init__(self):
self.cache = Cache(current_app, config={'CACHE_TYPE': 'simple'})
self.mongo = connect(current_app.config["MONGO_DB"])
self.mail = Mail(current_app)
self.admin = Admin(current_app)
self.rest_api = Api(current_app, prefix="/api")
self.markdown = Markdown(current_app, safe_mode="escape")
self.assets = assets(current_app)
def construct_application(config_override=None):
# Setup App
application = Flask(__name__)
# Setup Extensions
ReverseProxied(application)
# Setup Jinja Env
application.jinja_env.add_extension('jinja2.ext.do')
from util import pretty_date_since, full_date
application.jinja_env.filters['pretty_date'] = pretty_date_since
application.jinja_env.filters['full_date'] = full_date
application.jinja_env.filters['json_dump'] = json.dumps
# Load local_config
with application.app_context():
from config import local_config
application.config.from_object(local_config)
application.config.from_object(config_override)
with application.app_context():
application.extension_access_object = ExtensionAccessObject()
# Load blueprints files
with application.app_context():
from config import blueprint_config
application.config.from_object(blueprint_config)
# Setup blueprints from config
    for blueprint in application.config["BLUEPRINTS"]:  # TODO: find a cleaner way to register these
application.register_blueprint(**blueprint)
# Read the git hash from a file. This should be set by the deploy script
try:
with open('version_hash', 'r') as version_file:
application.config['version_hash'] = version_file.readline()
except IOError:
application.config['version_hash'] = "DEVELOP"
# Setup airbrake/errbit
if application.config.get('AIRBRAKE_ENABLED', True):
from airbrake import AirbrakeErrorHandler
from flask.signals import got_request_exception
@got_request_exception.connect_via(application)
def log_exception(sender, exception, **extra):
if isinstance(exception, (ClientDisconnected, )):
return
handler = AirbrakeErrorHandler(
api_key=application.config['AIRBRAKE_API_KEY'],
api_url=application.config['AIRBRAKE_API_URL'],
env_name=application.config['version_hash'],
env_variables={'type': 'caught'},
request_url=request.url,
request_path=request.path,
request_method=request.method,
request_args=request.args,
request_headers=request.headers)
handler.emit(exception)
def log_error(exception):
handler = AirbrakeErrorHandler(
api_key=application.config['AIRBRAKE_API_KEY'],
api_url=application.config['AIRBRAKE_API_URL'],
env_name=application.config['version_hash'],
env_variables={'type': 'logged'},
request_url=request.url,
request_path=request.path,
request_method=request.method,
request_args=request.args,
request_headers=request.headers)
handler.emit(exception)
application.log_error = log_error
else:
def dummy_log_error(exception):
print(exception)
application.log_error = dummy_log_error
# Load debug stuffs
if application.config['DEBUG']:
with application.app_context():
import debug
debug.setup_env()
return application
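# Hedged usage sketch (added; not part of the original module). Assumes the local
# config provides everything the extensions need, and uses Flask's dev server only.
if __name__ == "__main__":
    app = construct_application()
    # built-in development server; a real deployment would sit behind a WSGI server
    app.run(debug=app.config.get("DEBUG", False))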
|
JunctionAt/JunctionWWW
|
constructor.py
|
constructor.py
|
py
| 4,126 |
python
|
en
|
code
| 1 |
github-code
|
6
|
477748253
|
import torch as t
import ipdb
class AttentionPooling(t.nn.Module):
def __init__(self, input_size, hidden_size, dropout):
super(AttentionPooling, self).__init__()
self.projection1 = t.nn.Linear(input_size, hidden_size, bias=True)
self.dropout = t.nn.Dropout(dropout)
self.projection2 = t.nn.Linear(hidden_size, 1, bias=False)
self.projection3 = t.nn.Linear(input_size, hidden_size)
t.nn.init.xavier_normal_(self.projection1.weight)
t.nn.init.xavier_normal_(self.projection2.weight)
t.nn.init.xavier_normal_(self.projection3.weight)
def forward(self, inputs, input_mask=None):
"""
:param inputs: [B, L, E]
:param input_mask: [B, L]
:return: [B, E]
"""
if input_mask is not None:
input_mask = input_mask.byte()
net = t.nn.functional.tanh(self.projection1(inputs))
# [B, L, H]
net = self.projection2(net).squeeze(-1)
# [B, L, 1]
if input_mask is not None:
net = net.masked_fill(1-input_mask, -float('inf'))
net = t.nn.functional.softmax(net, -1).unsqueeze(-1)
# [B, L, 1]
net = inputs * net
# [B, L, E]
net = net.sum(-2)
net = self.projection3(net)
# [B, E]
return net
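# Hedged usage sketch (added; not part of the original module): shapes only, sizes are arbitrary.
if __name__ == "__main__":
    pooling = AttentionPooling(input_size=300, hidden_size=128, dropout=0.1)
    example = t.randn(2, 10, 300)   # [B, L, E]
    pooled = pooling(example)       # no mask: every position is attended to
    print(pooled.shape)             # torch.Size([2, 128]), i.e. [B, hidden_size]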
|
CNDPlab/MSMARCO_Reshaped
|
Predictor/ModelUtils/query_pooling.py
|
query_pooling.py
|
py
| 1,316 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18716841287
|
#! /user/bin/env python
# -*- coding:utf-8 -*-
'''
Crawl the category list information.
'''
import json
from scrapy.http import Request
from scrapy.spiders import CrawlSpider
from douyin.items import DouyinCategoryItem
class categorySpider(CrawlSpider):
name = 'categorySpider'
redis_key = 'categorySpider'
cursor_num = 0
count_size = 10
url = "https://aweme.snssdk.com/aweme/v1/category/list/?version_code=181&count=10&cursor="
start_urls = [url + str(cursor_num)]
def parse(self, response):
jsonresp = json.loads(response.body_as_unicode())
if jsonresp['status_code'] == 0:
if jsonresp['has_more'] == 1:
aweme_list = list(jsonresp['category_list'])
for jsonobj in aweme_list:
item = self.init_item(jsonobj)
yield item
self.cursor_num += self.count_size
nexturl = self.url + str(self.cursor_num)
yield Request(nexturl, callback=self.parse)
else:
aweme_list = list(jsonresp['category_list'])
for jsonobj in aweme_list:
item = self.init_item(jsonobj)
yield item
def init_item(self, jsonobj):
item = DouyinCategoryItem()
        if str(jsonobj['desc']) == "็ญ้จๆๆ":  # "็ญ้จๆๆ" means "trending challenges"
item['category_type'] = jsonobj['desc']
item['category_id'] = jsonobj['challenge_info']['cid']
item['category_desc'] = jsonobj['challenge_info']['desc']
item['category_title'] = jsonobj['challenge_info']['cha_name']
item['category_url'] = jsonobj['challenge_info']['schema']
item['category_user_count'] = jsonobj['challenge_info']['user_count']
else:
# print("ๆง่ก็ญ้จ้ณไน่ตๅผ")
item['category_type'] = jsonobj['desc']
item['category_title'] = jsonobj['music_info']['title']
item['category_id'] = jsonobj['music_info']['mid']
item['category_url'] = 'https://api.amemv.com/aweme/v1/music/aweme/?music_id=' + \
str(jsonobj['music_info']['mid'])
item['category_desc'] = jsonobj['music_info']['offline_desc']
item['category_user_count'] = jsonobj['music_info']['user_count']
return item
|
gisShield/douyin
|
douyin/spiders/categoryspider.py
|
categoryspider.py
|
py
| 2,313 |
python
|
en
|
code
| 24 |
github-code
|
6
|
73027828029
|
import senticnet5 as sent_dict
import pandas as pd
import numpy as np
from itertools import islice
from sklearn.model_selection import train_test_split
import re
# returns numpy array
def get_ratings(ratings_filename):
return np.load(ratings_filename)
# returns array of document arrays with words
def get_reviews(reviews_filename):
reviews = []
with open(reviews_filename, "r") as f:
for line in f:
reviews.append([w.lower() for w in re.sub('[^A-Za-z \']+', "", line).split()])
return reviews
# returns word polarity: float
# if word not in dictionary return None
def word_polarity(word):
try:
return float(sent_dict.senticnet[word][7])
    except (KeyError, IndexError, ValueError):  # word (or phrase) not in SenticNet
return None
# return average polarity of a given document
# if none of the words are in dictionary return None
# accounts all single words and combinations of 2 words
def document_polarity(doc):
polarity_sum = 0.0
num_words_accounted = 0
phrases = get_phrases(doc, 2)
for phrase in phrases:
current_polarity = word_polarity(phrase)
if current_polarity is not None:
polarity_sum += current_polarity
num_words_accounted += 1
if num_words_accounted > 0:
return polarity_sum / num_words_accounted
return None
# calculates polarities for given txt file with documents
# saves dictionary with average document polarity at given rating and number of rating occurrences
def train(filename):
print("TRAINING SIMPLE SENTIMENT")
results = {
0.0: [0.0, 0], # average polarity at given rating
1.0: [0.0, 0],
2.0: [0.0, 0],
3.0: [0.0, 0],
4.0: [0.0, 0],
"Undefined": [0.0, 0] # if polarity can't be determined use this to determine average rating for such occurrences
}
ratings = get_ratings(filename + "_ratings.npy")
reviews = get_reviews(filename + "_reviews.txt")
x_train, x_test, y_train, y_test = train_test_split(reviews, ratings, test_size=0.2, random_state=1)
for doc, rating in zip(x_train, y_train):
polarity = document_polarity(doc)
if polarity is None:
results["Undefined"][0] += rating
results["Undefined"][1] += 1
else:
results[rating][0] += polarity
results[rating][1] += 1
for key in results:
results[key][0] = results[key][0] / max(results[key][1], 1)
pd.DataFrame(results).to_csv(filename + "_polarities.csv")
# gives rating prediction based on closest average document polarity
def predictions(filename):
print("PREDICTING SIMPLE SENTIMENT")
predictions = []
ratings = get_ratings(filename + "_ratings.npy")
reviews = get_reviews(filename + "_reviews.txt")
rating_polarities = pd.read_csv(filename + "_polarities.csv")
default_rating = float(round(rating_polarities.loc[0, "Undefined"]))
polarities = rating_polarities[["0.0", "1.0", "2.0", "3.0", "4.0"]].iloc[0].tolist()
x_train, x_test, y_train, y_test = train_test_split(reviews, ratings, test_size=0.2, random_state=1)
for doc, rating in zip(x_test, y_test):
polarity = document_polarity(doc)
prediction = default_rating
if polarity is not None:
prediction = float(polarities.index(min(polarities, key=lambda x:abs(x - polarity))))
predictions.append(prediction)
pd_ratings = pd.Series(ratings[:len(predictions)], name="Actual")
pd_predictions = pd.Series(predictions, name="Predicted")
confusion_matrix = pd.crosstab(pd_predictions, pd_ratings)
return confusion_matrix
# generates a sliding window over a sequence that shrinks at the end
# [1, 2, 3, 4], 2 => [1, 2], [2, 3], [3, 4], [4]
# [1, 2, 3, 4], 3 => [1, 2, 3], [2, 3, 4], [3, 4], [4]
def get_windows(sequence, n):
windows = []
for i, x in enumerate(sequence):
windows.append(list(islice(sequence, i, i+n)))
return windows
# returns all combinations retaining the order
# eg. 1, 2, 3 => 1, 1_2, 1_2_3
def get_combinations(sequence):
combinations = []
for i, x in enumerate(sequence):
combinations.append("_".join(sequence[:i] + [x]))
return combinations
# returns all possible combinations within a sliding window
# e.g. window_size = 2
# 1, 2, 3, 4 => 1, 1_2, 2, 2_3, 3, 3_4, 4
def get_phrases(doc, window_size):
phrases = []
for window in get_windows(doc, window_size):
phrases += get_combinations(window)
return phrases
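# Worked example (added comment, illustration only):
# get_phrases(["not", "good", "movie"], 2)
# -> ["not", "not_good", "good", "good_movie", "movie"]
# so both single words and adjacent two-word phrases are looked up in SenticNet.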
|
jgombac/RatingPredictor
|
simple_sentiment.py
|
simple_sentiment.py
|
py
| 4,427 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41400484106
|
#!/usr/bin/env python3
__author__ = 'smw'
__email__ = '[email protected]'
__status__ = 'Development'
import os
import sys
import arcpy
import timeit
start = timeit.default_timer()
lcm_vector = r'E:\land-cover-map\data\LCM2015_GB.gdb\lcm2015gbvector'
print('\n\nlcm_vector:\t\t{0}'.format(lcm_vector))
shp_folder = r'E:\land-cover-map\data\ShapeFiles'
print('\n\nshp_folder:\t\t{0}'.format(shp_folder))
out_gdb = r'E:\land-cover-map\data\out_gdb.gdb'
print('\n\nout_gdb:\t\t{0}'.format(out_gdb))
if arcpy.Exists(out_gdb):
print('\n\nout_gdb exists.')
else:
print('\n\nCreating out_gdb...')
arcpy.CreateFileGDB_management(out_folder_path=os.path.dirname(out_gdb),
out_name=os.path.basename(out_gdb))
print('\n\nLooping through shapefiles...')
arcpy.env.workspace = shp_folder
featureclasses = arcpy.ListFeatureClasses(wild_card='*',
feature_type='Polygon')
for fc in featureclasses:
print('\tfc:\t\t{0}'.format(fc))
out_fc = os.path.join(out_gdb,
'{0}_{1}'.format(os.path.basename(lcm_vector), os.path.splitext(fc)[0]))
print('\t\tout_fc:\t\t{0}'.format(out_fc))
if arcpy.Exists(out_fc):
arcpy.Delete_management(out_fc)
# print('\t\tClipping...')
# arcpy.Clip_analysis(in_features=lcm_vector,
# clip_features=fc,
# out_feature_class=out_fc)
print('\t\tSelecting...')
fl = 'featurelayer'
if arcpy.Exists(fl):
arcpy.Delete_management(fl)
arcpy.MakeFeatureLayer_management(in_features=lcm_vector,
out_layer=fl)
arcpy.SelectLayerByLocation_management(in_layer=fl,
overlap_type='INTERSECT',
select_features=fc,
selection_type='NEW_SELECTION')
selected_features = int(arcpy.GetCount_management(fl)[0])
print('\t\tselected_features:\t\t{0}'.format(selected_features))
if selected_features > 0:
arcpy.CopyFeatures_management(in_features=fl,
out_feature_class=out_fc)
copied_features = int(arcpy.GetCount_management(out_fc)[0])
print('\t\tcopied_features:\t\t{0}'.format(copied_features))
if arcpy.Exists(fl):
arcpy.Delete_management(fl)
stop = timeit.default_timer()
total_time = stop - start
mins, secs = divmod(total_time, 60)
hours, mins = divmod(mins, 60)
print('\n\nTotal running time:\t\t{0}:{1}:{2:.2f}\n'.format(str(int(hours)).zfill(2), str(int(mins)).zfill(2), secs))
|
smwCEH/land-cover-map-clipping
|
multi-clip.py
|
multi-clip.py
|
py
| 2,658 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71177753467
|
from math import *
from operator import concat
from random import randint, random, choice, uniform
import colorsys
import pysvg.structure
import pysvg.builders
img_side = 1024
img_xmid = img_side/2
img_ymid = img_side/2
starting_rad = 128
class Circle:
__slots__ = ('x', 'y', 'rad', 'depth')
def __init__(self, x, y, rad, depth):
self.x = x
self.y = y
self.rad = rad
self.depth = depth
circles = []
def rgb_to_hex(rgb):
return "#" + reduce(concat, map(lambda x: "%02x" % x, rgb))
def make_child(circle):
# ang = random() * (1 * pi)
ang = uniform(1.9 * pi, 2.1 * pi) if random() < 0.10 else uniform(.9 * pi, 1.1 * pi)
px = (2 * circle.rad) * cos(ang) + circle.x
py = (2 * circle.rad) * sin(ang) + circle.y
return Circle(px, py, circle.rad * 0.5, circle.depth + 1)
def make_tree(root, branch, depth):
if depth == 0:
return
children = []
for _ in range(branch):
child = make_child(root)
children.append(child)
circles.append(child)
for child in children:
make_tree(child, branch, depth - 1)
# Add the root
root = Circle(img_xmid, img_ymid, starting_rad, 0)
circles.append(root)
# Make the tree
depth = 6
branching_factor = 7
make_tree(root, branching_factor, depth)
# Make the SVG
svg = pysvg.structure.svg()
sb = pysvg.builders.ShapeBuilder()
bot_y = min(circles, key = lambda circ: circ.y).y
top_y = max(circles, key = lambda circ: circ.y).y
bot_x_circ = min(circles, key = lambda circ: circ.x - circ.rad)
bot_x = bot_x_circ.x - bot_x_circ.rad
bot_y_circ = min(circles, key = lambda circ: circ.y - circ.rad)
bot_y = bot_y_circ.y - bot_y_circ.rad
highest_dist_circ = max(circles, key = lambda circ: sqrt((circ.x - bot_x)**2 + (circ.y-bot_y)**2))
highest_dist = sqrt((highest_dist_circ.x - bot_x)**2 + (highest_dist_circ.y - bot_y)**2)
for circ in circles:
# darkness = (float(circ.depth) / depth) * 255
# light = float(circ.depth) / depth
light = 0.5
# hue = float(circ.y - bot_y) / (top_y - bot_y)
hue = sqrt((circ.x - bot_x)**2 + (circ.y - bot_y)**2) / highest_dist
hue += choice((-1, 1)) * random() * 0.25
# sat = float(circ.depth) / depth
sat = 0.5
rgb = map(lambda x: int(255 * x), colorsys.hls_to_rgb(hue, light, sat))
color = rgb_to_hex(rgb)
if(circ.depth > 2):
svg.addElement(sb.createCircle(circ.x - bot_x, circ.y - bot_y, circ.rad, strokewidth = 0, fill = color))
# Hack fill opacity in because PySVG doesn't have it :(
xml = svg.getXML().replace("; \"", "; fill-opacity:0.75; \"")
with open("angletree.svg", "w") as f:
f.write(xml)
|
bitbanger/algoart
|
angletree.py
|
angletree.py
|
py
| 2,549 |
python
|
en
|
code
| 13 |
github-code
|
6
|
37226419252
|
N = int(input())
A = list(map(int, input().split()))
MaxVal = 0
for l in range(N):
for m in range(l+1, N+1):
x = min(A[l:m]) * (m - l)
if x > MaxVal:
MaxVal = x
print(MaxVal)
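# Note (added comment): this is the O(N^2) brute force - for every subarray A[l:m] it
# scores min(A[l:m]) * (m - l) and keeps the maximum over all subarrays.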
|
konamilk/atcoder-abc189
|
C.py
|
C.py
|
py
| 211 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37392717665
|
import streamlit as st
import requests,random,time
from deta import Deta
deta = Deta(st.secrets['key'])
# Base key
db= deta.Base("usernames")
st.set_page_config(page_title="Github Shoutout",page_icon="images/githublogo.png",layout="centered",initial_sidebar_state="auto") # setting the page config
def verifying(username):
if username:
try:
api_url = f"https://api.github.com/users/{username}" # api url
response = requests.get(api_url) # get response
data = response.json() # parse data as json
if db.get(username):
st.warning("Username already exists")
            elif data["followers"] and data["name"] and data["bio"]: # if the user has followers and both name and bio are set
                db.put({"key":username}) # add an entry to the database with the lowercase username as key
st.success("Username stored in database.")
else:
st.error("Sorry, you don't have followers or your name and bio is not setup")
except Exception as e: # if username is not valid
print(e)
st.error("Invalid github username")
def random_username():
names = db.fetch().items
github_username=list(names[random.randint(0,len(names)-1)].values())[0]
try:
api_url = f"https://api.github.com/users/{github_username}" # api url
response = requests.get(api_url)
data = response.json()
acc_link=data['html_url']
st.markdown(f"""<div id='container'><img id='pfp' src="https://github.com/{github_username}.png" alt="github profile pic"/>
<h3>Name: {data['name']}</h3>
<p id="bio">Bio: {data['bio']}</p>
<p id="ff">Followers: {data["followers"]} | Following: {data["following"]}</p>
<table>
<tr>
<th>Stats</th>
<th>Streak</th>
<th>Languages</th>
</tr>
<tr>
<td><img src='http://github-profile-summary-cards.vercel.app/api/cards/stats?username={github_username}&theme=github_dark' width=200px height=100px></td>
<td><img src='https://streak-stats.demolab.com?user={github_username}&theme=github-dark&hide_border=true&border_radius=32&date_format=j%20M%5B%20Y%5D&ring=888888' width=180px height=100px></td>
<td><img src='http://github-profile-summary-cards.vercel.app/api/cards/repos-per-language?username={github_username}&theme=github_dark' width= 200px height=100px></td>
</tr>
</table><br><br>
<a target="_blank" href="{acc_link}">
<button id='btn'>
Follow {github_username} on GitHub
</button><br><br>
</a></div>""",unsafe_allow_html=True) #displaying the data
#
except Exception as e:
st.error("Something went wrong, try again later")
def main():
st.markdown("""<a href='https://github.com/samadpls/Github-Shoutout'><img src='https://img.shields.io/github/stars/samadpls/Github-Shoutout?color=red&label=star%20me&logoColor=red&style=social'></a>""",unsafe_allow_html=True)
img , heading = st.columns([1,8]) # using columns to display the heading and image
with img:
st.image("images/githublogo.png",width=70) # github logo
with heading:
st.markdown('# Shoutout to Github User') # heading
st.markdown("`Click on the button to see the profile`") # description
if st.button("Press Me"):
with st.spinner('Wait for it...'):
time.sleep(2)
random_username()
#New username
with st.expander("Add your profile :"): # sub header
text = st.empty()
username=text.text_input("Enter your github username",max_chars=40)
st.markdown(""" `
Made with ๐ค by samadpls
`
""") # footer
verifying(username.strip().lower())
if __name__=="__main__":
with open('styles.css') as f:
st.markdown(f"<style>{f.read()}</style>",unsafe_allow_html=True) # loading the css file
main()
|
samadpls/Github-Shoutout
|
app.py
|
app.py
|
py
| 4,179 |
python
|
en
|
code
| 10 |
github-code
|
6
|
692617342
|
#!/usr/bin/python
import keras_ocr
import sys
if __name__ == "__main__":
    image_filepath = sys.argv[1]  # first CLI argument is the image path (argv[0] is the script itself)
recognizer = keras_ocr.recognition.Recognizer()
recognizer.compile()
recognizer.model.load_weights('./assets/dataset/trained_recognizer.h5')
predicted = recognizer.recognize(image_filepath)
print(" prediction:",predicted)
|
prakashsellathurai/ML-ASSIGNMENT-IDfy
|
predict.py
|
predict.py
|
py
| 358 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29836585751
|
# #Unzip the test directory
# !unzip drive/My\ Drive/CatVSDog/test1.zip
# #Unzip the train directory
# !unzip drive/My\ Drive/CatVSDog/train.zip
# Plotting the images of dog
import shutil
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import MaxPooling2D
from keras.layers import Conv2D
from keras.models import Sequential
import random
import os
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import load_img
from numpy import save
from numpy import asarray
from os import listdir
from matplotlib import pyplot
from matplotlib.image import imread
folder = 'train/'
for i in range(9):
# define subplot
pyplot.subplot(330+1+i)
# define the filename
filename = folder + 'dog.'+str(i)+'.jpg'
# load image pixels
image = imread(filename)
# plot raw pixel data
pyplot.imshow(image)
pyplot.show()
# Plotting the images of cat
folder = 'train/'
for i in range(9):
# define subplot
pyplot.subplot(330+1+i)
# define the filename
filename = folder + 'cat.'+str(i)+'.jpg'
# load image pixels
image = imread(filename)
# plot raw pixel data
pyplot.imshow(image)
pyplot.show()
# define location of dataset
folder = 'train/'
photos, labels = list(), list()
# enumerate files in the directory
# for file in listdir(folder):
# #determine class
# output = 0.0
# if file.startswith('cat'):
# output = 1.0
# #load image
# photo = load_img(folder+file,target_size = (200,200))
# photo = img_to_array(photo)
# #store
# photos.append(photo)
# labels.append(output)
# #convert to a numpy arrays
# photos = asarray(photos)
# labels = asarray(labels)
# print(photos.shape,labels.shape)
# #save the reshaped photos
# save('dogs_vs_cats_photos.npy',photos)
# save('dogs_vs_cats_labels.npy',labels)
# #loading from numpy data
# from numpy import load
# photos = load('dogs_vs_cats_photos.npy')
# labels = load('dogs_vs_cats_labels.npy')
# print(photos.shape,labels.shape)
# Alternate method
# creating separate cats/ and dogs/ subdirectories under train/ and test/, as required by flow_from_directory
dataset_home = 'dataset_dogs_vs_cats/'
subdirs = ['train/', 'test/']
for subdir in subdirs:
labeldirs = ['dogs/', 'cats/']
for labeldir in labeldirs:
newdir = dataset_home+subdir+labeldir
os.makedirs(newdir, exist_ok=True)
print("DONE")
# Partitioning the test and train sets
random.seed(1)
val_ratio = 0.25
src_directory = 'train/'
for file in listdir(src_directory):
src = src_directory+'/'+file
dst_dir = 'train/'
if random.random() < val_ratio:
dst_dir = 'test/'
if file.startswith('cat'):
dst = dataset_home+dst_dir+'cats/'+file
shutil.copyfile(src, dst)
elif file.startswith('dog'):
dst = dataset_home + dst_dir+'dogs/'+file
shutil.copyfile(src, dst)
# Initialising the CNN
classifier = Sequential()
# Convolution
classifier.add(Conv2D(32, (3, 3), input_shape=(
200, 200, 3), activation='relu'))
# Pooling
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(Conv2D(32, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Flattening
classifier.add(Flatten())
# Full connection
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dense(units=1, activation='sigmoid'))
# Loading the model
# classifier.load_weights("/kaggle/output/weights.best.hdf5")
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
train_datagen = ImageDataGenerator(rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory('dataset_dogs_vs_cats/train/',
target_size=(200, 200),
batch_size=32,
class_mode='binary')
test_set = test_datagen.flow_from_directory('dataset_dogs_vs_cats/test/',
target_size=(200, 200),
batch_size=32,
class_mode='binary')
# Select the path to store the final checkpoint after a epoch
filepath = "weights.best.hdf5"
checkpoint = ModelCheckpoint(
filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
classifier.fit_generator(training_set,
steps_per_epoch=8000,
epochs=50,
validation_data=test_set,
callbacks=callbacks_list,
validation_steps=2000)
|
mcaupybugs/CatsVSDogs
|
catvsdog.py
|
catvsdog.py
|
py
| 4,994 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25066459905
|
from django.contrib.auth import mixins
from oauth2_provider.contrib.rest_framework import (
OAuth2Authentication as BaseOAuth2Authentication,
)
from purplship.server.core.authentication import (
JWTAuthentication,
TokenAuthentication,
get_request_org,
)
class OAuth2Authentication(BaseOAuth2Authentication):
def authenticate(self, request):
auth = super().authenticate(request)
if auth is not None:
user, _ = auth
request.org = get_request_org(request, user)
return auth
class AccessMixin(mixins.AccessMixin):
"""Verify that the current user is authenticated."""
def dispatch(self, request, *args, **kwargs):
try:
auth = (
OAuth2Authentication().authenticate(request)
or JWTAuthentication().authenticate(request)
or TokenAuthentication().authenticate(request)
)
if auth is not None:
user, *_ = auth
request.user = user
finally:
return super().dispatch(request, *args, **kwargs)
|
danh91/purplship
|
insiders/server/iam/purplship/server/iam/authentication.py
|
authentication.py
|
py
| 1,109 |
python
|
en
|
code
| null |
github-code
|
6
|
4956999019
|
import sys
def herdle():
puzzle_answer = []
puzzle_guess = []
def split_into_chars(string):
res = []
for char in string:
res.append(char)
return res
answer_as_oned = []
for _ in range(3):
temp = sys.stdin.readline().strip()
splitted = split_into_chars(temp)
answer_as_oned += splitted
puzzle_answer.append(splitted)
for _ in range(3):
temp = sys.stdin.readline().strip()
splitted = split_into_chars(temp)
puzzle_guess.append(splitted)
same_place = 0
exists = 0
for x in range(3):
for y in range(3):
if puzzle_answer[x][y] == puzzle_guess[x][y]:
same_place += 1
answer_as_oned.remove(puzzle_answer[x][y])
for x in range(3):
for y in range(3):
if puzzle_guess[x][y] in answer_as_oned and puzzle_answer[x][y] != puzzle_guess[x][y]:
exists += 1
answer_as_oned.remove(puzzle_guess[x][y])
return [same_place, exists]
answer = herdle()
print(answer[0])
print(answer[1])
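# Worked example (added comment, illustration only):
# answer grid ABC / DEF / GHI and guess grid ABC / DEF / GIH
# -> prints 7 (exact matches) and 2 (letters present but misplaced).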
|
jjliewie/usaco-jan-bronze-2022
|
herdle-answer.py
|
herdle-answer.py
|
py
| 1,116 |
python
|
en
|
code
| 3 |
github-code
|
6
|
29916785411
|
import unittest
from livecli.plugins.vgtv import VGTV
class TestPluginVGTV(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
"http://ap.vgtv.no/webtv/video/114339/tempo-sport-motorsykkelen-som-gjenoppstod",
"http://ap.vgtv.no/webtv#!/video/114339/tempo-sport-motorsykkelen-som-gjenoppstod",
"https://tv.aftonbladet.se/abtv/articles/243105",
"https://www.vgtv.no/live/139125/sportsnyhetene-doegnet-rundt",
"https://www.vgtv.no/video/153967/vi-fulgte-hopp-stor-bakke-menn",
]
for url in should_match:
self.assertTrue(VGTV.can_handle_url(url))
should_not_match = [
"https://ap.vgtv.no",
]
for url in should_not_match:
self.assertFalse(VGTV.can_handle_url(url))
|
ariesw/livecli
|
tests/test_plugin_vgtv.py
|
test_plugin_vgtv.py
|
py
| 828 |
python
|
no
|
code
| 0 |
github-code
|
6
|
26632967906
|
import collections
import math
import socket
DEF_MACADDR = ['2VTX', '2VR7', '2ZX7', '2VN8']
# This class reads data from watches via UDP.
class watchData(object):
def __init__(self, ip, port, watch_num, watch_queue):
self.ip = ip
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.watch_num = watch_num
self.data_queue = watch_queue
def sock_bind(self):
self.sock.bind((self.ip, self.port))
def read(self):
while True:
data, addr = self.sock.recvfrom(1024)
parsed_data = data.split(' ')
if (parsed_data[2] == '3'):
gyro_x = float(parsed_data[3])
gyro_y = float(parsed_data[4])
gyro_z = float(parsed_data[5])
gyro_mag = math.sqrt(gyro_x*gyro_x + gyro_y*gyro_y + gyro_z*gyro_z) * 57.3
for i in range(self.watch_num):
if (parsed_data[0] == DEF_MACADDR[i]):
self.data_queue[i].append(gyro_mag)
def get_data(self):
return self.data_queue
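# Hedged usage sketch (added; not part of the original module). The IP/port and queue
# sizes are placeholders, and read() blocks forever, so it would normally run in a thread.
# Note: on Python 3, recvfrom() returns bytes, so `data` would need .decode() before split().
#
# from collections import deque
# queues = [deque(maxlen=100) for _ in range(4)]
# watches = watchData("0.0.0.0", 5555, 4, queues)
# watches.sock_bind()
# watches.read()  # fills each per-watch queue with gyroscope magnitudes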
|
jianwuesp/Registration
|
watch.py
|
watch.py
|
py
| 1,108 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34540343891
|
import argparse
import sys
from operator import add
import os
import shlex
import shutil
from subprocess import Popen, PIPE
from pyspark import SparkContext, SparkConf
import pyspark.serializers
import subprocess
import boto3
import re
global parser_result
if sys.version_info >= (3, 4):  # compare version tuples rather than the version string
pyspark.serializers.protocol = 4
APPLICATION_FOLDER = "/app"
GENOME_REFERENCES_FOLDER = "/mnt/ref"
TEMP_OUTPUT_FOLDER = "/mnt/output"
HDFS_TEMP_OUTPUT_FOLDER = "/tmp/sam_chunks"
#################################
# File splitting
#################################
def split_interleaved_file(file_prefix, file_content, output_dir):
"""
Unpacks an interleaved file into the standard FASTQ format
:param file_prefix: the prefix of the file name
:param file_content: the lines of content from the input file
:param output_dir: the location to store the unpacked files
:return: a tuple with first element being a list of output file names
(1 for se, 2 for pe); 2nd element a boolean flag - True if pe data,
False otherwise
"""
fastq_line_count_se = 4
fastq_line_count_pe = 8
paired_reads = False
output_file_names = []
file_prefix = output_dir + "/" + file_prefix
output_file = file_prefix + "_1.fq"
output_file_names.append(output_file)
output_file_writer = open(output_file, 'w')
count = 0
for line in file_content.strip().split("\n"):
# In the first line, check if it's paired or not
if count == 0 and len(line.strip().split("\t")) == fastq_line_count_pe:
paired_reads = True
output_file_pair = file_prefix + "_2.fq"
output_file_names.append(output_file_pair)
output_pair_writer = open(output_file_pair, 'w')
if paired_reads:
parts = line.strip().split("\t")
if len(parts) != fastq_line_count_pe:
continue
read_one = parts[:fastq_line_count_se]
read_two = parts[fastq_line_count_se:]
output_file_writer.write("\n".join(read_one) + "\n")
output_pair_writer.write("\n".join(read_two) + "\n")
else:
output_file_writer.writelines(line.strip().replace("\t", "\n") + "\n")
count += 1
output_file_writer.close()
if paired_reads:
output_pair_writer.close()
return output_file_names, paired_reads
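# Format note (added comment): each input line is one interleaved record - 4 tab-separated
# FASTQ fields (id, sequence, '+', quality) for single-end data, or 8 fields (read 1 followed
# by read 2) for paired-end data; the very first line decides whether "<prefix>_2.fq" is created.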
#################################
# Aligner
#################################
def align_reads_star(sample_name, file_names, alignment_output_dir):
# If paired read flag is required
# paired_read = True if len(file_names) == 2 else False
print("Aligning reads...")
aligner_args = "{app_folder}/STAR/STAR --runThreadN 4 {aligner_extra_args} --genomeDir {index_folder} " \
"--readFilesIn {fastq_file_names} --outFileNamePrefix {output_folder} --outSAMtype BAM Unsorted".\
format(app_folder=APPLICATION_FOLDER,
aligner_extra_args="" if parser_result.aligner_extra_args is None else parser_result.aligner_extra_args,
index_folder=GENOME_REFERENCES_FOLDER + "/star_index",
fastq_file_names=" ".join(file_names),
output_folder=alignment_output_dir + "/")
print("Command: " + aligner_args)
aligner_process = Popen(shlex.split(aligner_args), stdout=PIPE, stderr=PIPE)
aligner_out, aligner_error = aligner_process.communicate()
if aligner_process.returncode != 0:
raise ValueError("STAR failed to complete (Non-zero return code)!\n"
"STAR stdout: {std_out} \nSTAR stderr: {std_err}".format(std_out=aligner_out.decode("utf8"),
std_err=aligner_error.decode("utf8")))
if aligner_error.decode("utf8").strip() != "" or not os.path.isfile(alignment_output_dir + "/Log.final.out"):
raise ValueError("STAR failed to complete (No output file is found)!\n"
"STAR stdout: {std_out} \nSTAR stderr: {std_err}".format(std_out=aligner_out.decode("utf8"),
std_err=aligner_error.decode("utf8")))
print('Completed reads alignment')
bam_file_name_output = "Aligned.out.bam"
return bam_file_name_output
def align_reads_hisat(sample_name, file_names, alignment_output_dir):
# If paired read flag is required
paired_read = True if len(file_names) == 2 else False
print("Aligning reads...")
if paired_read:
fastq_file_args = "-1 {} -2 {}".format(*file_names)
else:
fastq_file_args = "-U {}".format(*file_names)
aligner_args = "{app_folder}/hisat/hisat2 -p 4 --tmo {aligner_extra_args} -x {index_folder}/hisat2.index " \
"{fastq_file_names} -S {output_folder}/output.sam".\
format(app_folder=APPLICATION_FOLDER,
aligner_extra_args="" if parser_result.aligner_extra_args is None else parser_result.aligner_extra_args,
index_folder=GENOME_REFERENCES_FOLDER + "/hisat_index",
fastq_file_names=fastq_file_args,
output_folder=alignment_output_dir)
print("Command: " + aligner_args)
aligner_process = Popen(shlex.split(aligner_args), stdout=PIPE, stderr=PIPE)
aligner_out, aligner_error = aligner_process.communicate()
if aligner_process.returncode != 0:
raise ValueError("HISAT2 failed to complete (Non-zero return code)!\n"
"HISAT2 stdout: {std_out} \nHISAT2 stderr: {std_err}".format(std_out=aligner_out.decode("utf8"),
std_err=aligner_error.decode("utf8")))
print('Completed reads alignment')
samtools_args = "{app_folder}/samtools/samtools view -@ 4 -o {output_folder}/output.bam {output_folder}/output.sam". \
format(app_folder=APPLICATION_FOLDER,
output_folder=alignment_output_dir)
print("Command: " + samtools_args)
samtools_process = Popen(shlex.split(samtools_args), stdout=PIPE, stderr=PIPE)
samtools_out, samtools_error = samtools_process.communicate()
if samtools_process.returncode != 0:
raise ValueError("Samtools failed to complete (Non-zero return code)!\n"
"Samtools stdout: {std_out} \nSamtools stderr: {std_err}".format(
std_out=samtools_out.decode("utf8"), std_err=samtools_error.decode("utf8")))
sam_file_name_output = "output.bam"
return sam_file_name_output
def align_reads_subread(sample_name, file_names, alignment_output_dir):
# If paired read flag is required
paired_read = True if len(file_names) == 2 else False
print("Aligning reads...")
print("Aligning with subread")
if paired_read:
fastq_file_args = "-r {} -R {}".format(*file_names)
else:
fastq_file_args = "-r {}".format(*file_names)
aligner_args = "{app_folder}/subread/subread-align -T 4 -t 0 --SAMoutput {aligner_extra_args} " \
"-i {index_folder}/genome {fastq_file_names} -o {output_folder}/output.bam".\
format(app_folder=APPLICATION_FOLDER,
aligner_extra_args="" if parser_result.aligner_extra_args is None else parser_result.aligner_extra_args,
index_folder=GENOME_REFERENCES_FOLDER + "/subread_index",
fastq_file_names=fastq_file_args,
output_folder=alignment_output_dir)
print("Command: " + aligner_args)
aligner_process = Popen(shlex.split(aligner_args), stdout=PIPE, stderr=PIPE)
aligner_out, aligner_error = aligner_process.communicate()
if aligner_process.returncode != 0:
raise ValueError("Subread failed to complete (Non-zero return code)!\n"
"Subread stdout: {std_out} \nSubread stderr: {std_err}".format(std_out=aligner_out.decode("utf8"),
std_err=aligner_error.decode("utf8")))
print('Completed reads alignment')
sam_file_name_output = "output.bam"
return sam_file_name_output
#################################
# Main functions
#################################
def alignment_step(keyval):
# Input: file_name, file_content as key,val
# Output: [sample_name, file_name] as [key,val]
global parser_result
prefix_regex = r"(.*_part[0-9]*)\."
file_name, file_content = keyval
prefix_match = re.findall(prefix_regex, file_name.rstrip("/").split("/")[-1])
if len(prefix_match) != 1:
raise ValueError("Filename can not be resolved (invalid, pattern mismatch): {}".format(file_name))
prefix = prefix_match[0]
sample_name = prefix.rsplit("_part", 1)[0]
alignment_dir = TEMP_OUTPUT_FOLDER + "/alignment_" + prefix
try:
os.mkdir(alignment_dir)
except:
print('Alignment directory {} exist.'.format(alignment_dir))
print("Recreating FASTQ file(s)")
split_file_names, paired_reads = split_interleaved_file(prefix, file_content, alignment_dir)
print("Recreating FASTQ file(s) complete. Files recreated: {}".format(",".join(split_file_names)))
alignment_output_dir = alignment_dir + "/aligner_output"
try:
os.mkdir(alignment_output_dir)
except:
print('Alignment output directory {} exist.'.format(alignment_output_dir))
if parser_result.aligner.lower() == "star":
aligned_sam_output = align_reads_star(sample_name, split_file_names, alignment_output_dir)
elif parser_result.aligner.lower() == "hisat" or parser_result.aligner.lower() == "hisat2":
aligned_sam_output = align_reads_hisat(sample_name, split_file_names, alignment_output_dir)
elif parser_result.aligner.lower() == "subread":
aligned_sam_output = align_reads_subread(sample_name, split_file_names, alignment_output_dir)
else:
print("Aligner specified is not yet supported. Defaulting to STAR")
aligned_sam_output = align_reads_star(sample_name, split_file_names, alignment_output_dir)
aligned_output_filepath = "{}/{}".format(alignment_output_dir.rstrip("/"), aligned_sam_output)
aligned_output_hdfs_filepath = "{}/{}".format(HDFS_TEMP_OUTPUT_FOLDER, prefix)
subprocess.call(["hdfs", "dfs", "-rm", aligned_output_hdfs_filepath])
subprocess.call(["hdfs", "dfs", "-put", aligned_output_filepath, aligned_output_hdfs_filepath])
shutil.rmtree(alignment_dir, ignore_errors=True)
return sample_name, [prefix]
def fuse_alignment(keyval):
# Input: sample_name, [file_name,...] as key, val
# Output: sample_name
global parser_result
key, file_lists = keyval
fuse_alignment_dir = TEMP_OUTPUT_FOLDER.rstrip("/") + "/" + key
ordered_file_lists = sorted([(f, int(f.rsplit("part", 1)[-1])) for f in file_lists], key=lambda x:x[-1])
print(ordered_file_lists)
try:
os.mkdir(fuse_alignment_dir)
except:
print('Fuse alignment directory {} exist.'.format(fuse_alignment_dir))
fuse_alignment_file = key + ".bam"
previous_file_path = ""
for index, file_name_pair in enumerate(ordered_file_lists):
file_name, number = file_name_pair
local_file_path = fuse_alignment_dir + "/" + file_name + ".bam"
subprocess.call(["hdfs", "dfs", "-get", HDFS_TEMP_OUTPUT_FOLDER.rstrip("/") + "/" + file_name, local_file_path])
if index != 0:
new_merged_file_path = "{}/temp_{}.bam".format(fuse_alignment_dir, index)
subprocess.call(["samtools", "cat", "-o", new_merged_file_path, previous_file_path, local_file_path])
os.remove(previous_file_path)
os.remove(local_file_path)
previous_file_path = new_merged_file_path
else:
previous_file_path = local_file_path
subprocess.call(["hdfs", "dfs", "-rm", HDFS_TEMP_OUTPUT_FOLDER.rstrip("/") + "/" + file_name])
if parser_result.output_dir.startswith("s3://"): # From S3
s3_client = boto3.client('s3', region_name=parser_result.aws_region)
print("uploading to S3")
output_bucket, key_prefix = parser_result.output_dir.strip().strip("/")[5:].split("/", 1)
s3_client.upload_file(previous_file_path, output_bucket, key_prefix + "/" + fuse_alignment_file)
else:
print("outputting to HDFS")
subprocess.call(["hdfs", "dfs", "-mkdir", "-p", parser_result.output_dir.rstrip("/")])
subprocess.call(["hdfs", "dfs", "-put", previous_file_path, parser_result.output_dir.rstrip("/") + "/" +
fuse_alignment_file])
os.remove(previous_file_path)
return key
if __name__ == "__main__":
global parser_result
parser = argparse.ArgumentParser(description='Spark-based RNA-seq Pipeline Alignment')
parser.add_argument('--input', '-i', action="store", dest="input_dir", help="Input directory - HDFS or S3")
parser.add_argument('--output', '-o', action="store", dest="output_dir", help="Output directory - HDFS or S3")
parser.add_argument('--aligner_tools', '-at', action="store", dest="aligner", nargs='?',
help="Aligner to be used (STAR|HISAT2|Subread)", default="STAR")
parser.add_argument('--aligner_extra_args', '-s', action="store", dest="aligner_extra_args", nargs='?',
help="Extra argument to be passed to alignment tool", default="")
parser.add_argument('--region', '-r', action="store", dest="aws_region", help="AWS region")
parser_result = parser.parse_args()
split_num = 0
conf = SparkConf().setAppName("Spark-based RNA-seq Pipeline Alignment")
sc = SparkContext(conf=conf)
if parser_result.input_dir.startswith("s3://"): # From S3
s3_client = boto3.client('s3', region_name=parser_result.aws_region)
# Get number of input files
s3_paginator = s3_client.get_paginator('list_objects')
input_bucket, key_prefix = parser_result.input_dir[5:].strip().split("/", 1)
input_file_num = 0
for result in s3_paginator.paginate(Bucket=input_bucket, Prefix=key_prefix):
for file in result.get("Contents"):
input_file_num += 1
if input_file_num == 0:
raise ValueError("Input directory is invalid or empty!")
split_num = input_file_num
else: # From HDFS
hdfs_process = Popen(shlex.split("hdfs dfs -count {}".format(parser_result.input_dir)),
stdout=PIPE, stderr=PIPE)
hdfs_out, hdfs_error = hdfs_process.communicate()
if hdfs_error:
raise ValueError("Input directory is invalid or empty!")
dir_count, file_count, size, path = hdfs_out.strip().split()
split_num = int(file_count)
subprocess.call(["hdfs", "dfs", "-mkdir", "-p", HDFS_TEMP_OUTPUT_FOLDER])
input_files = sc.wholeTextFiles(parser_result.input_dir, split_num)
aligned_files = input_files.map(alignment_step)
aligned_file_lists = aligned_files.reduceByKey(add)
aligned_samples = aligned_file_lists.map(fuse_alignment)
aligned_samples.collect()
|
VCCRI/Falco
|
source/spark_runner/run_pipeline_alignment.py
|
run_pipeline_alignment.py
|
py
| 15,126 |
python
|
en
|
code
| 37 |
github-code
|
6
|
43855241031
|
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
import torch.optim as optim
import torchvision
from torchvision import transforms, models, datasets
import imageio
import time
import warnings
import random
import sys
import copy
import json
from PIL import Image
####################################################################
# restore the normalized image data to the [0,1] range so it can be displayed
def im_convert(tensor):
    # prepare the data for display
image = tensor.to("cpu").clone().detach()
image = image.numpy().squeeze()
image = image.transpose(1, 2, 0)
# mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
image = image * np.array((0.229, 0.224, 0.225)) + \
np.array((0.485, 0.456, 0.406))
    # set values below 0 to 0 and values above 1 to 1
image = image.clip(0, 1)
return image
####################################################################
####################################################################
def set_parameter_requires_grad(a_model, bol_frozen_param):
if bol_frozen_param:
for param in a_model.parameters():
param.requires_grad = False
####################################################################
def initialize_model(model_name, num_classes, bol_frozen_nn_params, use_pretrained=True):
    # choose the appropriate model; different models are initialized slightly differently
model_ft = None
input_size = 0
if model_name == "resnet":
""" Resnet152
"""
model_ft = models.resnet152(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, bol_frozen_nn_params)
        # then replace the final fully-connected layer according to our number of classes
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Sequential(nn.Linear(num_ftrs, 102),
nn.LogSoftmax(dim=1))
input_size = 224
elif model_name == "vgg":
""" VGG11_bn
"""
model_ft = models.vgg16(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, bol_frozen_nn_params)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "inception":
""" Inception v3
Be careful, expects (299,299) sized images and has auxiliary output
"""
model_ft = models.inception_v3(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, bol_frozen_nn_params)
# Handle the auxilary net
num_ftrs = model_ft.AuxLogits.fc.in_features
model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
# Handle the primary net
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 299
else:
print("Invalid model name, exiting...")
exit()
return model_ft, input_size
data_dir = './flower_data/'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
# data_transforms is a dict recording the preprocessing operations for the [train] and [valid] data
data_transforms = {
'train': transforms.Compose(
        [transforms.RandomRotation(45),  # random rotation, anywhere between -45 and +45 degrees
         transforms.CenterCrop(224),  # crop from the center
         transforms.RandomHorizontalFlip(p=0.5),  # random horizontal flip with probability 0.5
         transforms.RandomVerticalFlip(p=0.5),  # random vertical flip
         # arg 1: brightness, arg 2: contrast, arg 3: saturation, arg 4: hue
         transforms.ColorJitter(
             brightness=0.2, contrast=0.1, saturation=0.1, hue=0.1),
         transforms.RandomGrayscale(p=0.025),  # randomly convert to grayscale (3 channels with R=G=B)
         transforms.ToTensor(),
         transforms.Normalize([0.485, 0.456, 0.406], [
             0.229, 0.224, 0.225])  # mean, std
]),
'valid': transforms.Compose(
[transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
}
batch_size = 4
# image_datasets is a dict holding the two datasets (train and valid), including the image data and class labels
image_datasets = {x: datasets.ImageFolder(os.path.join(
data_dir, x), data_transforms[x]) for x in ['train', 'valid']}
# define a separate dataloader for the train and valid datasets
dataloaders = {x: torch.utils.data.DataLoader(
image_datasets[x], batch_size=batch_size, shuffle=True) for x in ['train', 'valid']}
# count of samples in the train and valid datasets
# dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'valid']}
# class_ids is a list, e.g. ['1', '10', '100', '101', '102', '11', ...]
class_ids = image_datasets['train'].classes
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
# prepare an iterator for reading data
data_iter = iter(dataloaders['valid'])
# region demo: take a single batch of data and display it
# fig = plt.figure(figsize=(18, 10))
# columns = 3
# rows = 3
# # take a single batch_size of data.
# # note: category_ids holds each class's index in the image_datasets['train'].classes list, not the class id itself
# inputs, category_ids = data_iter.next()
# for idx in range(columns*rows):
# ax = fig.add_subplot(rows, columns, idx+1, xticks=[], yticks=[])
# ax.set_title(str(int(class_ids[category_ids[idx]])) + ':' +
# cat_to_name[str(int(class_ids[category_ids[idx]]))])
# plt.imshow(im_convert(inputs[idx]))
# plt.tight_layout()
# plt.show()
# endregion demo: take a single batch of data and display it
# several choices available: ['resnet', 'alexnet', 'vgg', 'squeezenet', 'densenet', 'inception']
model_name = 'resnet'
# whether to reuse the pretrained feature-extraction weights, i.e. keep them frozen
bol_frozen_nn_param = True
# whether to train on GPU
train_on_gpu = torch.cuda.is_available()
my_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_ft = models.resnet152(pretrained=True)
model_ft, input_size = initialize_model(
model_name, 102, bol_frozen_nn_param, use_pretrained=True)
# run on GPU if available
model_ft = model_ft.to(my_device)
# checkpoint file for saving the model
filename = 'checkpoint.pth'
# whether to train all layers
params_to_update = model_ft.parameters()
# print('params_to_update:\n', params_to_update)
# params_to_update = model_ft.named_parameters()
# print('params_to_update:\n', params_to_update)
print("Params to learn:")
if bol_frozen_nn_param:
params_to_update = []
for name, param in model_ft.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print("\t", name)
else:
for name, param in model_ft.named_parameters():
if param.requires_grad == True:
print("\t", name)
# optimizer setup
optimizer_ft = optim.Adam(params_to_update, lr=1e-2)
scheduler = optim.lr_scheduler.StepLR(
    optimizer_ft, step_size=7, gamma=0.1)  # decay the learning rate to 1/10 every 7 epochs
# the last layer already applies LogSoftmax(), so nn.CrossEntropyLoss() cannot be used here;
# nn.CrossEntropyLoss() is equivalent to LogSoftmax() combined with nn.NLLLoss()
criterion = nn.NLLLoss()
# so criterion = nn.CrossEntropyLoss() is not used here
# =====================================================================================================
# =====================================================================================================
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False, filename=filename):
since = time.time()
best_acc = 0
    # region load a saved checkpoint
'''
checkpoint = torch.load(filename)
best_acc = checkpoint['best_acc']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
model.class_to_idx = checkpoint['mapping']
'''
# endregion
model.to(my_device)
val_acc_history = []
train_acc_history = []
train_losses = []
valid_losses = []
LRs = [optimizer.param_groups[0]['lr']]
best_model_wts = copy.deepcopy(model.state_dict())
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
        # train and validate
        for phase in ['train', 'valid']:
            if phase == 'train':
                model.train()  # training mode
            else:
                model.eval()  # evaluation mode
running_loss = 0.0
running_corrects = 0
            # iterate over all of the data
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(my_device)
labels = labels.to(my_device)
                # zero out the gradients
                optimizer.zero_grad()
                # compute and update gradients only during training
with torch.set_grad_enabled(phase == 'train'):
if is_inception and phase == 'train':
outputs, aux_outputs = model(inputs)
loss1 = criterion(outputs, labels)
loss2 = criterion(aux_outputs, labels)
loss = loss1 + 0.4*loss2
                    else:  # resnet goes through this branch
outputs = model(inputs)
loss = criterion(outputs, labels)
                    # torch.max(outputs, 1) returns, for each row, the max value and the column index of that max
                    # both the predicted index and the label are indices into the classifier's class list
pred_values, pred_idxs = torch.max(outputs, 1)
print('outputs:', outputs)
print('predict value:', pred_values)
print('prdict_category:', pred_idxs)
print('labels:', labels.data)
                    # update the weights only in the training phase
if phase == 'train':
loss.backward()
optimizer.step()
                # accumulate the loss statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(pred_idxs == labels.data)
epoch_loss = running_loss / len(dataloaders[phase].dataset)
epoch_acc = running_corrects.double(
) / len(dataloaders[phase].dataset)
time_elapsed = time.time() - since
print('Time elapsed {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
            # keep the best model seen so far
if phase == 'valid' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
state = {
'state_dict': model.state_dict(),
'best_acc': best_acc,
'optimizer': optimizer.state_dict(),
}
torch.save(state, filename)
if phase == 'valid':
val_acc_history.append(epoch_acc)
valid_losses.append(epoch_loss)
scheduler.step(epoch_loss)
if phase == 'train':
train_acc_history.append(epoch_acc)
train_losses.append(epoch_loss)
print('Optimizer learning rate : {:.7f}'.format(
optimizer.param_groups[0]['lr']))
LRs.append(optimizer.param_groups[0]['lr'])
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
    # after training, load the best snapshot as the final model
model.load_state_dict(best_model_wts)
return model, val_acc_history, train_acc_history, valid_losses, train_losses, LRs
# =====================================================================================================
# train only the custom final layer -- the fully-connected layer
# model_ft, val_acc_history, train_acc_history, valid_losses, train_losses, LRs = train_model(
# model_ft, dataloaders, criterion, optimizer_ft, num_epochs=1, is_inception=(model_name == "inception"))
# set all network parameters back to trainable
for param in model_ft.parameters():
    param.requires_grad = True
# then continue training all parameters, with a smaller learning rate
optimizer = optim.Adam(params_to_update, lr=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
# loss function
criterion = nn.NLLLoss()
# Load the checkpoint
checkpoint = torch.load(filename)
best_acc = checkpoint['best_acc']
model_ft.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
#model_ft.class_to_idx = checkpoint['mapping']
# ๅๆฌก่ฎญ็ป๏ผ่ฟๆฌก่ฎญ็ปๆดไธชๆจกๅ
# model_ft, val_acc_history, train_acc_history, valid_losses, train_losses, LRs = train_model(
# model_ft, dataloaders, criterion, optimizer, num_epochs=1, is_inception=(model_name == "inception"))
# fetch one batch of validation data
dataiter = iter(dataloaders['valid'])
images, labels = next(dataiter)  # dataiter.next() was removed in recent PyTorch versions
# After training on train_datasets the model is used on test samples; call model.eval() before model(test_datasets).
# Otherwise, feeding input data will still change the weights even without training; this behaviour comes from the batch normalization layers in the model.
model_ft.eval()
if train_on_gpu:
output = model_ft(images.cuda())
else:
output = model_ft(images)
predict_value, preds_tensor = torch.max(output, 1)
preds = np.squeeze(preds_tensor.numpy()) if not train_on_gpu else np.squeeze(
preds_tensor.cpu().numpy())
print(predict_value)
print(preds)
print(labels)
# region display the validation images and their classification results
fig = plt.figure(figsize=(18, 12))
columns = 2
rows = 2
for idx in range(columns*rows):
ax = fig.add_subplot(rows, columns, idx+1, xticks=[], yticks=[])
plt.imshow(im_convert(images[idx]))
ax.set_title("{} (label:{}/{})".format(cat_to_name[class_ids[int(preds[idx])]],
class_ids[labels[idx].item()], cat_to_name[class_ids[labels[idx].item()]]),
color=("green" if cat_to_name[str(preds[idx])] == cat_to_name[str(labels[idx].item())] else "red"))
plt.tight_layout()
plt.show()
# endregion
|
harryjd/keras_dogs_vs_cats
|
ๅพๅ่ฏๅซ_ไปฟๅๅๅฎ่ฟช็ไพๅญ.py
|
ๅพๅ่ฏๅซ_ไปฟๅๅๅฎ่ฟช็ไพๅญ.py
|
py
| 14,465 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8385118921
|
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import subprocess
import optparse
from collections import namedtuple
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
import sumolib # noqa
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
from sumolib.output import parse_fast # noqa
TLTuple = namedtuple('TLTuple', ['edgeID', 'dist', 'time', 'connection'])
PairKey = namedtuple('PairKey', ['edgeID', 'edgeID2', 'dist'])
PairData = namedtuple('PairData', ['otl', 'oconnection', 'tl', 'connection', 'betweenOffset', 'startOffset',
'travelTime', 'prio', 'numVehicles', 'ogreen', 'green'])
def pair2str(p, full=True):
brief = "%s,%s s=%.1f b=%.1f t=%.1f" % (
p.otl.getID(), p.tl.getID(), p.startOffset, p.betweenOffset, p.travelTime)
if full:
return brief + " og=%s g=%s p=%s n=%s" % (p.ogreen, p.green, p.prio, p.numVehicles)
else:
return brief
def logAddedPair(TLSP, sets, operation):
print("added pair %s,%s with operation %s" %
(TLSP.otl.getID(), TLSP.tl.getID(), operation))
for s in sets:
print(" " + " ".join([pair2str(p, False) for p in s]))
def get_options(args=None):
optParser = optparse.OptionParser()
optParser.add_option("-n", "--net-file", dest="netfile",
help="define the net file (mandatory)")
optParser.add_option("-o", "--output-file", dest="outfile",
default="tlsOffsets.add.xml", help="define the output filename")
optParser.add_option("-r", "--route-file", dest="routefile",
help="define the inputroute file (mandatory)")
optParser.add_option("-a", "--additional-file", dest="addfile",
help="define replacement tls plans to be coordinated")
optParser.add_option("-v", "--verbose", action="store_true",
default=False, help="tell me what you are doing")
optParser.add_option("-i", "--ignore-priority", dest="ignorePriority", action="store_true",
default=False, help="Ignore road priority when sorting TLS pairs")
optParser.add_option("--speed-factor", type="float",
default=0.8, help="avg ration of vehicle speed in relation to the speed limit")
optParser.add_option("-e", "--evaluate", action="store_true",
default=False, help="run the scenario and print duration statistics")
(options, args) = optParser.parse_args(args=args)
if not options.netfile or not options.routefile:
optParser.print_help()
sys.exit()
return options
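# Illustrative invocation (file names are hypothetical, not shipped with this script):
#   python tlsCoordinator.py -n net.net.xml -r routes.rou.xml -o tlsOffsets.add.xml
# Only the net and route files are mandatory; add -e to run the scenario afterwards
# and print duration statistics.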
def locate(tlsToFind, sets):
"""return
- the set in which the given traffic light exists
- the pair in which it was found
- the index within the pair
"""
for s in sets:
for pair in s:
if tlsToFind == pair.otl:
return s, pair, 0
elif tlsToFind == pair.tl:
return s, pair, 1
return None, None, None
def coordinateAfterSet(TLSP, l1, l1Pair, l1Index):
# print "coordinateAfter\n TLSP: %s\n l1Pair: %s\n l1Index=%s" % (
# pair2str(TLSP), pair2str(l1Pair), l1Index)
if l1Index == 0:
TLSPdepart = l1Pair.startOffset - TLSP.ogreen
TLSParrival = TLSPdepart + TLSP.travelTime
TLSPstartOffset2 = TLSParrival - TLSP.green
TLSP = TLSP._replace(startOffset=l1Pair.startOffset,
betweenOffset=TLSPstartOffset2 - l1Pair.startOffset)
else:
l1depart = l1Pair.startOffset + l1Pair.betweenOffset + TLSP.ogreen
TLSParrival = l1depart + TLSP.travelTime
TLSPstartOffset = TLSParrival - TLSP.green
TLSP = TLSP._replace(
startOffset=l1depart, betweenOffset=TLSPstartOffset - l1depart)
l1.append(TLSP)
return TLSP
def coordinateBeforeSet(TLSP, l2, l2Pair, l2Index):
# print "coordinateBeforeSet\n TLSP: %s\n l2Pair: %s\n l2Index=%s" % (
# pair2str(TLSP), pair2str(l2Pair), l2Index)
if l2Index == 0:
l2arrival = l2Pair.startOffset + TLSP.green
TLSPdepart = l2arrival - TLSP.travelTime
TLSPstartOffset = TLSPdepart - TLSP.ogreen
TLSP = TLSP._replace(
startOffset=TLSPstartOffset, betweenOffset=l2Pair.startOffset - TLSPstartOffset)
else:
l2arrival = l2Pair.startOffset + l2Pair.betweenOffset + TLSP.green
TLSPdepart = l2arrival - TLSP.travelTime
TLSPstartOffset = TLSPdepart - TLSP.ogreen
TLSP = TLSP._replace(
startOffset=TLSPstartOffset, betweenOffset=l2arrival - TLSPstartOffset)
l2.append(TLSP)
return TLSP
def computePairOffsets(TLSPList, verbose):
c1, c2, c3, c4, c5 = 0, 0, 0, 0, 0
sets = [] # sets of coordinate TLPairs
operation = ""
for TLSP in TLSPList:
l1, l1Pair, l1Index = locate(TLSP.otl, sets)
l2, l2Pair, l2Index = locate(TLSP.tl, sets)
# print(l1)
if l1 is None and l2 is None:
# new set
newlist = []
newlist.append(TLSP)
sets.append(newlist)
c1 += 1
operation = "newSet"
elif l2 is None and l1 is not None:
# add to set 1 - add after existing set
TLSP = coordinateAfterSet(TLSP, l1, l1Pair, l1Index)
c2 += 1
operation = "addAfterSet"
elif l1 is None and l2 is not None:
# add to set 2 - add before existing set
TLSP = coordinateBeforeSet(TLSP, l2, l2Pair, l2Index)
c3 += 1
operation = "addBeforeSet"
else:
if l1 == l2:
                # cannot coordinate both tls; coordinate the first
# arbitrarily
TLSP = coordinateAfterSet(TLSP, l1, l1Pair, l1Index)
c4 += 1
operation = "addHalfCoordinated"
else:
# merge sets
TLSP = coordinateAfterSet(TLSP, l1, l1Pair, l1Index)
if verbose:
logAddedPair(TLSP, sets, "addAfterSet (intermediate)")
# print "merge\n TLSP: %s\n l1Pair: %s\n l1Index=%s\n l2Pair: %s\n l2Index=%s" % (
# pair2str(TLSP), pair2str(l1Pair), l1Index, pair2str(l2Pair),
# l2Index)
if l2Index == 0:
dt = TLSP.startOffset + \
TLSP.betweenOffset - l2Pair.startOffset
else:
dt = TLSP.startOffset + TLSP.betweenOffset - \
(l2Pair.startOffset + l2Pair.betweenOffset)
merge(sets, l1, l2, dt)
c5 += 1
operation = "mergeSets"
if verbose:
logAddedPair(TLSP, sets, operation)
print("operations: newSet=%s addToSet=%s addToSet2=%s addHalfCoordinated=%s mergeSets=%s" % (
c1, c2, c3, c4, c5))
    return sets
def merge(sets, list1, list2, dt):
for elem in list2:
list1.append(elem._replace(startOffset=elem.startOffset + dt))
sets.remove(list2)
def finalizeOffsets(sets):
offsetDict = {}
for singleSet in sets:
singleSet.sort(
key=lambda pd: (pd.prio, pd.numVehicles / pd.travelTime), reverse=True)
for pair in singleSet:
# print " %s,%s:%s,%s" % (pair.otl.getID(), pair.tl.getID(),
# pair.startOffset, pair.betweenOffset)
tl1 = pair.otl.getID()
tl2 = pair.tl.getID()
betweenOffset = pair.betweenOffset
startOffset = pair.startOffset
if tl1 not in offsetDict:
# print " added %s offset %s" % (tl1, startOffset)
offsetDict[tl1] = startOffset
if tl2 not in offsetDict:
# print " added %s offset %s" % (tl2, startOffset +
# betweenOffset)
offsetDict[tl2] = startOffset + betweenOffset
return offsetDict
def getTLSInRoute(net, edge_ids):
rTLSList = [] # list of traffic lights along the current route
dist = 0
time = 0
for edgeID, nextEdgeID in zip(edge_ids[:-1], edge_ids[1:]):
edge = net.getEdge(edgeID)
nextEdge = net.getEdge(nextEdgeID)
connection = edge.getOutgoing()[nextEdge][0]
TLS = edge.getTLS()
dist += edge.getLength()
time += edge.getLength() / edge.getSpeed()
alreadyFound = [item for item in rTLSList if item[0] == edgeID]
if TLS and not alreadyFound:
rTLSList.append(TLTuple(edgeID, dist, time, connection))
dist = 0
time = 0
return rTLSList
def getFirstGreenOffset(tl, connection):
index = connection._tlLink
tlp = tl.getPrograms()
if len(tlp) != 1:
raise RuntimeError("Found %s programs for tl %s" %
(len(tlp), connection._tls))
phases = list(tlp.values())[0].getPhases()
start = 0
for p in phases:
if p.state[index] in ['G', 'g']:
return start
else:
start += p.duration
raise RuntimeError(
"No green light for tlIndex %s at tl %s" % (index, connection._tls))
def getTLPairs(net, routeFile, speedFactor, ignorePriority):
# pairs of traffic lights
TLPairs = {} # PairKey -> PairData
for route in parse_fast(routeFile, 'route', ['edges']):
rTLSList = getTLSInRoute(net, route.edges.split())
for oldTL, TLelement in zip(rTLSList[:-1], rTLSList[1:]):
key = PairKey(oldTL.edgeID, TLelement.edgeID, oldTL.dist)
numVehicles = 0 if key not in TLPairs else TLPairs[key].numVehicles
tl = net.getEdge(TLelement.edgeID).getTLS()
otl = net.getEdge(oldTL.edgeID).getTLS()
edge = net.getEdge(TLelement.edgeID)
connection = TLelement.connection
oconnection = oldTL.connection
ogreen = getFirstGreenOffset(otl, oconnection)
green = getFirstGreenOffset(tl, connection)
travelTime = TLelement.time / speedFactor
betweenOffset = travelTime + ogreen - green
startOffset = 0
# relevant data for a pair of traffic lights
prio = 1 if ignorePriority else edge.getPriority()
TLPairs[key] = PairData(otl, oconnection, tl, connection, betweenOffset, startOffset, travelTime,
prio, numVehicles + 1, ogreen, green)
return TLPairs
def removeDuplicates(TLPairs):
# @todo: for multiple pairs with the same edges but different dist, keep only the one with the largest numVehicles
return TLPairs
def main(options):
net = sumolib.net.readNet(options.netfile, withLatestPrograms=True)
if options.addfile is not None:
sumolib.net.readNet(options.addfile, withLatestPrograms=True, net=net)
TLPairs = getTLPairs(net, options.routefile, options.speed_factor, options.ignorePriority)
TLPairs = removeDuplicates(TLPairs)
sortHelper = [(
(pairData.prio, pairData.numVehicles / pairData.travelTime), # sortKey
(pairKey, pairData)) # payload
for pairKey, pairData in TLPairs.items()]
tlPairsList = [
value for sortKey, value in sorted(sortHelper, reverse=True)]
print("number of tls-pairs: %s" % len(tlPairsList))
if options.verbose:
print('\n'.join(["edges=%s,%s prio=%s numVehicles/time=%s" % (
pairKey.edgeID, pairKey.edgeID2, pairData.prio, pairData.numVehicles / pairData.travelTime)
for pairKey, pairData in tlPairsList]))
coordinatedSets = computePairOffsets(
[pairData for pairKey, pairData in tlPairsList], options.verbose)
offsetDict = finalizeOffsets(coordinatedSets)
with open(options.outfile, 'w') as outf:
outf.write('<additional>\n')
for ID, startOffset in sorted(offsetDict.items()):
programID = list(net.getTLSSecure(ID).getPrograms().keys())[0]
outf.write(' <tlLogic id="%s" programID="%s" offset="%.2f"/>\n' %
(ID, programID, startOffset))
outf.write('</additional>\n')
sumo = sumolib.checkBinary('sumo')
if options.evaluate:
additionals = [options.outfile]
if options.addfile:
additionals = [options.addfile] + additionals
subprocess.call([sumo,
'-n', options.netfile,
'-r', options.routefile,
'-a', ','.join(additionals),
'-v', '--no-step-log', '--duration-log.statistics'], stdout=sys.stdout)
if __name__ == "__main__":
options = get_options(sys.argv)
main(options)
|
ngctnnnn/DRL_Traffic-Signal-Control
|
sumo-rl/sumo/tools/tlsCoordinator.py
|
tlsCoordinator.py
|
py
| 12,854 |
python
|
en
|
code
| 17 |
github-code
|
6
|
24337846458
|
# -*- coding: utf-8 -*-
import os
import sys
import shutil
import datetime
import numpy as np
from sklearn.model_selection import train_test_split
from PIL import Image
from keras import models
from keras import layers
from keras import optimizers
from keras import regularizers
from keras import backend as K
from keras.callbacks import EarlyStopping
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
models_save_dir = './models/'
if not os.path.exists(models_save_dir):
os.mkdir(models_save_dir)
dataset_dir = './datasets/raw_datasets/Images/'
train_dir = './datasets/train/'
validation_dir = './datasets/validation/'
test_dir = './datasets/test/'
# if the second argument is '-n', split the data again
if len(sys.argv) >= 2 and sys.argv[1] == '-n':
if os.path.exists(train_dir):
shutil.rmtree(train_dir)
if os.path.exists(validation_dir):
shutil.rmtree(validation_dir)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
os.mkdir(train_dir)
os.mkdir(validation_dir)
os.mkdir(test_dir)
for i in range(0, 43):
        # 43 classes in total; one loop iteration per class; split into train/validation/test data with an 8:1:1 ratio
category = i
foldername = str(i).zfill(5)
foldername_new = str(i)
dataset_path = os.path.join(dataset_dir, foldername)
train_path = os.path.join(train_dir, foldername_new)
os.mkdir(train_path)
validation_path = os.path.join(validation_dir, foldername_new)
os.mkdir(validation_path)
test_path = os.path.join(test_dir, foldername_new)
os.mkdir(test_path)
dataset = np.array(os.listdir(dataset_path))
np.random.shuffle(dataset)
#train_dataset, test_dataset = train_test_split(dataset, target, test_size=0.2)
"""
        train_test_split raised a 'too many values to unpack' error,
        so plain array slicing is used instead
"""
train_dataset = dataset[0:int(len(dataset)*0.8)]
validation_dataset = dataset[int(len(dataset)*0.8):int(len(dataset)*0.9)]
test_dataset = dataset[int(len(dataset)*0.9):]
for train_item in train_dataset:
im = Image.open(os.path.join(dataset_path, train_item))
im.save(os.path.join(train_path, train_item.split('.')[0] + '.png'))
#shutil.copy(os.path.join(dataset_path, train_item), train_path)
for validation_item in validation_dataset:
im = Image.open(os.path.join(dataset_path, validation_item))
im.save(os.path.join(validation_path, validation_item.split('.')[0] + '.png'))
#shutil.copy(os.path.join(dataset_path, validation_item), validation_path)
for test_item in test_dataset:
im = Image.open(os.path.join(dataset_path, test_item))
im.save(os.path.join(test_path, test_item.split('.')[0] + '.png'))
#shutil.copy(os.path.join(dataset_path, test_item), test_path)
"""
clear the Keras session before every training run
"""
K.clear_session()
batch_size = 10
steps_per_epoch = int(sum([len(files) for r, d, files in os.walk(train_dir)])/batch_size)
model = models.Sequential()
model.add(layers.Conv2D(32, (3,3), activation='relu', input_shape=(50, 50, 3)))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Conv2D(64, (3,3), activation='relu'))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Conv2D(128, (3,3), activation='relu'))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(43, activation='softmax'))
"""
check our model summary
"""
#model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-4),
metrics=['accuracy']
)
"""
start preprocessing the input data:
turn the raw images into numpy arrays
"""
train_datagen = ImageDataGenerator(rescale=1./255,
#rotation_range=40,
#width_shift_range=0.2,
#height_shift_range=0.2,
#shear_range=0.2,
#zoom_range=0.2,
#horizontal_flip=True,
#fill_mode='nearest'
)
validation_datagen = ImageDataGenerator(rescale=1./255,
#rotation_range=40,
#width_shift_range=0.2,
#height_shift_range=0.2,
#shear_range=0.2,
#zoom_range=0.2,
#horizontal_flip=True,
#fill_mode='nearest'
)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(50,50),
batch_size=batch_size,
class_mode='categorical')
validation_generator = validation_datagen.flow_from_directory(
validation_dir,
target_size=(50,50),
batch_size=batch_size,
class_mode='categorical')
earlystopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='auto')
history = model.fit_generator(train_generator,
steps_per_epoch=steps_per_epoch,
epochs=20,
validation_data=validation_generator,
validation_steps=15,
callbacks=[earlystopping])
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(50,50),
batch_size=20,
class_mode='categorical')
loss, acc = model.evaluate_generator(test_generator, 20)
model.save(os.path.join(models_save_dir, 'traffic_' + datetime.datetime.now().strftime('%Y%m%d_%H:%M:%S') + '_' + str(acc) + '.h5'))
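# A minimal inference sketch (not called above; the file paths are placeholders). It
# mirrors the preprocessing used for training: resize to 50x50 and rescale by 1/255.
def predict_single_image(model_path, image_path):
    trained_model = load_model(model_path)
    img = Image.open(image_path).convert('RGB').resize((50, 50))
    x = np.asarray(img, dtype='float32') / 255.
    x = np.expand_dims(x, axis=0)  # add the batch dimension -> shape (1, 50, 50, 3)
    probs = trained_model.predict(x)[0]
    return int(np.argmax(probs))  # predicted class index in the 0..42 range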
|
jabez128/dl-trafficsigns-detection
|
classifier.py
|
classifier.py
|
py
| 5,975 |
python
|
en
|
code
| 3 |
github-code
|
6
|
22982145642
|
import sys
import json
import os
import io
import collections
import argparse
import logging
from e2edutch import conll
from e2edutch import minimize
from e2edutch import util
from e2edutch import coref_model as cm
from e2edutch import naf
import tensorflow.compat.v1 as tf
logger = logging.getLogger('e2edutch')
class Predictor(object):
"""
A predictor object loads a pretrained e2e model to predict coreferences.
It can be used to predict coreferences on tokenized text.
"""
def __init__(self, model_name='final', config=None, verbose=False):
if verbose:
logger.setLevel(logging.INFO)
if config:
self.config = config
else:
# if no configuration is provided, try to get a default config.
self.config = util.initialize_from_env(model_name=model_name)
# Clear tensorflow context:
tf.reset_default_graph()
self.session = tf.compat.v1.Session()
try:
self.model = cm.CorefModel(self.config)
self.model.restore(self.session)
except ValueError:
raise Exception("Trying to reload the model while the previous " +
"session hasn't been ended. Close the existing " +
"session with predictor.end_session()")
def predict(self, example):
"""
Predict coreference spans for a tokenized text.
Args:
example (dict): dict with the following fields:
sentences ([[str]])
doc_id (str)
clusters ([[(int, int)]]) (optional)
Returns:
[[(int, int)]]: a list of clusters. The items of the cluster are
            spans, denoted by their start and end token indices
"""
tensorized_example = self.model.tensorize_example(
example, is_training=False)
feed_dict = {i: t for i, t in zip(
self.model.input_tensors, tensorized_example)}
_, _, _, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores = self.session.run(
self.model.predictions, feed_dict=feed_dict)
predicted_antecedents = self.model.get_predicted_antecedents(
top_antecedents, top_antecedent_scores)
predicted_clusters, _ = self.model.get_predicted_clusters(
top_span_starts, top_span_ends, predicted_antecedents)
return predicted_clusters
def end_session(self):
"""
Close the session, clearing the tensorflow model context.
"""
self.session.close()
tf.reset_default_graph()
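def _example_predict():
    """
    Minimal usage sketch (never called by this module). It builds an example from plain
    text the same way main() does below and assumes the default 'final' model and its
    configuration are available locally.
    """
    example = util.create_example('Jan ziet Marie. Hij zwaait naar haar.')
    predictor = Predictor(model_name='final')
    clusters = predictor.predict(example)
    predictor.end_session()
    return clusters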
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('input_filename')
parser.add_argument('-o', '--output_file',
type=argparse.FileType('w'), default=sys.stdout)
parser.add_argument('-f', '--format_out', default='conll',
choices=['conll', 'jsonlines', 'naf'])
parser.add_argument('-m', '--model',
type=str,
default='final',
help="model name")
parser.add_argument('-c', '--word_col', type=int, default=2)
parser.add_argument('--cfg_file',
type=str,
default=None,
help="config file")
parser.add_argument('--model_cfg_file',
type=str,
default=None,
help="model config file")
parser.add_argument('-v', '--verbose', action='store_true')
return parser
def read_jsonlines(input_filename):
for line in open(input_filename).readlines():
example = json.loads(line)
yield example
def main(args=None):
parser = get_parser()
    args = parser.parse_args(args)
if args.verbose:
logger.setLevel(logging.INFO)
# Input file in .jsonlines format or .conll.
input_filename = args.input_filename
ext_input = os.path.splitext(input_filename)[-1]
if ext_input not in ['.conll', '.jsonlines', '.txt', '.naf']:
raise Exception(
'Input file should be .naf, .conll, .txt or .jsonlines, but is {}.'
.format(ext_input))
if ext_input == '.conll':
labels = collections.defaultdict(set)
stats = collections.defaultdict(int)
docs = minimize.minimize_partition(
input_filename, labels, stats, args.word_col)
elif ext_input == '.jsonlines':
docs = read_jsonlines(input_filename)
elif ext_input == '.naf':
naf_obj = naf.get_naf(input_filename)
jsonlines_obj, term_ids, tok_ids = naf.get_jsonlines(naf_obj)
docs = [jsonlines_obj]
else:
text = open(input_filename).read()
docs = [util.create_example(text)]
output_file = args.output_file
config = util.initialize_from_env(model_name=args.model,
cfg_file=args.cfg_file,
model_cfg_file=args.model_cfg_file)
predictor = Predictor(config=config)
sentences = {}
predictions = {}
for example_num, example in enumerate(docs):
example["predicted_clusters"] = predictor.predict(example)
if args.format_out == 'jsonlines':
output_file.write(json.dumps(example))
output_file.write("\n")
else:
predictions[example['doc_key']] = example["predicted_clusters"]
sentences[example['doc_key']] = example["sentences"]
if example_num % 100 == 0:
logger.info("Decoded {} examples.".format(example_num + 1))
if args.format_out == 'conll':
conll.output_conll(output_file, sentences, predictions)
elif args.format_out == 'naf':
# Check number of docs - what to do if multiple?
# Create naf obj if input format was not naf
if ext_input != '.naf':
            # TODO: add linguistic processing layers for terms and tokens
            logger.warning(
                'Outputting NAF when input was not naf, '
                + 'no dependency information available')
for doc_key in sentences:
naf_obj, term_ids = naf.get_naf_from_sentences(
sentences[doc_key])
naf_obj = naf.create_coref_layer(
naf_obj, predictions[doc_key], term_ids)
naf_obj = naf.add_linguistic_processors(naf_obj)
buffer = io.BytesIO()
naf_obj.dump(buffer)
output_file.write(buffer.getvalue().decode('utf-8'))
                # TODO: make separate outputs?
                # TODO: use dependency information from conll?
else:
# We only have one input doc
naf_obj = naf.create_coref_layer(
naf_obj, example["predicted_clusters"], term_ids)
naf_obj = naf.add_linguistic_processors(naf_obj)
buffer = io.BytesIO()
naf_obj.dump(buffer)
output_file.write(buffer.getvalue().decode('utf-8'))
if __name__ == "__main__":
main()
|
Filter-Bubble/e2e-Dutch
|
e2edutch/predict.py
|
predict.py
|
py
| 7,163 |
python
|
en
|
code
| 9 |
github-code
|
6
|
38684469232
|
# pylint: disable=attribute-defined-outside-init,wrong-import-order,redefined-outer-name,invalid-name
import gc
from configparser import ConfigParser
from tempfile import TemporaryDirectory
import magic
import pytest
from storage.binary_service import BinaryService
from storage.db_interface_backend import BackEndDbInterface
from storage.MongoMgr import MongoMgr
from test.common_helper import create_test_firmware, get_config_for_testing, store_binary_on_file_system
TEST_FW = create_test_firmware()
@pytest.fixture
def binary_service():
with TemporaryDirectory(prefix='fact_test_') as tmp_dir:
config = get_config_for_testing(temp_dir=tmp_dir)
mongo_server = MongoMgr(config=config)
_init_test_data(config, tmp_dir)
yield BinaryService(config=config)
mongo_server.shutdown()
gc.collect()
def _init_test_data(config: ConfigParser, tmp_dir: str):
backend_db_interface = BackEndDbInterface(config=config)
backend_db_interface.add_firmware(TEST_FW)
store_binary_on_file_system(tmp_dir, TEST_FW)
backend_db_interface.shutdown()
def test_get_binary_and_file_name(binary_service):
binary, file_name = binary_service.get_binary_and_file_name(TEST_FW.uid)
assert file_name == TEST_FW.file_name, 'file_name not correct'
    assert binary == TEST_FW.binary, 'binary not correct'
def test_get_binary_and_file_name_invalid_uid(binary_service):
binary, file_name = binary_service.get_binary_and_file_name('invalid_uid')
assert binary is None, 'should be none'
assert file_name is None, 'should be none'
def test_get_repacked_binary_and_file_name(binary_service):
tar, file_name = binary_service.get_repacked_binary_and_file_name(TEST_FW.uid)
assert file_name == f'{TEST_FW.file_name}.tar.gz', 'file_name not correct'
file_type = magic.from_buffer(tar, mime=False)
    assert 'gzip compressed data' in file_type, 'Result is not a tar.gz file'
def test_get_repacked_binary_and_file_name_invalid_uid(binary_service):
binary, file_name = binary_service.get_repacked_binary_and_file_name('invalid_uid')
assert binary is None, 'should be none'
assert file_name is None, 'should be none'
def test_read_partial_binary(binary_service):
partial_binary = binary_service.read_partial_binary(TEST_FW.uid, 30, 14)
assert len(partial_binary) == 14
    assert partial_binary == b'get_files_test', 'partial binary not correct'
def test_read_partial_binary_invalid_uid(binary_service):
result = binary_service.read_partial_binary('invalid_uid', 0, 1337)
assert result == b'', 'result should be empty'
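# The binary_service fixture above starts MongoDB through MongoMgr and seeds a single
# test firmware, so no extra setup should be needed. A typical invocation from the
# repository root (path taken from this file's location) would be:
#   pytest src/test/integration/storage/test_binary_service.py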
|
5am1i/Fact
|
src/test/integration/storage/test_binary_service.py
|
test_binary_service.py
|
py
| 2,626 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26169526568
|
import argparse
import json
import os
import time
import torch
from redsandt.encoder.bert_encoder import BERTEncoder
from redsandt.framework.bag_re import BagRE
from redsandt.selector.bag_attention import BagAttention
# Pass arguments
parser = argparse.ArgumentParser(
description='Improving Distantly-Supervised Relation Extraction through BERT-based Label & Instance Embeddings')
parser.add_argument('--train', dest="train", action='store_true', help='training mode')
parser.add_argument('--eval', dest="eval", action='store_true', help='evaluation mode')
parser.add_argument('--dataset', dest="dataset", required=True, help='dataset')
parser.add_argument('--config', dest="config", required=True, help='configuration file')
parser.add_argument('--model_dir', dest="model_dir", required=True, help='model dir')
parser.add_argument('--model_name', dest="model_name", required=True, help='model name')
args = parser.parse_args()
# Some basic settings
ROOT_PATH = '.'
DATASET = args.dataset # NYT-10 or GDS
MODEL_DIR = args.model_dir
MODEL_NAME = args.model_name
config = json.load(open(args.config))
# Create folders
if not os.path.exists('experiments/ckpt/' + DATASET + '/' + MODEL_DIR):
os.makedirs('experiments/ckpt/' + DATASET + '/' + MODEL_DIR)
if not os.path.exists('experiments/outputs/' + DATASET + '/' + MODEL_DIR):
os.makedirs('experiments/outputs/' + DATASET + '/' + MODEL_DIR)
ckpt = 'experiments/ckpt/' + DATASET + '/' + MODEL_DIR + '/' + MODEL_NAME + '.pth.tar'
if DATASET == 'NYT-10':
rel2id = json.load(open(os.path.join(ROOT_PATH, 'benchmark/NYT-10-enhanced/nyt10_rel2id.json')))
elif DATASET == 'GDS':
rel2id = json.load(open(os.path.join(ROOT_PATH, 'benchmark/GDS-enhanced/gids_rel2id.json')))
# DEFINE SENTENCE ENCODER
print('Defining the sentence encoder...')
sentence_encoder = BERTEncoder(max_length=config['encoder']['max_length'], num_labels=config['encoder']['num_labels'],
pretrained_model=config['encoder']['pretrained_model'],
drop=config['encoder']['encoder_dropout'], freeze_bert=config['encoder']['freeze_bert'],
text_stp=config['encoder']['text_stp'], entity_types=config['encoder'][
'entity_types'], dataset=DATASET)
# DEFINE MODEL
print("\nDefining model...")
model = BagAttention(sentence_encoder, len(rel2id), rel2id, config['framework']['selector_dropout'])
# DEFINE TRAINING FRAMEWORK
print("\nDefining learning framework...")
model_path = DATASET + '/' + MODEL_DIR
framework = BagRE(train_path=config['train_data_path'], val_path=config['val_data_path'],
test_path=config['test_data_path'], model_name=model_path, model=model, ckpt=ckpt,
batch_size=config['framework']['batch_size'], max_epoch=config['framework']['max_epoch'],
lr=config['framework']['lr'], weight_decay=config['framework']['weight_decay'],
warmup_step_ratio=config['framework']['warmup_step_ratio'], opt=config['framework']['opt'],
weighted_loss=config['framework']['weighted_loss'], bag_size=config['framework']['bag_size'])
# TRAIN MODEL
if args.train:
print("\nTraining model...")
start = time.time()
framework.train_model()
end = time.time()
print("Training time: ", end - start, "sec.")
# EVALUATE MODEL
if args.eval:
print("\nEvaluate model on testing data...")
start = time.time()
framework.load_state_dict(torch.load(ckpt)['state_dict'])
result = framework.eval_model(framework.test_loader, save_eval_metrics=True)
end = time.time()
print("Testing time: ", end - start, "sec.")
# Print Statistics
print('AUC: {}'.format(result['auc']))
print('P@100: {}'.format(result['p@100']))
print('P@200: {}'.format(result['p@200']))
print('P@300: {}'.format(result['p@300']))
print('P@500: {}'.format(result['p@500']))
print('P@1000: {}'.format(result['p@1000']))
print('P@2000: {}'.format(result['p@2000']))
print('P@all: {}'.format(result['p@all']))
print('\nRelation Distribution on Top 300 predictions:')
for key, value in result['rel_dist_at_300'].items():
print(key, ": ", value)
|
DespinaChristou/REDSandT
|
redsandt.py
|
redsandt.py
|
py
| 4,210 |
python
|
en
|
code
| 22 |
github-code
|
6
|
38827999454
|
import random
import qrcode
import qrcode.image.svg
from io import BytesIO
from django.shortcuts import render
from django.views.generic import View
class IndexView(View):
def get(self, request, *args, **kwargs):
template = 'index.html'
return render(
request,
template,
)
def generate_random_code():
num = "12345678900987654321"
numbers = random.sample(num, 5)
five_last_number = ''
for number in numbers:
five_last_number += number
return five_last_number
class CustomerQrAndBarcodeScan(View):
def post(self, request, *args, **kwargs):
templates_text = request.POST['qr_text']
print(templates_text)
factory = qrcode.image.svg.SvgImage
text = generate_random_code()
print(text)
img = qrcode.make(text,
image_factory=factory, box_size=20)
streem = BytesIO()
img.save(streem)
context = {}
context['svg'] = streem.getvalue().decode()
return render(request, "index.html", context)
|
AbrahamAdekunle/Bashir_abraham_ERP
|
bar_qr_code/views.py
|
views.py
|
py
| 1,087 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25823990802
|
import numpy as np
import pygame
import constants
# the ANN drivers below use a local `ann` module that provides the ANN class
# (assumed to live next to this file in the repository)
import ann
class Driver(object):
"""
This class implements the car's driver: visibility, controls etc.
"""
def __init__(self,
view_distance=constants.MAX_VIEW_DISTANCE,
view_resolution=constants.VIEW_RESOLUTION,
view_angle=constants.VIEW_ANGLE):
self.view_distance = view_distance
self.view_resolution = view_resolution
self.view_angle = view_angle
self.draw_visual = True
self.init_view()
self.error = 0.
def init_view(self):
"""
Initialize the driver's view.
"""
self.view_distances = np.linspace(constants.MIN_VIEW_DISTANCE,
self.view_distance,
self.view_resolution[1])
self.view_angles = np.linspace(-self.view_angle/2.,
self.view_angle/2.,
self.view_resolution[0]) * np.pi/180.
self.view_x = np.empty(self.view_resolution)
self.view_y = np.empty(self.view_resolution)
self.view_field = np.zeros(self.view_resolution)
def look(self, car, track):
"""
Evaluate the driver's view ahead.
"""
cos_angles = np.cos(car.direction + self.view_angles)
self.view_x = (car.rect.center[0]
+ np.outer(cos_angles, self.view_distances)
).astype(int)
sin_angles = np.sin(car.direction + self.view_angles)
self.view_y = (car.rect.center[1]
- np.outer(sin_angles, self.view_distances)
).astype(int)
# limit coordinates within track area (only for checking if off track)
x_matrix0 = np.where((self.view_x < 0) |
(self.view_x >= constants.WIDTH_TRACK),
0, self.view_x)
y_matrix0 = np.where((self.view_y < 0) |
(self.view_y >= constants.HEIGHT_TRACK),
0, self.view_y)
self.view_field[:] = track.off_track(x_matrix0, y_matrix0)
# block the view behind corners etc.
if constants.BLOCK_VIEW:
for ii in range(self.view_resolution[0]):
lineview = self.view_field[ii,:]
if np.any(lineview):
lineview[np.argmax(lineview):] = 1
def draw_viewfield(self, screen):
"""
Draw the field of view.
"""
for xx, yy, colind in zip(self.view_x.flatten(),
self.view_y.flatten(),
self.view_field.flatten()):
pygame.draw.circle(screen, constants.COLOR_VIEWFIELD[int(colind)], (xx, yy), 3)
def update(self, car, *args):
"""
Default actions for drivers.
"""
car.accelerate = constants.ALWAYS_FULLGAS
car.brake = False
car.turn_left = False
car.turn_right = False
class Player(Driver):
"""
This class implements the driver for the player car.
"""
def __init__(self, *args, **kwargs):
super(Player, self).__init__(*args, **kwargs)
def update(self, car):
"""
Read keyboard for controlling the player car.
"""
super(Player, self).update(car)
keys = pygame.key.get_pressed()
if keys[pygame.K_UP]:
car.accelerate = True
if keys[pygame.K_DOWN]:
car.brake = True
if keys[pygame.K_LEFT]:
car.turn_left = True
if keys[pygame.K_RIGHT]:
car.turn_right = True
class AI_TIF(Driver):
"""
This class implements a simple AI driver that tries to keep most of
the track in front of its view field.
"""
def __init__(self, *args, **kwargs):
super(AI_TIF, self).__init__(*args, **kwargs)
# speed that still (kind of) allows a 90 degree turn
self.allowed_speed = constants.MAX_VIEW_DISTANCE / (
np.pi / (1.5 * constants.TURN_SPEED))
def update(self, car):
"""
The car turns depending on whether its closest side checks
        are off track. The brake is applied if the car is going too fast
        with a wall in front, and especially if the corner is tight.
"""
# TODO: tuned for track and settings, generalize!
super(AI_TIF, self).update(car)
car.accelerate = True
if self.view_field[0,0] and not self.view_field[-1,0]:
car.turn_left = True
elif self.view_field[-1,0] and not self.view_field[0,0]:
car.turn_right = True
if self.view_field[self.view_resolution[0]//2, -1]:
car.brake = car.speed > self.allowed_speed
# special handling of tight corners
if not all(self.view_field[[0,-1], 1]) and car.speed > 1.:
car.brake = True
class ANN_Online(Driver):
"""
This class implements the AI driver for a neural network.
The network is trained online using stochastic gradient descent.
"""
def __init__(self,
n_hidden_neurons=5,
model_car=None,
learning_rate=0.2,
regularization=1.,
*args, **kwargs):
super(ANN_Online, self).__init__(*args, **kwargs)
self.model_car = model_car # the car to learn from
self.learning_rate = learning_rate
self.regularization = regularization
n_inputs = self.view_resolution[0] * self.view_resolution[1] + 1 # viewpoints + speed
n_outputs = 4 # accelerate, brake, left, right
self.ann = ann.ANN(n_inputs, n_hidden_neurons, n_outputs)
def update(self, own_car):
super(ANN_Online, self).update(own_car)
if constants.PLOT_ERROR:
self.evaluate_error()
self.learn()
inputs = self.prepare_inputs(own_car)
outputs = self.ann.feedforward(inputs)
self.process_output(outputs, own_car)
def learn(self):
model_inputs = self.prepare_inputs(self.model_car)
self.ann.train1(model_inputs, self.model_actions(),
self.learning_rate, self.regularization)
def prepare_inputs(self, car):
inputs = car.driver.view_field.flatten().astype(float)
# speed_transform = np.exp(-car.speed)
speed_transform = 1. / max(car.speed, 1.)
inputs = np.insert(inputs, 0, speed_transform, axis=0)
return inputs
def model_actions(self):
return np.array([self.model_car.accelerate,
self.model_car.brake,
self.model_car.turn_left,
self.model_car.turn_right]).astype(float)
def process_output(self, outputs, car):
threshold = 0.5
if outputs[0] > threshold:
car.accelerate = True
if outputs[1] > threshold:
car.brake = True
if outputs[2] > threshold:
car.turn_left = True
if outputs[3] > threshold:
car.turn_right = True
def evaluate_error(self):
"""
Evaluate the cost function with model input data.
"""
inputs = self.prepare_inputs(self.model_car)
outputs = self.ann.feedforward(inputs)
wanted = self.model_actions()
self.error = self.ann.cost(outputs, wanted)
class ANN_Batch(ANN_Online):
"""
This class implements the AI driver for a neural network.
The network is trained online using gradient descent with
a batch of accumulated samples.
"""
def __init__(self,
n_hidden_neurons=5,
model_car=None,
learning_rate=0.2,
regularization=0.1,
epochs=60,
mini_batch_size=100,
*args, **kwargs):
super(ANN_Batch, self).__init__(n_hidden_neurons, model_car,
learning_rate, regularization, *args, **kwargs)
self.epochs = epochs
self.mini_batch_size = mini_batch_size
self.reset_samples()
def learn(self):
"""
This method is called by the update method in the parent class.
Here we only spy the model car.
"""
self.input_samples.append(self.prepare_inputs(self.model_car))
self.output_samples.append(self.model_actions())
def train(self):
"""
Train the whole set of samples.
NOTE: May take a while and pause the game!
"""
print("Training {} samples for {} epochs in batches of {}".format(
len(self.input_samples), self.epochs, self.mini_batch_size))
self.ann.train_set(self.input_samples, self.output_samples,
self.learning_rate, self.regularization,
self.epochs, self.mini_batch_size)
self.reset_samples()
def reset_samples(self):
self.input_samples = []
self.output_samples = []
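# Rough usage sketch (assumptions: a Car class, a Track class and the game loop live
# elsewhere in this repository; the names below are illustrative only):
#
#   player_car.driver = Player()
#   learner = ANN_Batch(n_hidden_neurons=8, model_car=player_car)
#   # every frame: driver.look(car, track) then driver.update(car)
#   # after enough samples have been spied from the model car: learner.train()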
|
vuolleko/FormulaPF
|
driver.py
|
driver.py
|
py
| 9,079 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21216112261
|
import requests
NEWS_ENDPOINT = "https://newsapi.org/v2/everything"
NEWS_API_KEY = 'caa8a3621a5e481c96807e77fe1dfc91'
news_params = {
'q': "Tesla Inc",
'apiKey': NEWS_API_KEY
}
response = requests.get(url=NEWS_ENDPOINT, params=news_params)
response.raise_for_status()
data = response.json()["articles"]
article = []
for i in range(3):
article.append(data[i])
print(article)
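# A small formatting sketch (not called above). It assumes each article dict carries
# "title" and "description" keys, which is the usual shape of NewsAPI responses.
def format_headlines(articles):
    return [f"{a.get('title', '')}: {a.get('description', '')}" for a in articles]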
|
myoaung99/Stock-news
|
eg.py
|
eg.py
|
py
| 390 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16104415512
|
import RPi.GPIO as GPIO  # the package name is case-sensitive: RPi, not RPI
import os
import time
# GPIO mode (BOARD/BCM)
GPIO.setmode(GPIO.BOARD)  # setmode selects the pin-numbering scheme; setup configures individual pins
TO_BUTTON = 32
FROM_BUTTON = 33
GPIO.setup(TO_BUTTON, GPIO.OUT)
GPIO.setup(FROM_BUTTON, GPIO.IN)
GPIO.output(TO_BUTTON, False)
# FROM_BUTTON is configured as an input above, so it is read rather than driven
while 1:
GPIO.output(TO_BUTTON, True)
#Calling the face recognition program
    if GPIO.input(FROM_BUTTON):  # read the button state; GPIO.output() cannot be used on an input pin
os.system('python recognizer.py')
############################
|
iamjoelgeorge/AssistanceForTheBlind
|
Obstacle Detection/face_recog.py
|
face_recog.py
|
py
| 462 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31266417589
|
import pygame
import os
from tkinter import messagebox
import time
import threading
pygame.init()
pygame.mixer.set_num_channels(20)
width = 150
height = 151
channel = 0
stop_music = False
fon_m = pygame.mixer.music
fon_m.load(os.path.join("sounds", "fon_m.mp3"))
fon_m.play()
fon = pygame.image.load(os.path.join("images", "fon.png"))
HEIGHT, WIDTH = fon.get_height(), fon.get_width()
dis = pygame.display.set_mode([WIDTH, HEIGHT])
dis.blit(pygame.transform.scale(pygame.image.load(os.path.join("images", "ะทะฒัะบ.png")), (WIDTH, HEIGHT)), (0, 0))
class Monster():
def __init__(self, name, x, y, money, max_money, count):
self.image = pygame.transform.scale(pygame.image.load(os.path.join("images", f"{name}.png")), (width, height))
self.x = x
self.y = y
self.money = money
self.max_money = max_money
self.count = count
class Magazine():
def __init__(self, name, all_seconds):
self.image = pygame.transform.scale(pygame.image.load(os.path.join("images", f"{name}.png")), (width, height))
self.name = name
self.all_seconds = all_seconds
self.all_seconds_pit = all_seconds
self.image_egg = pygame.transform.scale(pygame.image.load(os.path.join("images", f"{name}_egg.png")), (100, 71))
def draw_image(dict):
for x in dict:
dis.blit(x.image, (x.x, x.y))
def monster_draw():
for elem in all_vorabularu:
draw_image(elem)
def muzic(dict, muz):
global channel
if len(dict) > 0:
try:
pygame.mixer.Channel(channel).play(pygame.mixer.Sound(os.path.join("sounds", muz)))
channel += 1
except FileNotFoundError:
pass
def all_music():
global channel
global stop_music
for i in range(channel):
pygame.mixer.Channel(i).stop()
channel = 0
if not stop_music:
muzic(bas, "ba_m.mp3")
muzic(tus, "tu_m.mp3")
muzic(mas, 'ma_m.mp3')
muzic(pas, 'pa_m.mp3')
muzic(tis, 'ti_m.mpeg')
muzic(mars, 'mar_m.mp3')
muzic(ras, 'ra_m.ogg')
muzic(sms, 'sm_m.mp3.mpeg')
muzic(lus, 'la_m.mp3')
muzic(izs, 'iz_m.mpeg')
muzic(izs, 'iz_m.mp3')
threading.Timer(7, all_music).start()
def monster_money_every_minuts():
global times
times += 60
all_draw()
threading.Timer(60, monster_money_every_minuts).start()
def staving(vocabulary, name, money, max_money, count):
global file
global vrem_name
vocabulary.append(Monster(stav, mouse[0] - width // 2, mouse[1] - height // 2, money, max_money, count))
file.write(str(vocabulary[-1].x) + ' '+ str(vocabulary[-1].y) + ' ' + str(name) + '\n')
vrem_name = ''
all_draw()
pygame.display.update()
def staving_(vocabulary, name, x, y, money, max_money, count):
vocabulary.append(Monster(name, x, y, money, max_money, count))
def draw_money():
x, y = 0, 0
text = font.render(str(my_money), True, YELLOW, BLACK)
dis.blit(text, (x, y))
y += text.get_height()
text = font.render(str(almaz), True, BLUE, BLACK)
dis.blit(text, (x, y))
def close_coor():
for i in range(mouse[0] - width//2, mouse[0] + width//2):
for j in range(mouse[1] - height//2, mouse[1] + height//2):
close.append((i, j))
def forx(voraluary: list, elem : str):
global ans
global count
for x in voraluary:
for i in range(x.x, x.x + width):
for j in range(x.y, x.y + height):
if mouse == (i, j):
ans += elem
monsters_in_pit.append(x.count)
count += 1
def clik_monster(voraluary):
for x in voraluary:
for i in range(x.x, x.x + width):
for j in range(x.y, x.y + height):
if mouse == (i, j):
return True
def magazin_clik(int, image_x):
global stav
global game
global eggs
global vrem_name
global all_seconds
global my_money
global monster_in_p
mouse = pygame.mouse.get_pos()
if mouse[0] in range(image_x[0], image_x[1]) and mouse[1] in range(HEIGHT // 3 - height // 2, HEIGHT // 3 + height):
if my_money - 300 >= 0 and vrem_name == '' and all_seconds <= 0:
my_money -= 300
game = True
all_draw()
dis.blit(magazine[int].image_egg, (WIDTH - 500 + width//2, height//2 + 27))
pygame.display.update()
vrem_name = magazine[int].name
all_seconds = magazine[int].all_seconds
monster_in_p = int
timer()
elif my_money <= 300:
messagebox.showinfo("", "ะฃ ะฒะฐั ะฝะต ั
ะฒะฐัะฐะตั ะดะตะฝะตะณ")
game = True
all_draw()
pygame.display.update()
else:
messagebox.showinfo("", "ะะธัะพะผะฝะธะบ ัะถะต ะทะฐะฝัั")
game = True
all_draw()
pygame.display.update()
def monster_money(vocabulary):
for x in vocabulary:
if x.money*(times//60) < x.max_money:
text = str(x.money*(times//60))
else:
text = str(x.max_money)
text = font.render((text), True, YELLOW)
dis.blit(text, (x.x + width // 4, x.y + height))
def sbor_money(vocabulary):
global my_money
global times
for x in vocabulary:
if x.money * (times // 60) <= x.max_money:
my_money += x.money * (times // 60)
else:
my_money += x.max_money
def all_draw():
global monster_in_p
global monster_in_pit
global monsters_in_pit
dis.blit(fon, (0, 0))
pygame.draw.rect(dis, YELLOW, (WIDTH - 100, HEIGHT - 100, 100, 100))
draw_money()
dis.blit(pit, (300, 0))
dis.blit(ppp, (WIDTH - 500, 0))
pygame.draw.rect(dis, BLACK, (0, HEIGHT - 100, 100, 100))
pygame.draw.rect(dis, BLACK, (200, 0, 100, 100))
text = font.render("-2", True, BLUE, BLACK)
dis.blit(text, (0, 150))
dis.blit(pygame.transform.scale(pygame.image.load(os.path.join("images", "ะทะฒัะบ.png")), (100, 100)),(100, HEIGHT - 100))
monster_draw()
if monster_in_p != -1:
dis.blit(magazine[monster_in_p].image_egg, (WIDTH - 500 + width//2, height//2 + 27))
stroka = str(all_seconds - seconds)
dis.blit(font.render(str(all_seconds - seconds), True, WHITE), ((WIDTH - (400 + 10 *len(stroka)-1)) , 240))
if monster_in_pit != -1:
# dis.blit(magazine[monsters_in_pit[0]].image_egg, (300, 15))
# dis.blit(magazine[monsters_in_pit[1]].image_egg, (450, 15))
stroka = str(all_seconds_pit - seconds_pit)
dis.blit(font.render(stroka, True, WHITE), (300 - (10 * len(stroka)-1), 240))
for elem in all_vorabularu:
monster_money(elem)
def timer_pit():
global monster_in_p
global all_seconds_pit
global vrem_name
global seconds_pit
global all_seconds
global monster_in_pit
global all_seconds_pit
global monsters_in_pit
global vrem_name_pit
global stav
if game:
all_draw()
pygame.display.update()
if seconds_pit < all_seconds_pit:
seconds_pit += 1
threading.Timer(1, timer_pit).start()
else:
if all_seconds == -1 and vrem_name == '' and monster_in_pit != -1:
all_draw()
dis.blit(magazine[monster_in_pit].image_egg, (300, 20))
all_seconds = magazine[monster_in_pit].all_seconds
monster_in_p = monster_in_pit
monster_in_pit = -1
seconds_pit = 0
monsters_in_pit = []
vrem_name_pit = ''
# vrem_name = magazine[monster_in_pit].name
vrem_name = stav
pygame.display.update()
timer()
else:
threading.Timer(1, timer_pit).start()
def timer():
global eggs
global stav
global seconds
global vrem_name
global seall_seconds
global monster_in_p
global all_seconds
if game:
all_draw()
pygame.display.update()
if seconds < all_seconds:
seconds += 1
threading.Timer(1, timer).start()
else:
stav = vrem_name
eggs = True
monster_in_p = -1
seconds = 0
all_seconds = -1
def all_sbor_money():
for elem in all_vorabularu:
sbor_money(elem)
bas = []
tus = []
mas = []
pas = []
lus = []
osms = []
zes = []
uts = []
uds = []
kus = []
tis = []
ras = []
mars = []
sms = []
izs = []
magazine = []
WHITE = (255, 255, 255)
YELLOW = (255, 255, 0)
BLACK = (0, 0, 0)
BLUE = (0, 0, 255)
RED = (255, 0, 0)
my_money = 1000
almaz = 100
all_vorabularu = [bas, tus, mas, pas, lus, zes, uts, uds, kus, osms, tis, sms, mars, ras, izs]
font = pygame.font.Font('freesansbold.ttf', 70)
count = 0
# fon = pygame.transform.scale(fon, (WIDTH, HEIGHT))
close = []
stav = ''
times = 0
seconds = 0
seconds_pit = 0
monsters_in_pit = []
vrem_name = ''
vrem_name_pit = ''
monster_in_p = -1
monster_in_pit = -1
ba = pygame.transform.scale(pygame.image.load(os.path.join("images", "ba.png")), (width, height))
tu = pygame.transform.scale(pygame.image.load(os.path.join("images", "tu.png")), (width, height))
ma = pygame.transform.scale(pygame.image.load(os.path.join("images", "ma.png")), (width, height))
pa = pygame.transform.scale(pygame.image.load(os.path.join("images", "pa.png")), (width, height))
lu = pygame.transform.scale(pygame.image.load(os.path.join("images", "lu.png")), (width, height))
ku = pygame.transform.scale(pygame.image.load(os.path.join("images", "ku.png")), (width, height))
ze = pygame.transform.scale(pygame.image.load(os.path.join("images", "ze.png")), (width, height))
osm =pygame.transform.scale( pygame.image.load(os.path.join("images", "osm.png")), (width, height))
ud = pygame.transform.scale(pygame.image.load(os.path.join("images", "ud.png")), (width, height))
ut = pygame.transform.scale(pygame.image.load(os.path.join("images", "ut.png")), (width, height))
mar =pygame.transform.scale( pygame.image.load(os.path.join("images", "mar.png")), (width, height))
ti = pygame.transform.scale(pygame.image.load(os.path.join("images", "ti.png")), (width, height))
ra = pygame.transform.scale(pygame.image.load(os.path.join("images", "ra.png")), (width, height))
sm = pygame.transform.scale(pygame.image.load(os.path.join("images", "sm.png")), (width, height))
iz = pygame.image.load(os.path.join("images", f"iz.png"))
file = open('my single monsters.txt','r+')
pit = pygame.image.load(os.path.join("images", "ะฟะธัะพะผะฝะธะบ.png"))
ppp = pygame.image.load(os.path.join("images", "ppp.png"))
pit_width = 220
pit_height = 300
pit = pygame.transform.scale(pit, (pit_width, pit_height))
ppp = pygame.transform.scale(ppp, (pit_width + 50, pit_height))
dis.blit(fon, (0, 0))
dis.blit(pit, (300, 0))
dis.blit(ppp, (WIDTH - 500, 0))
pygame.draw.rect(dis, YELLOW, (WIDTH - 100, HEIGHT - 100, 100, 100))
draw_money()
ee = ''
monster_money_every_minuts()
pygame.draw.rect(dis, BLACK, (0, HEIGHT - 100, 100, 100))
pygame.display.update()
all_seconds = -1
all_seconds_pit = -1
game = True
for line in file:
try:
x, y, name = line.split(' ')
x = int(x)
y = int(y)
if len(name) == 3:
ee += name[-3]
ee += name[-2]
if ee == 'ba':
staving_(bas, 'ba', x, y, 4, 18, 0)
ee = ''
elif ee == 'tu':
staving_(tus, 'tu', x, y, 2, 30, 1)
ee = ''
elif ee == 'ma':
staving_(mas, 'ma', x, y, 3, 30, 2)
ee = ''
elif ee == 'pa':
staving_(pas, 'pa', x, y, 3, 18, 3)
ee = ''
elif ee == 'ze':
staving_(zes, 'ze', x, y, 5, 225, 5)
ee = ''
elif ee == 'ud':
staving_(uds, 'ud', x, y, 6, 180, 7)
ee = ''
elif ee == 'ut':
staving_(uts, 'ut', x, y, 4, 300, 6)
ee = ''
elif ee == 'ku':
staving_(kus, 'ku', x, y, 6, 120, 8)
ee = ''
elif ee == 'lu':
staving_(lus, 'lu', x, y, 5, 225, 4)
ee = ''
elif ee == 'osm':
staving_(osms, 'osm', x, y, 5, 300, 9)
ee = ''
elif ee == 'ti':
staving_(tis, 'ti', x, y, 8, 2160, 10)
ee = ''
elif ee == 'sm':
staving_(sms, 'sm', x, y, 7, 1890, 11)
ee = ''
elif ee == 'mar':
staving_(mars, 'mar', x, y, 8, 1872, 12)
ee = ''
elif ee == 'ra':
staving_(ras, 'ra', x, y, 9, 1872, 13)
ee = ''
elif ee == 'iz':
staving_(izs, 'iz', x, y, 12, 11232, 14)
ee = ''
elif len(name) == 4:
ee += name[-4]
ee += name[-3]
ee += name[-2]
if name == 'osm':
staving_(osms, 'osm', x, y, 5, 300, 9)
ee = ''
pygame.display.update()
except:
try:
my_money, almaz = map(int, (line.split(' ')))
monster_draw()
pygame.display.update()
except:
try:
all_seconds_pit, vrem_name_pit, monster_in_pit = line.split(' ')
if int(all_seconds_pit) - times >= 0:
all_seconds_pit = int(all_seconds_pit) - times
else:
all_seconds_pit = 0
monster_in_pit = int(monster_in_pit)
except:
try:
all_seconds, vrem_name, monster_in_p = line.split(' ')
if int(all_seconds) - times >= 0:
all_seconds = int(all_seconds) - times
else:
all_seconds = -1
monster_in_p = int(monster_in_p)
except:
try:
a, b, c, d, e = line.split(' ')
times = int(time.time()) - int(a) + int(b)
except:
pass
for elem in all_vorabularu:
draw_image(elem)
for i in range(300, 300 + pit_width):
for j in range(pit_height):
close.append((i, j))
pygame.display.update()
cloak = time.time()
pit_ak = False
ans = ''
run = True
for elem in all_vorabularu:
monster_money(elem)
pygame.display.update()
eggs = False
magazine.append(Magazine('ba', 5))
magazine.append(Magazine('tu', 60))
magazine.append(Magazine('ma', 2 * 60))
magazine.append(Magazine('pa', 2 * 60 * 60))
magazine.append(Magazine('lu', 30 * 60))
magazine.append(Magazine('ze', 8 * 60 * 60))
magazine.append(Magazine('ut', 8 * 60 * 60))
magazine.append(Magazine('ud', 8 * 60 * 60))
magazine.append(Magazine('ku', 8 * 60 * 60))
magazine.append(Magazine('osm', 8 * 60 * 60))
magazine.append(Magazine('ti', 8 * 60 * 60))
magazine.append(Magazine('sm', 12 * 60 * 60))
magazine.append(Magazine('mar',12 * 60 * 60))
magazine.append(Magazine('ra', 12 * 60 * 60))
magazine.append(Magazine('iz', 24 * 60 * 60))
if all_seconds >= 0:
timer()
if all_seconds_pit >= 0:
timer_pit()
# all_music()
fon_m.stop()
while run:
for event in pygame.event.get():
if event.type == pygame.QUIT:
file.write(str(my_money) + ' ' + str(almaz) + '\n')
file.write(str(int(time.time())) + ' ' + str(times) + ' ' + '2 ' +'3 ' + '\n')
try:
file.write((str(all_seconds - seconds)) + ' ' + str(vrem_name) + ' ' + str(monster_in_p) + '\n')
except:
pass
try:
if all_seconds_pit > -1:
file.write(str(all_seconds_pit - seconds_pit) + ' ' + str(vrem_name_pit) + ' ' + str(monster_in_pit) + '\n')
except:
pass
file.close()
run = False
pygame.quit()
exit()
if event.type == pygame.MOUSEBUTTONDOWN:
mouse = pygame.mouse.get_pos()
if game:
if eggs == True and stav!= '' and game:
if 1100 > mouse[0] > 200 and 600 > mouse[1] > 150:
if mouse not in close:
eggs = False
seconds = 0
if stav == 'ba':
staving(bas, 'ba', 4, 18, 0)
elif stav == 'tu':
staving(tus, 'tu', 2, 30, 1)
elif stav == 'ma':
staving(mas, 'ma', 3, 30, 2)
elif stav == 'pa':
staving(pas, 'pa', 3, 18, 3)
elif stav == 'lu':
staving(lus, 'lu', 5, 225, 4)
elif stav == 'ze':
staving(zes, 'ze', 5, 225, 5)
elif stav == 'ku':
staving(kus, 'ku', 6, 120, 8)
elif stav == 'ut':
staving(uts, 'ut', 4, 300, 6)
elif stav == 'ud':
staving(uds, 'ud', 6, 180, 7)
elif stav == 'osm':
staving(osms, 'osm', 5, 300, 9)
elif stav == 'ti':
staving(tis, 'ti', 8, 2160, 10)
elif stav == 'mar':
staving(mars, 'mar', 8, 1872, 12)
elif stav == 'sm':
staving(sms, 'sm', 7, 1890, 11)
elif stav == 'ra':
staving(ras, 'ra', 9, 1872, 13)
elif stav == 'iz':
staving(izs, 'iz', 12, 11232, 14)
close_coor()
# song_f = False
stav = ''
all_draw()
pygame.display.update()
elif pit_ak == True:
forx(bas, 'ba')
forx(tus, 'tu')
forx(mas, 'ma')
forx(pas, 'pa')
forx(lus, 'batu')
forx(uds, 'bama')
forx(uts, 'tuma')
forx(osms, 'tupa')
forx(zes, 'pama')
forx(osms, 'tupa')
forx(kus, 'tupa')
if count == 2:
seconds_pit = 0
if 'ba' in ans and 'tu' in ans and 'ma' in ans and 'pa' in ans:
all_seconds_pit = 24 * 60 * 60
monster_in_pit = 14
vrem_name_pit = 'iz'
timer_pit()
stav = 'iz'
elif 'ba' in ans and 'tu' in ans and 'ma' in ans:
all_seconds_pit = 8 * 60 * 60
monster_in_pit = 10
vrem_name_pit = 'ti'
timer_pit()
stav = 'ti'
elif 'ba' in ans and 'tu' in ans and 'pa' in ans:
all_seconds_pit = 12 * 60 * 60
monster_in_pit = 12
vrem_name_pit = 'mar'
timer_pit()
stav = 'mar'
elif 'ba' in ans and 'pa' in ans and 'ma' in ans:
all_seconds_pit = 12 * 60 * 60
monster_in_pit = 13
timer_pit()
vrem_name_pit = 'ra'
stav = 'ra'
elif 'pa' in ans and 'tu' in ans and 'ma' in ans:
all_seconds_pit = 12 * 60 * 60
monster_in_pit = 11
vrem_name_pit = 'sm'
timer_pit()
stav = 'sm'
elif 'tu' in ans and 'ba' in ans:
all_seconds_pit = 30 * 60
monster_in_pit = 4
vrem_name_pit = 'lu'
timer_pit()
stav = 'lu'
elif 'ma' in ans and 'tu' in ans:
all_seconds_pit = 8 * 60 * 60
monster_in_pit = 6
vrem_name_pit = 'ut'
timer_pit()
stav = 'ut'
elif 'ba' in ans and 'ma' in ans:
all_seconds_pit = 8 * 60 * 60
monster_in_pit = 7
vrem_name_pit = 'ud'
timer_pit()
stav = 'ud'
elif 'tu' in ans and 'pa' in ans:
all_seconds_pit = 8 * 60 * 60
monster_in_pit = 9
vrem_name_pit = 'osm'
timer_pit()
stav = 'osm'
elif 'ba' in ans and 'pa' in ans:
all_seconds_pit = 8 * 60 * 60
monster_in_pit = 8
vrem_name_pit = 'ku'
timer_pit()
stav = 'ku'
elif 'ma' in ans and 'pa' in ans:
all_seconds_pit = 8 * 60 * 60
monster_in_pit = 5
vrem_name_pit = 'ze'
timer_pit()
stav = 'ze'
all_draw()
pygame.display.update()
ans = ''
pit_ak = False
count = 0
elif mouse[0] in range(WIDTH - 100, WIDTH) and mouse[1] in range(HEIGHT - 100, HEIGHT):
all_sbor_money()
times = 0
dis.fill(WHITE)
pygame.draw.rect(dis, BLACK, (WIDTH - 100, HEIGHT - 100, 100, 100))
game = False
draw_money()
for x in range(0, 4):
a = 0
if x == 1:
a = WIDTH // 4
elif x == 2:
a = WIDTH // 2
elif x == 3:
a = WIDTH - WIDTH // 4
dis.blit(magazine[x].image, (a, HEIGHT // 3))
text = font.render('300', True, YELLOW, WHITE)
dis.blit(text, (a, HEIGHT // 3 + height))
elif mouse[0] in range(0, 100) and mouse[1] in range(HEIGHT - 100, HEIGHT):
all_sbor_money()
times = 0
all_draw()
pygame.display.update()
elif mouse[0] in range(300, 300 + pit_width) and mouse[1] in range(pit_height):
if pit_ak == False:
pit_ak = True
elif mouse[0] in range(0, 100) and mouse[1] in range(150, 250):
almaz -= 2
if seconds_pit + 3600 <= all_seconds_pit:
seconds_pit += 3600
else:
seconds_pit = all_seconds_pit
if seconds + 3600 <= all_seconds:
seconds += 3600
else:
seconds = all_seconds
elif mouse[0] in range(200, 300) and mouse[1] in range(0, 100):
bas = []
tus = []
mas = []
pas = []
lus = []
osms = []
zes = []
uts = []
uds = []
kus = []
tis = []
ras = []
mars = []
sms = []
izs = []
all_vorabularu = [bas, tus, mas, pas, lus, zes, uts, uds, kus, osms, tis, sms, mars, ras, izs]
my_money = 1000
almaz = 1000
close = []
vrem_name = ''
vrem_name_pit = ''
seconds = 0
seconds_pit = 0
all_seconds = -1
all_seconds_pit = -1
monster_in_p = -1
monster_in_pit = -1
monsters_in_pit = []
channel = 0
count = 0
stav = ''
times = 0
file.truncate(0)
all_draw()
pygame.display.update()
elif mouse[0] in range(WIDTH - 300, WIDTH) and mouse[1] in range (0, 300):
my_money += 10000
almaz += 100
seconds = all_seconds
seconds_pit = all_seconds_pit
all_draw()
pygame.display.update()
elif mouse[0] in range(100, 200) and mouse[1] in range(HEIGHT - 100, HEIGHT):
if not stop_music:
stop_music = True
else:
stop_music = False
all_music()
pygame.display.update()
else:
magazin_clik(0, (0, 0 + width))
magazin_clik(1, (WIDTH // 4, WIDTH // 4 + width))
magazin_clik(2, (WIDTH // 2, WIDTH // 2 + width))
magazin_clik(3, (WIDTH - WIDTH // 4, WIDTH - WIDTH // 4 + width))
if mouse[0] in range(WIDTH - 100, WIDTH) and mouse[1] in range(HEIGHT - 100, HEIGHT):
game = True
all_draw()
pygame.display.update()
|
solvalkon/python_study
|
my single monsters/my single monsters class.py
|
my single monsters class.py
|
py
| 26,982 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29157621722
|
# -*- coding: utf-8 -*-
import logging
import aiogrpc
class AsyncPluginManager:
"""
Connects to a running mavsdk server or starts one and manages plugins
"""
@classmethod
async def create(cls, host, port=50051):
self = AsyncPluginManager()
self.host = host
self.port = port
self.plugins = {}
await self._connect_backend()
return self
async def _connect_backend(self):
"""
Initializes the connection to the running backend
"""
#: gRPC channel
self._channel = aiogrpc.insecure_channel(
"{}:{}".format(self.host, self.port),
standalone_pool_for_streaming=True
)
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler()) # Avoid errors when user has not configured logging
logger.debug("Waiting for mavsdk_server to be ready...")
await aiogrpc.channel_ready_future(self._channel)
logger.debug("Connected to mavsdk_server!")
@property
def channel(self):
"""
gRPC channel to the backend
"""
return self._channel
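# Usage sketch (added for illustration, not part of the original module): it
# assumes a mavsdk_server is already listening on 127.0.0.1:50051; the host and
# port literals are assumptions, while AsyncPluginManager.create() and the
# .channel property come from the class above.
async def _example_connect():
    manager = await AsyncPluginManager.create(host="127.0.0.1", port=50051)
    return manager.channel  # shared aiogrpc channel that plugins would reuse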
|
mavlink/MAVSDK-Python
|
mavsdk/async_plugin_manager.py
|
async_plugin_manager.py
|
py
| 1,162 |
python
|
en
|
code
| 246 |
github-code
|
6
|
40607525974
|
'''
ETTTP_Client_skeleton.py
34743-02 Information Communications
Term Project on Implementation of Ewah Tic-Tac-Toe Protocol
Skeleton Code Prepared by JeiHee Cho
May 24, 2023
'''
import random
import tkinter as tk
from socket import *
import _thread
from ETTTP_TicTacToe import TTT, check_msg
if __name__ == '__main__':
SERVER_IP = '127.0.0.1'
MY_IP = '127.0.0.1'
SERVER_PORT = 12000
SIZE = 1024
SERVER_ADDR = (SERVER_IP, SERVER_PORT)
with socket(AF_INET, SOCK_STREAM) as client_socket:
client_socket.connect(SERVER_ADDR)
###################################################################
# Receive who will start first from the server
start_move_message = client_socket.recv(SIZE).decode().strip()
check_result = check_msg(start_move_message, MY_IP)
start_index = start_move_message.index("First-Move:") + len("First-Move:")
start_user = start_move_message[start_index:]
if start_user=="ME":
print("์๋ฒ ์ ")
start=0
else:
print("ํด๋ผ์ด์ธํธ ์ ")
start=1
# Confirm the message follows the ETTTP format, then send an ACK
if check_result:
ack_message ="ACK"+start_move_message[4:]
client_socket.sendall(ack_message.encode())
else:
print("๋ฉ์ธ์ง๊ฐ ํ๋ฆผ")
quit()
###################################################################
# Start game
root = TTT(target_socket=client_socket, src_addr=MY_IP,dst_addr=SERVER_IP)
root.play(start_user=start)
root.mainloop()
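# Illustrative parsing sketch (added, not part of the skeleton): the sample
# start message below is an assumption about the ETTTP layout; only the
# "First-Move:" slicing mirrors the client logic above.
def _parse_first_move(message: str) -> str:
    idx = message.index("First-Move:") + len("First-Move:")
    return message[idx:].strip()

assert _parse_first_move("SEND ETTTP/1.0\r\nFirst-Move:ME") == "ME"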
|
seung-eon/TicTacToe
|
ETTTP_Client.py
|
ETTTP_Client.py
|
py
| 1,681 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23065976802
|
import numpy as np
import os, sys, math
import pandas as pd
import dash
#import dash_core_components as dcc
from dash import dcc
#import dash_html_components as html
from dash import html
from dash.dependencies import Input, Output
import plotly.graph_objs as go
class Obstacle():
def __init__(self, df, dataset, frame_name):
self.df = df
self.coordinate_system = dataset.coordinate_system
if 'id' in df.keys():
self.id = df['id']
else:
self.id = -1.
if self.coordinate_system == 'camera_coordinate_system':
self.x_center, self.y_center, self.z_center, self.yaw = dataset.project_center_camera_to_lidar(frame_name, df['x'], df['y'], df['z'], df['yaw'])
elif self.coordinate_system == 'lidar_coordinate_system':
self.x_center = df['x']
self.y_center = df['y']
self.z_center = df['z']
self.yaw = df['yaw']
else:
print("Coordinate System: {} NOT implemented!".format(self.coordinate_system))
sys.exit(1)
self.w = df['w']
self.l = df['l']
self.h = df['h']
if 'score' in df.keys():
self.score = df['score']
else:
self.score = -1.
self.label = df['label']
def print_obstacle(self):
print('------')
print(self.df)
print('------\n')
################################ 3D BOXES ################################
def return_vertex(df, dataset, frame_name):
all_vertex = []
all_labels = []
all_obstacles = []
for i in range(len(df)):
# Parser obstacle
obstacle = Obstacle(df.iloc[int(i)], dataset, frame_name)
#obstacle.print_obstacle()
id_box = int(obstacle.id)
x_center = obstacle.x_center
y_center = obstacle.y_center
z_center = obstacle.z_center
yaw = obstacle.yaw
w_half = obstacle.w / 2.
l_half = obstacle.l / 2.
h = obstacle.h
# Build the box vertices
point_A_x = (x_center - l_half * math.cos(-yaw) - w_half * math.sin(-yaw))
point_A_y = (y_center + l_half * math.sin(-yaw) - w_half * math.cos(-yaw))
# Get B point
point_B_x = (x_center + l_half* math.cos(-yaw) - w_half * math.sin(-yaw))
point_B_y = (y_center - l_half* math.sin(-yaw) - w_half * math.cos(-yaw))
# Get C point
point_C_x = (x_center + l_half * math.cos(-yaw) + w_half * math.sin(-yaw))
point_C_y = (y_center - l_half * math.sin(-yaw) + w_half * math.cos(-yaw))
# Get D point
point_D_x = (x_center - l_half * math.cos(-yaw) + w_half * math.sin(-yaw))
point_D_y = (y_center + l_half * math.sin(-yaw) + w_half * math.cos(-yaw))
vertices = np.array([
[point_A_x, point_A_y, z_center],
[point_B_x, point_B_y, z_center],
[point_C_x, point_C_y, z_center],
[point_D_x, point_D_y, z_center],
[point_A_x, point_A_y, z_center+h],
[point_B_x, point_B_y, z_center+h],
[point_C_x, point_C_y, z_center+h],
[point_D_x, point_D_y, z_center+h]
])
indices = np.array([
[0, 1, 2, 3],
[0, 1, 5, 4],
[1, 2, 6, 5],
[2, 3, 7, 6],
[3, 0, 4, 7],
[4, 5, 6, 7]
])
all_vertex.append(vertices)
all_labels.append('{}-{}: {:.3f}'.format(obstacle.label, id_box, obstacle.score))
return all_vertex, all_labels
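# Worked example (added for clarity; the numbers are illustrative): with yaw = 0
# the corner formulas in return_vertex() reduce to an axis-aligned box. For
# x_center=10, y_center=5, l=4 (l_half=2), w=2 (w_half=1):
#   point_A_x = 10 - 2*cos(0) - 1*sin(0) = 8
#   point_A_y = 5 + 2*sin(0) - 1*cos(0) = 4
# i.e. corner A lands at (x_center - l/2, y_center - w/2), as expected.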
def draw_annotations_frame(dataset, frame_list, frame, fig):
if frame_list is None: return fig
df = pd.read_csv(os.path.join(dataset.annotations_data_path, frame_list[frame]), delimiter=' ', names=dataset.annotations_format)
# Compute the box vertices
all_vertex, all_labels = return_vertex(df, dataset, frame_list[frame])
for i, _ in enumerate(all_vertex):
vertices = all_vertex[i]
label = all_labels[i]
faces = go.Mesh3d(
x=vertices[:, 0],
y=vertices[:, 1],
z=vertices[:, 2],
i = [7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2],
j = [3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3],
k = [0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6],
name=label,
opacity=0.3
)
fig.add_trace(faces)
return fig
################################ LiDAR ################################
def draw_lidar(dataset, lidar_frame_list, frame, lidar_res):
filename = os.path.join(dataset.lidar_data_path, lidar_frame_list[frame])
points = dataset.load_lidar(filename)
PC_scatter = go.Scatter3d(
x=points["x"],
y=points["y"],
z=points["z"],
mode='markers',
marker=dict(
size=lidar_res,
color=[0,0,0],
opacity=0.3
)
)
return PC_scatter
|
ArmanAstud/3D_detection_visualizer
|
scripts/utils.py
|
utils.py
|
py
| 4,363 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35818166590
|
import sublime
import os
import platform
import re
import subprocess
class MergedSettings():
def __init__(self):
# This is a Sublime settings object.
self.plugin = sublime.load_settings("GoTools.sublime-settings")
# This is just a dict.
self.project = sublime.active_window().active_view().settings().get('GoTools', {})
def get(self, key, default = None):
return self.project.get(key, self.plugin.get(key, default))
class GoToolsSettings():
def __init__(self):
if not self.GoEnv:
raise Exception("GoTools doesn't appear to be initialized")
# Load the Sublime settings files.
settings = MergedSettings()
self.goroot = self.GoEnv["GOROOT"]
self.goarch = self.GoEnv["GOHOSTARCH"]
self.goos = self.GoEnv["GOHOSTOS"]
self.go_tools = self.GoEnv["GOTOOLDIR"]
if not self.goroot or not self.goarch or not self.goos or not self.go_tools:
raise Exception("GoTools: ERROR: Couldn't detect Go runtime information from `go env`.")
# The GOROOT bin directory is namespaced with the GOOS and GOARCH.
self.gorootbin = os.path.join(self.goroot, "bin", self.goos + "_" + self.goarch)
# For GOPATH, env < plugin < project, and project supports replacement of
# ${gopath} with whatever preceded in the hierarchy.
self.gopath = settings.plugin.get('gopath', os.getenv('GOPATH', ''))
if len(self.gopath) == 0:
self.gopath = self.GoEnv['GOPATH']
if 'gopath' in settings.project:
self.gopath = settings.project['gopath'].replace('${gopath}', self.gopath)
if self.gopath is None or len(self.gopath) == 0:
raise Exception("GoTools: ERROR: You must set either the `gopath` setting or the GOPATH environment variable.")
# Plugin feature settings.
self.debug_enabled = settings.get("debug_enabled")
self.format_on_save = settings.get("format_on_save")
self.format_backend = settings.get("format_backend")
self.autocomplete = settings.get("autocomplete")
self.goto_def_backend = settings.get("goto_def_backend")
# Project feature settings.
self.project_package = settings.get("project_package")
self.build_packages = settings.get("build_packages", [])
self.test_packages = settings.get("test_packages", [])
self.tagged_test_tags = settings.get("tagged_test_tags", [])
self.tagged_test_packages = settings.get("tagged_test_packages", [])
self.verbose_tests = settings.get("verbose_tests", False)
self.test_timeout = settings.get("test_timeout", None)
# For Go runtime information, verify go on PATH and ask it about itself.
def load_goenv():
# Look up the system PATH.
ospath = os.getenv('PATH', '')
# For Darwin, get a login shell to resolve PATH as launchd won't always
# provide it. This technique is borrowed from SublimeFixMacPath[1].
# [1] https://github.com/int3h/SublimeFixMacPath.
if platform.system() == "Darwin":
command = "/usr/bin/login -fqpl $USER $SHELL -l -c 'printf \"%s\" \"$PATH\"'"
stdout, stderr = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).communicate()
if stderr and len(stderr) > 0:
raise Exception("GoTools: couldn't resolve system PATH: " + stderr.decode())
ospath = stdout.decode()
# Find the go binary on PATH, and abort initialization if it can't be found.
gobinary = None
goname = "go"
if platform.system() == "Windows":
goname = "go.exe"
for segment in ospath.split(os.pathsep):
candidate = os.path.join(segment, goname)
if os.path.isfile(candidate):
gobinary = candidate
break
if not gobinary:
raise Exception("GoTools: couldn't find the go binary in PATH: " + ospath)
# Hide popups on Windows
si = None
if platform.system() == "Windows":
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
# Gather up the Go environment using `go env`.
print("GoTools: initializing using Go binary: " + gobinary)
goenv = {}
stdout, stderr = subprocess.Popen([gobinary, 'env'], stdout=subprocess.PIPE, startupinfo=si).communicate()
if stderr and len(stderr) > 0:
raise Exception("GoTools: '" + gobinary + " env' failed during initialization: " + stderr.decode())
for env in stdout.decode().splitlines():
match = re.match(r'(.*)="(.*)"', env)
if platform.system() == "Windows":
match = re.match(r'(?:set\s)(.*)=(.*)', env)
if match and match.group(1) and match.group(2):
goenv[match.group(1)] = match.group(2)
return goenv
# Load and keep a cache of the Go runtime information during plugin init.
GoToolsSettings.GoEnv = load_goenv()
print("GoTools: initialized with Go environment: "+str(GoToolsSettings.GoEnv))
|
uraza/GoTools
|
gotools_settings.py
|
gotools_settings.py
|
py
| 4,663 |
python
|
en
|
code
| null |
github-code
|
6
|
25068490855
|
from typing import Tuple, List
from asendia_us_lib.shipping_rate_request import ShippingRateRequest
from asendia_us_lib.shipping_rate_response import ShippingRate
from purplship.core.units import Packages, Services, Options
from purplship.core.utils import Serializable, DP, NF
from purplship.core.models import (
RateRequest,
RateDetails,
Message
)
from purplship.providers.asendia_us.units import Service, Option, ProcessingLocation
from purplship.providers.asendia_us.error import parse_error_response
from purplship.providers.asendia_us.utils import Settings
def parse_rate_response(response: dict, settings: Settings) -> Tuple[List[RateDetails], List[Message]]:
errors = parse_error_response(response, settings)
details = [
_extract_details(detail, settings)
for detail in (response.get('shippingRates') or [])
]
return details, errors
def _extract_details(detail: dict, settings: Settings) -> RateDetails:
rate = DP.to_object(ShippingRate, detail)
return RateDetails(
carrier_id=settings.carrier_id,
carrier_name=settings.carrier_name,
currency=rate.currencyType,
service=Service.map(rate.productCode).name_or_key,
base_charge=NF.decimal(rate.rate),
total_charge=NF.decimal(rate.rate)
)
def rate_request(payload: RateRequest, settings: Settings) -> Serializable[ShippingRateRequest]:
package = Packages(payload.parcels).single
service = (Services(payload.services, Service).first or Service.asendia_us_all).value
options = Options(payload.options, Option)
request = ShippingRateRequest(
accountNumber=settings.account_number,
subAccountNumber=options.asendia_sub_account_number,
processingLocation=ProcessingLocation.map(options.asendia_processing_location or "SFO").name,
recipientPostalCode=payload.recipient.postal_code,
recipientCountryCode=payload.recipient.country_code,
totalPackageWeight=package.weight.value,
weightUnit=package.weight_unit.value.lower(),
dimLength=package.length.value,
dimWidth=package.width.value,
dimHeight=package.height.value,
dimUnit=package.dimension_unit.value,
productCode=service,
)
return Serializable(request, DP.to_dict)
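# Shape sketch (added for illustration): a minimal response dict that
# parse_rate_response() above would accept. Only the 'shippingRates', 'rate',
# 'currencyType' and 'productCode' keys are taken from the code above; the
# concrete values are assumptions.
# {
#     "shippingRates": [
#         {"productCode": "...", "rate": 12.5, "currencyType": "USD"}
#     ]
# }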
|
danh91/purplship
|
sdk/extensions/asendia_us/purplship/providers/asendia_us/rate.py
|
rate.py
|
py
| 2,301 |
python
|
en
|
code
| null |
github-code
|
6
|